Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next

Pull networking updates from David Miller:

 1) Add TX fast path in mac80211, from Johannes Berg.

 2) Add TSO/GRO support to ibmveth, from Thomas Falcon

 3) Move away from cached routes in ipv6, just like ipv4, from Martin
    KaFai Lau.

 4) Lots of new rhashtable tests, from Thomas Graf.

 5) Run ingress qdisc lockless, from Alexei Starovoitov.

 6) Allow servers to fetch TCP packet headers for SYN packets of new
    connections, for fingerprinting.  From Eric Dumazet.  (A usage
    sketch follows this list.)

 7) Add xmit_mode parameter to pktgen, for injecting test packets
    directly into the stack's receive path (netif_receive_skb()).  From
    Alexei Starovoitov.

 8) Cache access optimizations via simplifications of build_skb(), from
    Alexander Duyck.

 9) Move page frag allocator under mm/, also from Alexander.

10) Add xmit_more support to hv_netvsc, from KY Srinivasan.

11) Add a counter to guard against endless reclassify loops in the
    packet scheduler.

12) Extend the flow dissector to be programmable and use it in the new
    "Flower" classifier.  From Jiri Pirko.

13) AF_PACKET fanout rollover fixes, performance improvements, and new
    statistics.  From Willem de Bruijn.

14) Add netdev driver for GENEVE tunnels, from John W Linville.

15) Add ingress netfilter hooks and filtering, from Pablo Neira Ayuso.

16) Fix handling of epoll edge triggers in TCP, from Eric Dumazet.

17) Add an ECN retry fallback for the initial TCP handshake, from Daniel
    Borkmann.

18) Add tail call support to BPF, from Alexei Starovoitov.

19) Add several pktgen helper scripts, from Jesper Dangaard Brouer.

20) Add zerocopy support to AF_UNIX, from Hannes Frederic Sowa.

21) Favor even port numbers for allocation to connect() requests, and
    odd port numbers for bind(0), in an effort to help avoid
    ip_local_port_range exhaustion.  From Eric Dumazet.

22) Add Cavium ThunderX driver, from Sunil Goutham.

23) Allow bpf programs to access skb_iif and dev->ifindex SKB metadata,
    from Alexei Starovoitov.

24) Add support for T6 chips in cxgb4vf driver, from Hariprasad Shenai.

25) Double TCP Small Queues default to 256K to accommodate situations
    like the XEN driver and wireless aggregation.  From Wei Liu.

26) Add more entropy inputs to flow dissector, from Tom Herbert.

27) Add CDG congestion control algorithm to TCP, from Kenneth Klette
    Jonassen.

28) Convert ipset over to RCU locking, from Jozsef Kadlecsik.

29) Track and act upon link status of ipv4 route nexthops, from Andy
    Gospodarek.
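
As an illustration of (6), a minimal user-space sketch; TCP_SAVE_SYN and
TCP_SAVED_SYN come from this series, the fallback option values are an
assumption taken from the uapi header, and error handling is elided:

	/* Sketch: ask the kernel to save the SYN headers of new
	 * connections, then read them back on an accepted socket. */
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <arpa/inet.h>

	#ifndef TCP_SAVE_SYN
	#define TCP_SAVE_SYN	27	/* assumed from uapi tcp.h */
	#define TCP_SAVED_SYN	28	/* assumed from uapi tcp.h */
	#endif

	int main(void)
	{
		int one = 1, lfd = socket(AF_INET, SOCK_STREAM, 0);
		struct sockaddr_in a = { .sin_family = AF_INET,
					 .sin_port = htons(8080) };
		unsigned char syn[512];
		socklen_t len = sizeof(syn);

		/* Keep the network+TCP headers of each incoming SYN. */
		setsockopt(lfd, IPPROTO_TCP, TCP_SAVE_SYN, &one, sizeof(one));
		bind(lfd, (struct sockaddr *)&a, sizeof(a));
		listen(lfd, 16);

		int cfd = accept(lfd, NULL, NULL);

		/* Returns the saved SYN headers, e.g. for fingerprinting. */
		if (getsockopt(cfd, IPPROTO_TCP, TCP_SAVED_SYN, syn, &len) == 0)
			printf("saved SYN: %u header bytes\n", (unsigned)len);
		close(cfd);
		close(lfd);
		return 0;
	}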

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1670 commits)
  bridge: vlan: flush the dynamically learned entries on port vlan delete
  bridge: multicast: add a comment to br_port_state_selection about blocking state
  net: inet_diag: export IPV6_V6ONLY sockopt
  stmmac: troubleshoot unexpected bits in des0 & des1
  net: ipv4 sysctl option to ignore routes when nexthop link is down
  net: track link-status of ipv4 nexthops
  net: switchdev: ignore unsupported bridge flags
  net: Cavium: Fix MAC address setting in shutdown state
  drivers: net: xgene: fix for ACPI support without ACPI
  ip: report the original address of ICMP messages
  net/mlx5e: Prefetch skb data on RX
  net/mlx5e: Pop cq outside mlx5e_get_cqe
  net/mlx5e: Remove mlx5e_cq.sqrq back-pointer
  net/mlx5e: Remove extra spaces
  net/mlx5e: Avoid TX CQE generation if more xmit packets expected
  net/mlx5e: Avoid redundant dev_kfree_skb() upon NOP completion
  net/mlx5e: Remove re-assignment of wq type in mlx5e_enable_rq()
  net/mlx5e: Use skb_shinfo(skb)->gso_segs rather than counting them
  net/mlx5e: Static mapping of netdev priv resources to/from netdev TX queues
  net/mlx4_en: Use HW counters for rx/tx bytes/packets in PF device
  ...
diff --git a/Documentation/ABI/testing/sysfs-bus-pci-drivers-janz-cmodio b/Documentation/ABI/testing/sysfs-bus-pci-drivers-janz-cmodio
new file mode 100644
index 0000000..4d08f28
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-pci-drivers-janz-cmodio
@@ -0,0 +1,8 @@
+What:		/sys/bus/pci/drivers/janz-cmodio/.../modulbus_number
+Date:		May 2010
+KernelVersion:	2.6.35
+Contact:	Ira W. Snyder <ira.snyder@gmail.com>
+Description:
+		Value representing the HEX switch S2 of the Janz carrier board CMOD-IO or CAN-PCI2
+
+		Read-only: value of the configuration switch (0..15)
diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net
index 5ecfd72..668604f 100644
--- a/Documentation/ABI/testing/sysfs-class-net
+++ b/Documentation/ABI/testing/sysfs-class-net
@@ -39,6 +39,25 @@
 		Format is a string, e.g: 00:11:22:33:44:55 for an Ethernet MAC
 		address.
 
+What:		/sys/class/net/<bridge iface>/bridge/group_fwd_mask
+Date:		January 2012
+KernelVersion:	3.2
+Contact:	netdev@vger.kernel.org
+Description:
+		Bitmask to allow forwarding of link local frames with address
+		01-80-C2-00-00-0X on a bridge device. Only values that set bits
+		not matching BR_GROUPFWD_RESTRICTED in net/bridge/br_private.h are
+		allowed.
+		Default value 0 does not forward any link local frames.
+
+		Restricted bits:
+		0: 01-80-C2-00-00-00 Bridge Group Address used for STP
+		1: 01-80-C2-00-00-01 (MAC Control) 802.3 used for MAC PAUSE
+		2: 01-80-C2-00-00-02 (Link Aggregation) 802.3ad
+
+		Any values not setting these bits can be used. Take special
+		care when forwarding control frames e.g. 802.1X-PAE or LLDP.
+
 What:		/sys/class/net/<iface>/broadcast
 Date:		April 2005
 KernelVersion:	2.6.12
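
The group_fwd_mask entry above is set by writing a mask to the sysfs file;
a minimal validity check in C, where the BR_GROUPFWD_RESTRICTED value is an
assumption derived from the three restricted bits listed (the authoritative
definition lives in net/bridge/br_private.h):

	#include <stdint.h>

	/* Assumed from the text above: bits 0 (STP), 1 (MAC PAUSE) and
	 * 2 (802.3ad) may never be forwarded. */
	#define BR_GROUPFWD_RESTRICTED	0x0007u

	/* A mask is acceptable only if it sets none of the restricted
	 * bits, e.g. 0x4000 to forward 01-80-C2-00-00-0E (LLDP). */
	static int group_fwd_mask_valid(uint16_t mask)
	{
		return (mask & BR_GROUPFWD_RESTRICTED) == 0;
	}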
diff --git a/Documentation/ABI/testing/sysfs-class-net-janz-ican3 b/Documentation/ABI/testing/sysfs-class-net-janz-ican3
new file mode 100644
index 0000000..fdbc03a
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-net-janz-ican3
@@ -0,0 +1,19 @@
+What:		/sys/class/net/<iface>/termination
+Date:		May 2010
+KernelVersion:	2.6.35
+Contact:	Ira W. Snyder <ira.snyder@gmail.com>
+Description:
+		Value representing the CAN bus termination
+
+		Default: 1 (termination active)
+		Reading: get actual termination state
+		Writing: set actual termination state (0=no termination, 1=termination active)
+
+What:		/sys/class/net/<iface>/fwinfo
+Date:		May 2015
+KernelVersion:	3.19
+Contact:	Andreas Gröger <andreas24groeger@gmail.com>
+Description:
+		Firmware stamp of ican3 module
+		Read-only: 32 byte string identification of the ICAN3 module
+		(known values: "JANZ-ICAN3 ICANOS 1.xx", "JANZ-ICAN3 CAL/CANopen 1.xx")
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
deleted file mode 100644
index 8db3238..0000000
--- a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-* AMD 10GbE PHY driver (amd-xgbe-phy)
-
-Required properties:
-- compatible: Should be "amd,xgbe-phy-seattle-v1a" and
-  "ethernet-phy-ieee802.3-c45"
-- reg: Address and length of the register sets for the device
-   - SerDes Rx/Tx registers
-   - SerDes integration registers (1/2)
-   - SerDes integration registers (2/2)
-- interrupt-parent: Should be the phandle for the interrupt controller
-  that services interrupts for this device
-- interrupts: Should contain the amd-xgbe-phy interrupt.
-
-Optional properties:
-- amd,speed-set: Speed capabilities of the device
-    0 - 1GbE and 10GbE (default)
-    1 - 2.5GbE and 10GbE
-
-The following optional properties are represented by an array with each
-value corresponding to a particular speed. The first array value represents
-the setting for the 1GbE speed, the second value for the 2.5GbE speed and
-the third value for the 10GbE speed.  All three values are required if the
-property is used.
-- amd,serdes-blwc: Baseline wandering correction enablement
-    0 - Off
-    1 - On
-- amd,serdes-cdr-rate: CDR rate speed selection
-- amd,serdes-pq-skew: PQ (data sampling) skew
-- amd,serdes-tx-amp: TX amplitude boost
-- amd,serdes-dfe-tap-config: DFE taps available to run
-- amd,serdes-dfe-tap-enable: DFE taps to enable
-
-Example:
-	xgbe_phy@e1240800 {
-		compatible = "amd,xgbe-phy-seattle-v1a", "ethernet-phy-ieee802.3-c45";
-		reg = <0 0xe1240800 0 0x00400>,
-		      <0 0xe1250000 0 0x00060>,
-		      <0 0xe1250080 0 0x00004>;
-		interrupt-parent = <&gic>;
-		interrupts = <0 323 4>;
-		amd,speed-set = <0>;
-		amd,serdes-blwc = <1>, <1>, <0>;
-		amd,serdes-cdr-rate = <2>, <2>, <7>;
-		amd,serdes-pq-skew = <10>, <10>, <30>;
-		amd,serdes-tx-amp = <15>, <15>, <10>;
-		amd,serdes-dfe-tap-config = <3>, <3>, <1>;
-		amd,serdes-dfe-tap-enable = <0>, <0>, <127>;
-	};
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe.txt b/Documentation/devicetree/bindings/net/amd-xgbe.txt
index 26efd52..4bb624a 100644
--- a/Documentation/devicetree/bindings/net/amd-xgbe.txt
+++ b/Documentation/devicetree/bindings/net/amd-xgbe.txt
@@ -5,12 +5,16 @@
 - reg: Address and length of the register sets for the device
    - MAC registers
    - PCS registers
+   - SerDes Rx/Tx registers
+   - SerDes integration registers (1/2)
+   - SerDes integration registers (2/2)
 - interrupt-parent: Should be the phandle for the interrupt controller
   that services interrupts for this device
 - interrupts: Should contain the amd-xgbe interrupt(s). The first interrupt
   listed is required and is the general device interrupt. If the optional
   amd,per-channel-interrupt property is specified, then one additional
-  interrupt for each DMA channel supported by the device should be specified
+  interrupt for each DMA channel supported by the device should be specified.
+  The last interrupt listed should be the PCS auto-negotiation interrupt.
 - clocks:
    - DMA clock for the amd-xgbe device (used for calculating the
      correct Rx interrupt watchdog timer value on a DMA channel
@@ -19,7 +23,6 @@
 - clock-names: Should be the names of the clocks
    - "dma_clk" for the DMA clock
    - "ptp_clk" for the PTP clock
-- phy-handle: See ethernet.txt file in the same directory
 - phy-mode: See ethernet.txt file in the same directory
 
 Optional properties:
@@ -29,19 +32,46 @@
 - amd,per-channel-interrupt: Indicates that Rx and Tx complete will generate
   a unique interrupt for each DMA channel - this requires an additional
   interrupt be configured for each DMA channel
+- amd,speed-set: Speed capabilities of the device
+    0 - 1GbE and 10GbE (default)
+    1 - 2.5GbE and 10GbE
+
+The following optional properties are represented by an array with each
+value corresponding to a particular speed. The first array value represents
+the setting for the 1GbE speed, the second value for the 2.5GbE speed and
+the third value for the 10GbE speed.  All three values are required if the
+property is used.
+- amd,serdes-blwc: Baseline wandering correction enablement
+    0 - Off
+    1 - On
+- amd,serdes-cdr-rate: CDR rate speed selection
+- amd,serdes-pq-skew: PQ (data sampling) skew
+- amd,serdes-tx-amp: TX amplitude boost
+- amd,serdes-dfe-tap-config: DFE taps available to run
+- amd,serdes-dfe-tap-enable: DFE taps to enable
 
 Example:
 	xgbe@e0700000 {
 		compatible = "amd,xgbe-seattle-v1a";
 		reg = <0 0xe0700000 0 0x80000>,
-		      <0 0xe0780000 0 0x80000>;
+		      <0 0xe0780000 0 0x80000>,
+		      <0 0xe1240800 0 0x00400>,
+		      <0 0xe1250000 0 0x00060>,
+		      <0 0xe1250080 0 0x00004>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 325 4>,
-			     <0 326 1>, <0 327 1>, <0 328 1>, <0 329 1>;
+			     <0 326 1>, <0 327 1>, <0 328 1>, <0 329 1>,
+			     <0 323 4>;
 		amd,per-channel-interrupt;
 		clocks = <&xgbe_dma_clk>, <&xgbe_ptp_clk>;
 		clock-names = "dma_clk", "ptp_clk";
-		phy-handle = <&phy>;
 		phy-mode = "xgmii";
 		mac-address = [ 02 a1 a2 a3 a4 a5 ];
+		amd,speed-set = <0>;
+		amd,serdes-blwc = <1>, <1>, <0>;
+		amd,serdes-cdr-rate = <2>, <2>, <7>;
+		amd,serdes-pq-skew = <10>, <10>, <30>;
+		amd,serdes-tx-amp = <15>, <15>, <10>;
+		amd,serdes-dfe-tap-config = <3>, <3>, <1>;
+		amd,serdes-dfe-tap-enable = <0>, <0>, <127>;
 	};
diff --git a/Documentation/devicetree/bindings/net/ezchip_enet.txt b/Documentation/devicetree/bindings/net/ezchip_enet.txt
new file mode 100644
index 0000000..4e29b2b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ezchip_enet.txt
@@ -0,0 +1,15 @@
+* EZchip NPS Management Ethernet port driver
+
+Required properties:
+- compatible: Should be "ezchip,nps-mgt-enet"
+- reg: Address and length of the register set for the device
+- interrupts: Should contain the ENET interrupt
+
+Examples:
+
+	ethernet@f0003000 {
+		compatible = "ezchip,nps-mgt-enet";
+		reg = <0xf0003000 0x44>;
+		interrupts = <7>;
+		mac-address = [ 00 11 22 33 44 55 ];
+	};
diff --git a/Documentation/devicetree/bindings/net/ipq806x-dwmac.txt b/Documentation/devicetree/bindings/net/ipq806x-dwmac.txt
new file mode 100644
index 0000000..6d7ab4e
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ipq806x-dwmac.txt
@@ -0,0 +1,35 @@
+* IPQ806x DWMAC Ethernet controller
+
+The device inherits all the properties of the dwmac/stmmac devices
+described in the file net/stmmac.txt with the following changes.
+
+Required properties:
+
+- compatible: should be "qcom,ipq806x-gmac" along with "snps,dwmac"
+	      and any applicable more detailed version number
+	      described in net/stmmac.txt
+
+- qcom,nss-common: should contain a phandle to a syscon device mapping the
+		   nss-common registers.
+
+- qcom,qsgmii-csr: should contain a phandle to a syscon device mapping the
+		   qsgmii-csr registers.
+
+Example:
+
+	gmac: ethernet@37000000 {
+		device_type = "network";
+		compatible = "qcom,ipq806x-gmac";
+		reg = <0x37000000 0x200000>;
+		interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "macirq";
+
+		qcom,nss-common = <&nss_common>;
+		qcom,qsgmii-csr = <&qsgmii_csr>;
+
+		clocks = <&gcc GMAC_CORE1_CLK>;
+		clock-names = "stmmaceth";
+
+		resets = <&gcc GMAC_CORE1_RESET>;
+		reset-names = "stmmaceth";
+	};
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index ba19d67..b5d7976 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -7,8 +7,10 @@
   Use "cdns,at32ap7000-macb" for other 10/100 usage or use the generic form: "cdns,macb".
   Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
   the Cadence GEM, or the generic form: "cdns,gem".
-  Use "cdns,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
-  Use "cdns,sama5d4-gem" for the Gigabit IP available on Atmel sama5d4 SoCs.
+  Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs.
+  Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
+  Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
+  Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC.
 - reg: Address and length of the register set for the device
 - interrupts: Should contain macb interrupt
 - phy-mode: See ethernet.txt file in the same directory.
diff --git a/Documentation/devicetree/bindings/net/nfc/nfcmrvl.txt b/Documentation/devicetree/bindings/net/nfc/nfcmrvl.txt
new file mode 100644
index 0000000..7c4a0cc
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nfc/nfcmrvl.txt
@@ -0,0 +1,29 @@
+* Marvell International Ltd. NCI NFC Controller
+
+Required properties:
+- compatible: Should be "mrvl,nfc-uart".
+
+Optional SoC specific properties:
+- pinctrl-names: Contains only one value - "default".
+- pinctrl-0: Specifies the pin control groups used for this controller.
+- reset-n-io: Output GPIO pin used to reset the chip (active low).
+- hci-muxed: Specifies that the chip is muxing NCI over HCI frames.
+
+Optional UART-based chip specific properties:
+- flow-control: Specifies that the chip is using RTS/CTS.
+- break-control: Specifies that the chip needs specific break management.
+
+Example (for ARM-based BeagleBoard Black with 88W8887 on UART5):
+
+&uart5 {
+	status = "okay";
+
+	nfcmrvluart: nfcmrvluart@5 {
+		compatible = "mrvl,nfc-uart";
+
+		reset-n-io = <&gpio3 16 0>;
+
+		hci-muxed;
+		flow-control;
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/nfc/st21nfcb.txt b/Documentation/devicetree/bindings/net/nfc/st-nci.txt
similarity index 86%
rename from Documentation/devicetree/bindings/net/nfc/st21nfcb.txt
rename to Documentation/devicetree/bindings/net/nfc/st-nci.txt
index bb23707..d707588 100644
--- a/Documentation/devicetree/bindings/net/nfc/st21nfcb.txt
+++ b/Documentation/devicetree/bindings/net/nfc/st-nci.txt
@@ -1,7 +1,7 @@
-* STMicroelectronics SAS. ST21NFCB NFC Controller
+* STMicroelectronics SAS. ST NCI NFC Controller
 
 Required properties:
-- compatible: Should be "st,st21nfcb-i2c".
+- compatible: Should be "st,st21nfcb-i2c" or "st,st21nfcc-i2c".
 - clock-frequency: I²C work frequency.
 - reg: address on the bus
 - interrupt-parent: phandle for the interrupt gpio controller
diff --git a/Documentation/devicetree/bindings/net/nfc/trf7970a.txt b/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
index 7c89ca2..32b35a0 100644
--- a/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
+++ b/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
@@ -18,6 +18,9 @@
   "IRQ Status Read" erratum.
 - en2-rf-quirk: Specify that the trf7970a being used has the "EN2 RF"
   erratum.
+- t5t-rmb-extra-byte-quirk: Specify that the trf7970a has the erratum
+  where an extra byte is returned by Read Multiple Block commands issued
+  to Type 5 tags.
 
 Example (for ARM-based BeagleBone with TRF7970A on SPI1):
 
@@ -39,6 +42,7 @@
 		autosuspend-delay = <30000>;
 		irq-status-read-quirk;
 		en2-rf-quirk;
+		t5t-rmb-extra-byte-quirk;
 		status = "okay";
 	};
 };
diff --git a/Documentation/devicetree/bindings/net/nxp,lpc1850-dwmac.txt b/Documentation/devicetree/bindings/net/nxp,lpc1850-dwmac.txt
new file mode 100644
index 0000000..7edba12
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nxp,lpc1850-dwmac.txt
@@ -0,0 +1,20 @@
+* NXP LPC1850 GMAC ethernet controller
+
+This device is a platform glue layer for stmmac.
+Please see stmmac.txt for the other unchanged properties.
+
+Required properties:
+ - compatible:  Should contain "nxp,lpc1850-dwmac"
+
+Examples:
+
+mac: ethernet@40010000 {
+	compatible = "nxp,lpc1850-dwmac", "snps,dwmac-3.611", "snps,dwmac";
+	reg = <0x40010000 0x2000>;
+	interrupts = <5>;
+	interrupt-names = "macirq";
+	clocks = <&ccu1 CLK_CPU_ETHERNET>;
+	clock-names = "stmmaceth";
+	resets = <&rgu 22>;
+	reset-names = "stmmaceth";
+}
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index 40831fb..525e165 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -30,6 +30,9 @@
 
 - max-speed: Maximum PHY supported speed (10, 100, 1000...)
 
+- broken-turn-around: If set, indicates the PHY device does not correctly
+  release the turn around line low at the end of an MDIO transaction.
+
 Example:
 
 ethernet-phy@0 {
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
new file mode 100644
index 0000000..1fd8831
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -0,0 +1,48 @@
+* Renesas Electronics Ethernet AVB
+
+This file provides information on what the device node for the Ethernet AVB
+interface contains.
+
+Required properties:
+- compatible: "renesas,etheravb-r8a7790" if the device is a part of R8A7790 SoC.
+	      "renesas,etheravb-r8a7794" if the device is a part of R8A7794 SoC.
+- reg: offset and length of (1) the register block and (2) the stream buffer.
+- interrupts: interrupt specifier for the sole interrupt.
+- phy-mode: see ethernet.txt file in the same directory.
+- phy-handle: see ethernet.txt file in the same directory.
+- #address-cells: number of address cells for the MDIO bus, must be equal to 1.
+- #size-cells: number of size cells on the MDIO bus, must be equal to 0.
+- clocks: clock phandle and specifier pair.
+- pinctrl-0: phandle, referring to a default pin configuration node.
+
+Optional properties:
+- interrupt-parent: the phandle for the interrupt controller that services
+		    interrupts for this device.
+- pinctrl-names: pin configuration state name ("default").
+- renesas,no-ether-link: boolean, specify when a board does not provide a proper
+			 AVB_LINK signal.
+- renesas,ether-link-active-low: boolean, specify when the AVB_LINK signal is
+				 active-low instead of normal active-high.
+
+Example:
+
+	ethernet@e6800000 {
+		compatible = "renesas,etheravb-r8a7790";
+		reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
+		interrupt-parent = <&gic>;
+		interrupts = <0 163 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp8_clks R8A7790_CLK_ETHERAVB>;
+		phy-mode = "rmii";
+		phy-handle = <&phy0>;
+		pinctrl-0 = <&ether_pins>;
+		pinctrl-names = "default";
+		renesas,no-ether-link;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		phy0: ethernet-phy@0 {
+			reg = <0>;
+			interrupt-parent = <&gpio2>;
+			interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
index 21fd199..93eac7c 100644
--- a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
@@ -3,7 +3,7 @@
 The device node has following properties.
 
 Required properties:
- - compatible: Can be "rockchip,rk3288-gmac".
+ - compatible: Can be one of "rockchip,rk3288-gmac", "rockchip,rk3368-gmac"
  - reg: addresses and length of the register sets for the device.
  - interrupts: Should contain the GMAC interrupts.
  - interrupt-names: Should contain the interrupt names "macirq".
diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.txt b/Documentation/devicetree/bindings/net/ti,dp83867.txt
new file mode 100644
index 0000000..58d935b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ti,dp83867.txt
@@ -0,0 +1,25 @@
+* Texas Instruments - DP83867 Gigabit Ethernet PHY
+
+Required properties:
+	- reg - The ID number for the phy, usually a small integer
+	- ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h
+		for applicable values
+	- ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h
+		for applicable values
+	- ti,fifo-depth - Transmit FIFO depth - see dt-bindings/net/ti-dp83867.h
+		for applicable values
+
+Default child nodes are standard Ethernet PHY device
+nodes as described in Documentation/devicetree/bindings/net/phy.txt
+
+Example:
+
+	ethernet-phy@0 {
+		reg = <0>;
+		ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
+		ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_75_NS>;
+		ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+	};
+
+The datasheet can be found at:
+http://www.ti.com/product/DP83867IR/datasheet
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 83bf498..334b49e 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -51,6 +51,7 @@
 3.4	Configuring Bonding Manually via Sysfs
 3.5	Configuration with Interfaces Support
 3.6	Overriding Configuration for Special Cases
+3.7	Configuring LACP for 802.3ad mode in a more secure way
 
 4. Querying Bonding Configuration
 4.1	Bonding Configuration
@@ -178,6 +179,27 @@
 	active slave, or the empty string if there is no active slave or
 	the current mode does not use an active slave.
 
+ad_actor_sys_prio
+
+	In an AD system, this specifies the system priority. The allowed range
+	is 1 - 65535. If the value is not specified, it takes 65535 as the
+	default value.
+
+	This parameter has effect only in 802.3ad mode and is available through
+	SysFs interface.
+
+ad_actor_system
+
+	In an AD system, this specifies the mac-address for the actor in
+	protocol packet exchanges (LACPDUs). The value cannot be NULL or
+	multicast. It is preferred to have the local-admin bit set for this
+	mac but the driver does not enforce it. If the value is not given,
+	the system defaults to using the master's MAC address as the actor's
+	system address.
+
+	This parameter has effect only in 802.3ad mode and is available through
+	SysFs interface.
+
 ad_select
 
 	Specifies the 802.3ad aggregation selection logic to use.  The
@@ -220,6 +242,21 @@
 
 	This option was added in bonding version 3.4.0.
 
+ad_user_port_key
+
+	In an AD system, the port-key has three parts as shown below -
+
+	   Bits   Use
+	   00     Duplex
+	   01-05  Speed
+	   06-15  User-defined
+
+	This defines the upper 10 bits of the port key. The values can be
+	from 0 - 1023. If not given, the system defaults to 0.
+
+	This parameter has effect only in 802.3ad mode and is available through
+	SysFs interface.
+
 all_slaves_active
 
 	Specifies that duplicate frames (received on inactive ports) should be
@@ -1622,6 +1659,53 @@
 This feature first appeared in bonding driver version 3.7.0 and support for
 output slave selection was limited to round-robin and active-backup modes.
 
+3.7 Configuring LACP for 802.3ad mode in a more secure way
+----------------------------------------------------------
+
+When using 802.3ad bonding mode, the Actor (host) and Partner (switch)
+exchange LACPDUs.  These LACPDUs cannot be sniffed, because they are
+destined to link local mac addresses (which switches/bridges are not
+supposed to forward).  However, most of the values are easily predictable
+or are simply the machine's MAC address (which is trivially known to all
+other hosts in the same L2).  This implies that other machines in the L2
+domain can spoof LACPDU packets from other hosts to the switch and potentially
+cause mayhem by joining (from the point of view of the switch) another
+machine's aggregate, thus receiving a portion of that host's incoming
+traffic and/or spoofing traffic from that machine itself (potentially
+even successfully terminating some portion of flows). Though this is not
+a likely scenario, one can avoid this possibility by simply configuring a
+few bonding parameters:
+
+   (a) ad_actor_system : You can set a random mac-address that can be used for
+       these LACPDU exchanges. The value cannot be NULL or multicast. Also
+       it's preferable to set the local-admin bit. The following shell code
+       generates a random mac-address as described above.
+
+       # sys_mac_addr=$(printf '%02x:%02x:%02x:%02x:%02x:%02x' \
+                                $(( (RANDOM & 0xFE) | 0x02 )) \
+                                $(( RANDOM & 0xFF )) \
+                                $(( RANDOM & 0xFF )) \
+                                $(( RANDOM & 0xFF )) \
+                                $(( RANDOM & 0xFF )) \
+                                $(( RANDOM & 0xFF )))
+       # echo $sys_mac_addr > /sys/class/net/bond0/bonding/ad_actor_system
+
+   (b) ad_actor_sys_prio : Randomize the system priority. The default value
+       is 65535, but the system can take any value from 1 - 65535. The
+       following shell code generates a random priority and sets it.
+
+       # sys_prio=$(( 1 + RANDOM + RANDOM ))
+       # echo $sys_prio > /sys/class/net/bond0/bonding/ad_actor_sys_prio
+
+   (c) ad_user_port_key : Use the user portion of the port-key. The default
+       keeps this empty. These are the upper 10 bits of the port-key and the
+       value ranges from 0 - 1023. The following shell code generates these
+       10 bits and sets them.
+
+       # usr_port_key=$(( RANDOM & 0x3FF ))
+       # echo $usr_port_key > /sys/class/net/bond0/bonding/ad_user_port_key
+
+
 4 Querying Bonding Configuration
 =================================
 
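For reference, the port-key layout documented above packs into 16 bits as
in the following sketch (illustration only; the bonding driver fills in the
duplex and speed parts itself, ad_user_port_key only sets the upper 10
bits):

	#include <stdint.h>

	/* 802.3ad port key: bit 0 duplex, bits 1-5 speed,
	 * bits 6-15 user-defined (ad_user_port_key, 0-1023). */
	static uint16_t ad_port_key(uint8_t duplex, uint8_t speed,
				    uint16_t user)
	{
		return (duplex & 0x1) | ((speed & 0x1f) << 1) |
		       ((user & 0x3ff) << 6);
	}
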
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index 5abad1e..b48d4a1 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -268,6 +268,9 @@
     struct can_frame {
             canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
             __u8    can_dlc; /* frame payload length in byte (0 .. 8) */
+            __u8    __pad;   /* padding */
+            __u8    __res0;  /* reserved / padding */
+            __u8    __res1;  /* reserved / padding */
             __u8    data[8] __attribute__((aligned(8)));
     };
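
A minimal SocketCAN sketch using the struct can_frame layout shown above;
"can0" is an example interface name and error handling is elided:

	#include <unistd.h>
	#include <net/if.h>
	#include <sys/socket.h>
	#include <linux/can.h>
	#include <linux/can/raw.h>

	int main(void)
	{
		int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
		struct sockaddr_can addr = {
			.can_family  = AF_CAN,
			.can_ifindex = if_nametoindex("can0"),
		};
		/* 2-byte payload; the new __pad/__res fields are zeroed
		 * by the initializer. */
		struct can_frame f = { .can_id = 0x123, .can_dlc = 2,
				       .data = { 0xde, 0xad } };

		bind(s, (struct sockaddr *)&addr, sizeof(addr));
		write(s, &f, sizeof(f));	/* one frame per write */
		close(s);
		return 0;
	}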
 
diff --git a/Documentation/networking/dctcp.txt b/Documentation/networking/dctcp.txt
index 0d5dfbc..13a8577 100644
--- a/Documentation/networking/dctcp.txt
+++ b/Documentation/networking/dctcp.txt
@@ -8,6 +8,7 @@
 To enable it on end hosts:
 
   sysctl -w net.ipv4.tcp_congestion_control=dctcp
+  sysctl -w net.ipv4.tcp_ecn_fallback=0 (optional)
 
 All switches in the data center network running DCTCP must support ECN
 marking and be configured for marking when reaching defined switch buffer
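
Besides the system-wide sysctl above, the congestion control algorithm can
also be selected per socket; a minimal sketch using the long-standing
TCP_CONGESTION socket option (not part of this diff), assuming dctcp is
built or loaded on the host:

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>

	/* Opt one socket into DCTCP; unprivileged users may only pick
	 * algorithms listed in net.ipv4.tcp_allowed_congestion_control. */
	static int set_dctcp(int fd)
	{
		static const char name[] = "dctcp";

		return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
				  name, strlen(name));
	}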
diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt
index 22bbc72..1700756 100644
--- a/Documentation/networking/ieee802154.txt
+++ b/Documentation/networking/ieee802154.txt
@@ -30,8 +30,8 @@
 
 The address family, socket addresses etc. are defined in the
 include/net/af_ieee802154.h header or in the special header
-in our userspace package (see either linux-zigbee sourceforge download page
-or git tree at git://linux-zigbee.git.sourceforge.net/gitroot/linux-zigbee).
+in the userspace package (see either http://wpan.cakelab.org/ or the
+git tree at https://github.com/linux-wpan/wpan-tools).
 
 One can use SOCK_RAW for passing raw data towards device xmit function. YMMV.
 
@@ -49,15 +49,6 @@
 Those types of devices require different approach to be hooked into Linux kernel.
 
 
-MLME - MAC Level Management
-============================
-
-Most of IEEE 802.15.4 MLME interfaces are directly mapped on netlink commands.
-See the include/net/nl802154.h header. Our userspace tools package
-(see above) provides CLI configuration utility for radio interfaces and simple
-coordinator for IEEE 802.15.4 networks as an example users of MLME protocol.
-
-
 HardMAC
 =======
 
@@ -75,8 +66,6 @@
 assoc_req, assoc_resp, disassoc_req, start_req, and scan_req are optional.
 All other fields are required.
 
-We provide an example of simple HardMAC driver at drivers/ieee802154/fakehard.c
-
 
 SoftMAC
 =======
@@ -89,7 +78,8 @@
 
 This layer is going to be extended soon.
 
-See header include/net/mac802154.h and several drivers in drivers/ieee802154/.
+See header include/net/mac802154.h and several drivers in
+drivers/net/ieee802154/.
 
 
 Device drivers API
@@ -114,18 +104,17 @@
 Fake drivers
 ============
 
-In addition there are two drivers available which simulate real devices with
-HardMAC (fakehard) and SoftMAC (fakelb - IEEE 802.15.4 loopback driver)
-interfaces. This option provides possibility to test and debug stack without
-usage of real hardware.
+In addition there is a driver available which simulates a real device with
+a SoftMAC (fakelb - IEEE 802.15.4 loopback driver) interface. This option
+provides the possibility to test and debug the stack without real hardware.
 
-See sources in drivers/ieee802154 folder for more details.
+See sources in drivers/net/ieee802154 folder for more details.
 
 
 6LoWPAN Linux implementation
 ============================
 
-The IEEE 802.15.4 standard specifies an MTU of 128 bytes, yielding about 80
+The IEEE 802.15.4 standard specifies an MTU of 127 bytes, yielding about 80
 octets of actual MAC payload once security is turned on, on a wireless link
 with a link throughput of 250 kbps or less.  The 6LoWPAN adaptation format
 [RFC4944] was specified to carry IPv6 datagrams over such constrained links,
@@ -140,7 +129,8 @@
 It deprecates HC1 and HC2 compression and defines IPHC encoding format which is
 used in this Linux implementation.
 
-All the code related to 6lowpan you may find in files: net/ieee802154/6lowpan.*
+All the code related to 6lowpan can be found in net/6lowpan/*
+and net/ieee802154/6lowpan/*.
 
 To setup 6lowpan interface you need (busybox release > 1.17.0):
 1. Add IEEE802.15.4 interface and initialize PANid;
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 071fb18..5fae770 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -267,6 +267,15 @@
 		  but do not request ECN on outgoing connections.
 	Default: 2
 
+tcp_ecn_fallback - BOOLEAN
+	If the kernel detects that an ECN connection misbehaves, enable
+	fallback to non-ECN. Currently, this knob implements the fallback
+	from RFC 3168, section 6.1.1.1, but additional detection mechanisms
+	may be implemented under this knob in the future. The value is not
+	used if tcp_ecn or per-route (or congestion control) ECN settings
+	are disabled.
+	Default: 1 (fallback enabled)
+
 tcp_fack - BOOLEAN
 	Enable FACK congestion avoidance and fast retransmission.
 	The value is not used, if tcp_sack is not enabled.
@@ -742,8 +751,10 @@
 ip_local_port_range - 2 INTEGERS
 	Defines the local port range that is used by TCP and UDP to
 	choose the local port. The first number is the first, the
-	second the last local port number. The default values are
-	32768 and 61000 respectively.
+	second the last local port number.
+	If possible, it is better if these numbers have different parity
+	(one even and one odd value).
+	The default values are 32768 and 60999 respectively.
 
 ip_local_reserved_ports - list of comma separated ranges
 	Specify the ports which are reserved for known third-party
@@ -766,7 +777,7 @@
 	ip_local_port_range, e.g.:
 
 	$ cat /proc/sys/net/ipv4/ip_local_port_range
-	32000	61000
+	32000	60999
 	$ cat /proc/sys/net/ipv4/ip_local_reserved_ports
 	8080,9148
 
@@ -1213,6 +1224,14 @@
 	FALSE: disabled
 	Default: false
 
+flowlabel_state_ranges - BOOLEAN
+	Split the flow label number space into two ranges. 0-0x7FFFF is
+	reserved for the IPv6 flow manager facility, 0x80000-0xFFFFF
+	is reserved for stateless flow labels as described in RFC6437.
+	TRUE: enabled
+	FALSE: disabled
+	Default: true
+
 anycast_src_echo_reply - BOOLEAN
 	Controls the use of anycast addresses as source addresses for ICMPv6
 	echo reply
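
The even/odd preference mentioned in this merge (even ports favored for
connect(), odd ports for bind(0)) can be observed with a small sketch; the
split is a heuristic to reduce ip_local_port_range exhaustion, not a
guarantee:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, 0);
		struct sockaddr_in a = { .sin_family = AF_INET,
					 .sin_addr.s_addr =
						htonl(INADDR_LOOPBACK) };
		socklen_t len = sizeof(a);

		/* sin_port is 0: let the kernel pick an ephemeral port. */
		bind(fd, (struct sockaddr *)&a, sizeof(a));
		getsockname(fd, (struct sockaddr *)&a, &len);
		printf("bind(0) chose port %u\n",
		       (unsigned)ntohs(a.sin_port));
		close(fd);
		return 0;
	}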
diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt
index 0344f1d..f4be85e 100644
--- a/Documentation/networking/pktgen.txt
+++ b/Documentation/networking/pktgen.txt
@@ -1,6 +1,6 @@
 
 
-                  HOWTO for the linux packet generator 
+                  HOWTO for the linux packet generator
                   ------------------------------------
 
 Enable CONFIG_NET_PKTGEN to compile and build pktgen either in-kernel
@@ -50,17 +50,33 @@
  # ethtool -C ethX rx-usecs 30
 
 
-Viewing threads
-===============
-/proc/net/pktgen/kpktgend_0 
-Name: kpktgend_0  max_before_softirq: 10000
-Running: 
-Stopped: eth1 
-Result: OK: max_before_softirq=10000
+Kernel threads
+==============
+Pktgen creates a thread for each CPU with affinity to that CPU,
+each controlled through the procfile /proc/net/pktgen/kpktgend_X.
 
-Most important are the devices assigned to the thread.  Note that a
-device can only belong to one thread.
+Example: /proc/net/pktgen/kpktgend_0
 
+ Running:
+ Stopped: eth4@0
+ Result: OK: add_device=eth4@0
+
+Most important are the devices assigned to the thread.
+
+The two basic thread commands are:
+ * add_device DEVICE@NAME -- adds a single device
+ * rem_device_all         -- remove all associated devices
+
+When adding a device to a thread, a corresponding procfile is created
+which is used for configuring this device. Thus, device names need to
+be unique.
+
+To support adding the same device to multiple threads, which is useful
+with multi queue NICs, the device naming scheme is extended with "@":
+ device@something
+
+The part after "@" can be anything, but it is customary to use the thread
+number.
 
 Viewing devices
 ===============
@@ -69,29 +85,32 @@
 holds running statistics.  The Result is printed after a run or after
 interruption.  Example:
 
-/proc/net/pktgen/eth1       
+/proc/net/pktgen/eth4@0
 
-Params: count 10000000  min_pkt_size: 60  max_pkt_size: 60
-     frags: 0  delay: 0  clone_skb: 1000000  ifname: eth1
+ Params: count 100000  min_pkt_size: 60  max_pkt_size: 60
+     frags: 0  delay: 0  clone_skb: 64  ifname: eth4@0
      flows: 0 flowlen: 0
-     dst_min: 10.10.11.2  dst_max: 
-     src_min:   src_max: 
-     src_mac: 00:00:00:00:00:00  dst_mac: 00:04:23:AC:FD:82
-     udp_src_min: 9  udp_src_max: 9  udp_dst_min: 9  udp_dst_max: 9
-     src_mac_count: 0  dst_mac_count: 0 
-     Flags: 
-Current:
-     pkts-sofar: 10000000  errors: 39664
-     started: 1103053986245187us  stopped: 1103053999346329us idle: 880401us
-     seq_num: 10000011  cur_dst_mac_offset: 0  cur_src_mac_offset: 0
-     cur_saddr: 0x10a0a0a  cur_daddr: 0x20b0a0a
-     cur_udp_dst: 9  cur_udp_src: 9
+     queue_map_min: 0  queue_map_max: 0
+     dst_min: 192.168.81.2  dst_max:
+     src_min:   src_max:
+     src_mac: 90:e2:ba:0a:56:b4 dst_mac: 00:1b:21:3c:9d:f8
+     udp_src_min: 9  udp_src_max: 109  udp_dst_min: 9  udp_dst_max: 9
+     src_mac_count: 0  dst_mac_count: 0
+     Flags: UDPSRC_RND  NO_TIMESTAMP  QUEUE_MAP_CPU
+ Current:
+     pkts-sofar: 100000  errors: 0
+     started: 623913381008us  stopped: 623913396439us idle: 25us
+     seq_num: 100001  cur_dst_mac_offset: 0  cur_src_mac_offset: 0
+     cur_saddr: 192.168.8.3  cur_daddr: 192.168.81.2
+     cur_udp_dst: 9  cur_udp_src: 42
+     cur_queue_map: 0
      flows: 0
-Result: OK: 13101142(c12220741+d880401) usec, 10000000 (60byte,0frags)
-  763292pps 390Mb/sec (390805504bps) errors: 39664
+ Result: OK: 15430(c15405+d25) usec, 100000 (60byte,0frags)
+  6480562pps 3110Mb/sec (3110669760bps) errors: 0
 
-Configuring threads and devices
-================================
+
+Configuring devices
+===================
 This is done via the /proc interface, and most easily done via pgset
 as defined in the sample scripts.
 
@@ -126,7 +145,7 @@
                          To select queue 1 of a given device,
                          use queue_map_min=1 and queue_map_max=1
 
- pgset "src_mac_count 1" Sets the number of MACs we'll range through.  
+ pgset "src_mac_count 1" Sets the number of MACs we'll range through.
                          The 'minimum' MAC is what you set with srcmac.
 
  pgset "dst_mac_count 1" Sets the number of MACs we'll range through.
@@ -145,6 +164,7 @@
                               UDPCSUM,
                               IPSEC # IPsec encapsulation (needs CONFIG_XFRM)
                               NODE_ALLOC # node specific memory allocation
+                              NO_TIMESTAMP # disable timestamping
 
  pgset spi SPI_VALUE     Set specific SA used to transform packet.
 
@@ -192,24 +212,43 @@
  pgset "rate 300M"        set rate to 300 Mb/s
  pgset "ratep 1000000"    set rate to 1Mpps
 
+ pgset "xmit_mode netif_receive"  RX inject into stack netif_receive_skb()
+				  Works with "burst" but not with "clone_skb".
+				  Default xmit_mode is "start_xmit".
+
 Sample scripts
 ==============
 
-A collection of small tutorial scripts for pktgen is in the
-samples/pktgen directory:
+A collection of tutorial scripts and helpers for pktgen is in the
+samples/pktgen directory. The helper parameters.sh file supports easy
+and consistent parameter parsing across the sample scripts.
 
-pktgen.conf-1-1                  # 1 CPU 1 dev 
+Usage example and help:
+ ./pktgen_sample01_simple.sh -i eth4 -m 00:1B:21:3C:9D:F8 -d 192.168.8.2
+
+Usage: ./pktgen_sample01_simple.sh [-vx] -i ethX
+  -i : ($DEV)       output interface/device (required)
+  -s : ($PKT_SIZE)  packet size
+  -d : ($DEST_IP)   destination IP
+  -m : ($DST_MAC)   destination MAC-addr
+  -t : ($THREADS)   threads to start
+  -c : ($SKB_CLONE) SKB clones send before alloc new SKB
+  -b : ($BURST)     HW level bursting of SKBs
+  -v : ($VERBOSE)   verbose
+  -x : ($DEBUG)     debug
+
+The global variables being set are also listed.  E.g. the required
+interface/device parameter "-i" sets variable $DEV.  Copy the
+pktgen_sampleXX scripts and modify them to fit your own needs.
+
+The old scripts:
+
 pktgen.conf-1-2                  # 1 CPU 2 dev
-pktgen.conf-2-1                  # 2 CPU's 1 dev 
-pktgen.conf-2-2                  # 2 CPU's 2 dev
 pktgen.conf-1-1-rdos             # 1 CPU 1 dev w. route DoS 
 pktgen.conf-1-1-ip6              # 1 CPU 1 dev ipv6
 pktgen.conf-1-1-ip6-rdos         # 1 CPU 1 dev ipv6  w. route DoS
 pktgen.conf-1-1-flows            # 1 CPU 1 dev multiple flows.
 
-Run in shell: ./pktgen.conf-X-Y
-This does all the setup including sending.
-
 
 Interrupt affinity
 ===================
@@ -217,6 +256,9 @@
 also assign /proc/irq/XX/smp_affinity so that the TX interrupts are bound
 to the same CPU.  This reduces cache bouncing when freeing skbs.
 
+Plus using the device flag QUEUE_MAP_CPU, which maps the SKB's TX queue
+to the running thread's CPU (directly from smp_processor_id()).
+
 Enable IPsec
 ============
 Default IPsec transformation with ESP encapsulation plus transport mode
@@ -237,18 +279,19 @@
 
 start
 stop
+reset
 
 ** Thread commands:
 
 add_device
 rem_device_all
-max_before_softirq
 
 
 ** Device commands:
 
 count
 clone_skb
+burst
 debug
 
 frags
@@ -257,10 +300,17 @@
 src_mac_count
 dst_mac_count
 
-pkt_size 
+pkt_size
 min_pkt_size
 max_pkt_size
 
+queue_map_min
+queue_map_max
+skb_priority
+
+tos           (ipv4)
+traffic_class (ipv6)
+
 mpls
 
 udp_src_min
@@ -269,6 +319,8 @@
 udp_dst_min
 udp_dst_max
 
+node
+
 flag
   IPSRC_RND
   IPDST_RND
@@ -287,6 +339,9 @@
   UDPCSUM
   IPSEC
   NODE_ALLOC
+  NO_TIMESTAMP
+
+spi (ipsec)
 
 dst_min
 dst_max
@@ -299,8 +354,10 @@
 
 clear_counters
 
-dst6
 src6
+dst6
+dst6_max
+dst6_min
 
 flows
 flowlen
@@ -308,6 +365,17 @@
 rate
 ratep
 
+xmit_mode <start_xmit|netif_receive>
+
+vlan_cfi
+vlan_id
+vlan_p
+
+svlan_cfi
+svlan_id
+svlan_p
+
+
 References:
 ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/
 ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/examples/
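
For completeness, the pgset helper used throughout this document is just a
write to the pktgen proc files; a minimal C equivalent, with "eth4@0" as an
example device and pktgen assumed loaded (modprobe pktgen):

	#include <stdio.h>

	/* Write one pktgen command to a /proc/net/pktgen control file. */
	static int pgset(const char *path, const char *cmd)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fprintf(f, "%s\n", cmd);
		return fclose(f);
	}

	int main(void)
	{
		pgset("/proc/net/pktgen/kpktgend_0", "rem_device_all");
		pgset("/proc/net/pktgen/kpktgend_0", "add_device eth4@0");
		pgset("/proc/net/pktgen/eth4@0", "count 100000");
		pgset("/proc/net/pktgen/eth4@0", "xmit_mode netif_receive");
		pgset("/proc/net/pktgen/pgctrl", "start");
		return 0;
	}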
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index f981a92..c5d7ade 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -1,59 +1,360 @@
-Switch (and switch-ish) device drivers HOWTO
-===========================
+Ethernet switch device driver model (switchdev)
+===============================================
+Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 
-Please note that the word "switch" is here used in very generic meaning.
-This include devices supporting L2/L3 but also various flow offloading chips,
-including switches embedded into SR-IOV NICs.
 
-Lets describe a topology a bit. Imagine the following example:
+The Ethernet switch device driver model (switchdev) is an in-kernel driver
+model for switch devices which offload the forwarding (data) plane from the
+kernel.
 
-       +----------------------------+    +---------------+
-       |     SOME switch chip       |    |      CPU      |
-       +----------------------------+    +---------------+
-       port1 port2 port3 port4 MNGMNT    |     PCI-E     |
-         |     |     |     |     |       +---------------+
-        PHY   PHY    |     |     |         |  NIC0 NIC1
-                     |     |     |         |   |    |
-                     |     |     +- PCI-E -+   |    |
-                     |     +------- MII -------+    |
-                     +------------- MII ------------+
+Figure 1 is a block diagram showing the components of the switchdev model for
+an example setup using a data-center-class switch ASIC chip.  Other setups
+with SR-IOV or soft switches, such as OVS, are possible.
 
-In this example, there are two independent lines between the switch silicon
-and CPU. NIC0 and NIC1 drivers are not aware of a switch presence. They are
-separate from the switch driver. SOME switch chip is by managed by a driver
-via PCI-E device MNGMNT. Note that MNGMNT device, NIC0 and NIC1 may be
-connected to some other type of bus.
 
-Now, for the previous example show the representation in kernel:
+                             User-space tools                                 
+                                                                              
+       user space                   |                                         
+      +-------------------------------------------------------------------+   
+       kernel                       | Netlink                                 
+                                    |                                         
+                     +--------------+-------------------------------+         
+                     |         Network stack                        |         
+                     |           (Linux)                            |         
+                     |                                              |         
+                     +----------------------------------------------+         
+                                                                              
+                           sw1p2     sw1p4     sw1p6
+                      sw1p1  +  sw1p3  +  sw1p5  +          eth1             
+                        +    |    +    |    +    |            +               
+                        |    |    |    |    |    |            |               
+                     +--+----+----+----+-+--+----+---+  +-----+-----+         
+                     |         Switch driver         |  |    mgmt   |         
+                     |        (this document)        |  |   driver  |         
+                     |                               |  |           |         
+                     +--------------+----------------+  +-----------+         
+                                    |                                         
+       kernel                       | HW bus (eg PCI)                         
+      +-------------------------------------------------------------------+   
+       hardware                     |                                         
+                     +--------------+---+------------+                        
+                     |         Switch device (sw1)   |                        
+                     |  +----+                       +--------+               
+                     |  |    v offloaded data path   | mgmt port              
+                     |  |    |                       |                        
+                     +--|----|----+----+----+----+---+                        
+                        |    |    |    |    |    |                            
+                        +    +    +    +    +    +                            
+                       p1   p2   p3   p4   p5   p6
+                                       
+                             front-panel ports                                
+                                                                              
 
-       +----------------------------+    +---------------+
-       |     SOME switch chip       |    |      CPU      |
-       +----------------------------+    +---------------+
-       sw0p0 sw0p1 sw0p2 sw0p3 MNGMNT    |     PCI-E     |
-         |     |     |     |     |       +---------------+
-        PHY   PHY    |     |     |         |  eth0 eth1
-                     |     |     |         |   |    |
-                     |     |     +- PCI-E -+   |    |
-                     |     +------- MII -------+    |
-                     +------------- MII ------------+
+                                    Fig 1.
 
-Lets call the example switch driver for SOME switch chip "SOMEswitch". This
-driver takes care of PCI-E device MNGMNT. There is a netdevice instance sw0pX
-created for each port of a switch. These netdevices are instances
-of "SOMEswitch" driver. sw0pX netdevices serve as a "representation"
-of the switch chip. eth0 and eth1 are instances of some other existing driver.
 
-The only difference of the switch-port netdevice from the ordinary netdevice
-is that is implements couple more NDOs:
+Include Files
+-------------
 
-  ndo_switch_parent_id_get - This returns the same ID for two port netdevices
-			     of the same physical switch chip. This is
-			     mandatory to be implemented by all switch drivers
-			     and serves the caller for recognition of a port
-			     netdevice.
-  ndo_switch_parent_* - Functions that serve for a manipulation of the switch
-			chip itself (it can be though of as a "parent" of the
-			port, therefore the name). They are not port-specific.
-			Caller might use arbitrary port netdevice of the same
-			switch and it will make no difference.
-  ndo_switch_port_* - Functions that serve for a port-specific manipulation.
+#include <linux/netdevice.h>
+#include <net/switchdev.h>
+
+
+Configuration
+-------------
+
+Use "depends on NET_SWITCHDEV" in the driver's Kconfig to ensure switchdev
+model support is built for the driver.
+
+
+Switch Ports
+------------
+
+On switchdev driver initialization, the driver will allocate and register a
+struct net_device (using register_netdev()) for each enumerated physical switch
+port, called the port netdev.  A port netdev is the software representation of
+the physical port and provides a conduit for control traffic to/from the
+controller (the kernel) and the network, as well as an anchor point for higher
+level constructs such as bridges, bonds, VLANs, tunnels, and L3 routers.  Using
+standard netdev tools (iproute2, ethtool, etc), the port netdev can also
+provide to the user access to the physical properties of the switch port such
+as PHY link state and I/O statistics.
+
+There is (currently) no higher-level kernel object for the switch beyond the
+port netdevs.  All of the switchdev driver ops are netdev ops or switchdev ops.
+
+A switch management port is outside the scope of the switchdev driver model.
+Typically, the management port does not participate in the offloaded data
+plane and is loaded with a different driver, such as a NIC driver, on the
+management port device.
+
+Port Netdev Naming
+^^^^^^^^^^^^^^^^^^
+
+Udev rules should be used for port netdev naming, using some unique attribute
+of the port as a key, for example the port MAC address or the port PHYS name.
+Hard-coding of kernel netdev names within the driver is discouraged; let the
+kernel pick the default netdev name, and let udev set the final name based on a
+port attribute.
+
+Using port PHYS name (ndo_get_phys_port_name) for the key is particularly
+useful for dynamically-named ports where the device names its ports based on
+external configuration.  For example, if a physical 40G port is split logically
+into 4 10G ports, resulting in 4 port netdevs, the device can give a unique
+name for each port using port PHYS name.  The udev rule would be:
+
+SUBSYSTEM=="net", ACTION=="add", DRIVER="<driver>", ATTR{phys_port_name}!="", \
+	NAME="$attr{phys_port_name}"
+
+Suggested naming convention is "swXpYsZ", where X is the switch name or ID, Y
+is the port name or ID, and Z is the sub-port name or ID.  For example, sw1p1s0
+would be sub-port 0 on port 1 on switch 1.
+
+Switch ID
+^^^^^^^^^
+
+The switchdev driver must implement the switchdev op switchdev_port_attr_get
+for SWITCHDEV_ATTR_PORT_PARENT_ID for each port netdev, returning the same
+physical ID for each port of a switch.  The ID must be unique between switches
+on the same system.  The ID does not need to be unique between switches on
+different systems.
+
+The switch ID is used to locate ports on a switch and to know if aggregated
+ports belong to the same switch.
+
+Port Features
+^^^^^^^^^^^^^
+
+NETIF_F_NETNS_LOCAL
+
+If the switchdev driver (and device) only supports offloading of the default
+network namespace (netns), the driver should set this feature flag to prevent
+the port netdev from being moved out of the default netns.  A netns-aware
+driver/device would not set this flag and be responsible for partitioning
+hardware to preserve netns containment.  This means hardware cannot forward
+traffic from a port in one namespace to another port in another namespace.
+
+Port Topology
+^^^^^^^^^^^^^
+
+The port netdevs representing the physical switch ports can be organized into
+higher-level switching constructs.  The default construct is a standalone
+router port, used to offload L3 forwarding.  Two or more ports can be bonded
+together to form a LAG.  Two or more ports (or LAGs) can be bridged to bridge
+L2 networks.  VLANs can be applied to sub-divide L2 networks.  L2-over-L3
+tunnels can be built on ports.  These constructs are built using standard Linux
+tools such as the bridge driver, the bonding/team drivers, and netlink-based
+tools such as iproute2.
+
+The switchdev driver can know a particular port's position in the topology by
+monitoring NETDEV_CHANGEUPPER notifications.  For example, a port moved into a
+bond will see its upper master change.  If that bond is moved into a bridge,
+the bond's upper master will change.  And so on.  The driver will track such
+movements to know what position a port occupies in the overall topology by
+registering for netdevice events and acting on NETDEV_CHANGEUPPER.
+
+L2 Forwarding Offload
+---------------------
+
+The idea is to offload the L2 data forwarding (switching) path from the kernel
+to the switchdev device by mirroring bridge FDB entries down to the device.  An
+FDB entry is the {port, MAC, VLAN} tuple forwarding destination.
+
+To offload L2 bridging, the switchdev driver/device should support:
+
+	- Static FDB entries installed on a bridge port
+	- Notification of learned/forgotten src mac/vlans from device
+	- STP state changes on the port
+	- VLAN flooding of multicast/broadcast and unknown unicast packets
+
+Static FDB Entries
+^^^^^^^^^^^^^^^^^^
+
+The switchdev driver should implement ndo_fdb_add, ndo_fdb_del and ndo_fdb_dump
+to support static FDB entries installed to the device.  Static bridge FDB
+entries are installed, for example, using iproute2 bridge cmd:
+
+	bridge fdb add ADDR dev DEV [vlan VID] [self]
+
+The driver should use the helper switchdev_port_fdb_xxx ops for ndo_fdb_xxx
+ops, and handle add/delete/dump of SWITCHDEV_OBJ_PORT_FDB object using
+switchdev_port_obj_xxx ops.
+
+XXX: what should be done if offloading this rule to hardware fails (for
+example, due to full capacity in hardware tables)?
+
+Note: by default, the bridge does not filter on VLAN and only bridges untagged
+traffic.  To enable VLAN support, turn on VLAN filtering:
+
+	echo 1 >/sys/class/net/<bridge>/bridge/vlan_filtering
+
+Notification of Learned/Forgotten Source MAC/VLANs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The switch device will learn/forget source MAC address/VLAN on ingress packets
+and notify the switch driver of the mac/vlan/port tuples.  The switch driver,
+in turn, will notify the bridge driver using the switchdev notifier call:
+
+	err = call_switchdev_notifiers(val, dev, info);
+
+Where val is SWITCHDEV_FDB_ADD when learning and SWITCHDEV_FDB_DEL when
+forgetting, and info points to a struct switchdev_notifier_fdb_info.  On
+SWITCHDEV_FDB_ADD, the bridge driver will install the FDB entry into the
+bridge's FDB and mark the entry as NTF_EXT_LEARNED.  The iproute2 bridge
+command will label these entries "offload":
+
+	$ bridge fdb
+	52:54:00:12:35:01 dev sw1p1 master br0 permanent
+	00:02:00:00:02:00 dev sw1p1 master br0 offload
+	00:02:00:00:02:00 dev sw1p1 self
+	52:54:00:12:35:02 dev sw1p2 master br0 permanent
+	00:02:00:00:03:00 dev sw1p2 master br0 offload
+	00:02:00:00:03:00 dev sw1p2 self
+	33:33:00:00:00:01 dev eth0 self permanent
+	01:00:5e:00:00:01 dev eth0 self permanent
+	33:33:ff:00:00:00 dev eth0 self permanent
+	01:80:c2:00:00:0e dev eth0 self permanent
+	33:33:00:00:00:01 dev br0 self permanent
+	01:00:5e:00:00:01 dev br0 self permanent
+	33:33:ff:12:35:01 dev br0 self permanent
+
+Learning on the port should be disabled on the bridge using the bridge command:
+
+	bridge link set dev DEV learning off
+
+Learning on the device port should be enabled, as well as learning_sync:
+
+	bridge link set dev DEV learning on self
+	bridge link set dev DEV learning_sync on self
+
+The learning_sync attribute enables syncing of the learned/forgotten FDB entry to
+the bridge's FDB.  It's possible, but not optimal, to enable learning on the
+device port and on the bridge port, and disable learning_sync.
+
+To support learning and learning_sync port attributes, the driver implements
+switchdev op switchdev_port_attr_get/set for SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS.
+The driver should initialize the attributes to the hardware defaults.
+
+FDB Ageing
+^^^^^^^^^^
+
+There are two FDB ageing models supported: 1) ageing by the device, and 2)
+ageing by the kernel.  Ageing by the device is preferred if many FDB entries
+are supported.  The driver calls call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
+...) to age out the FDB entry.  In this model, ageing by the kernel should be
+turned off.  XXX: how to turn off ageing in kernel on a per-port basis or
+otherwise prevent the kernel from ageing out the FDB entry?
+
+In the kernel ageing model, the standard bridge ageing mechanism is used to age
+out stale FDB entries.  To keep an FDB entry "alive", the driver should refresh
+the FDB entry by calling call_switchdev_notifiers(SWITCHDEV_FDB_ADD, ...).  The
+notification will reset the FDB entry's last-used time to now.  The driver
+should rate limit refresh notifications, for example, no more than once a
+second.  If the FDB entry expires, fdb_delete is called to remove the entry
+from the device.
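+
+A sketch of a rate-limited refresh (entry->refreshed is a hypothetical
+per-entry timestamp kept by the driver; info is filled in as in the earlier
+learning sketch):
+
+	if (time_after(jiffies, entry->refreshed + HZ)) {
+		entry->refreshed = jiffies;
+		call_switchdev_notifiers(SWITCHDEV_FDB_ADD, port_dev,
+					 &info.info);
+	}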
+
+STP State Change on Port
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Internally or with a third-party STP protocol implementation (e.g. mstpd), the
+bridge driver maintains the STP state for ports, and will notify the switch
+driver of STP state change on a port using the switchdev op
+switchdev_port_attr_set for SWITCHDEV_ATTR_PORT_STP_UPDATE.
+
+State is one of BR_STATE_*.  The switch driver can use STP state updates to
+update the ingress packet filter list for the port.  For example, if the port
+is DISABLED, no packets should pass, but if the port moves to BLOCKED, then
+STP BPDUs and other IEEE 01:80:c2:xx:xx:xx link-local multicast packets can
+pass.
+
+Note that STP BPDUs are untagged and STP state applies to all VLANs on the
+port, so packet filters should be applied consistently across untagged and
+tagged VLANs on the port.
+
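+Using the attribute naming above, a sketch of the dispatch in the driver's
+attr set op (foo_port_stp_update() is a hypothetical helper that reprograms
+the port's packet filters):
+
+	static int foo_port_attr_set(struct net_device *dev,
+				     struct switchdev_attr *attr)
+	{
+		switch (attr->id) {
+		case SWITCHDEV_ATTR_PORT_STP_UPDATE:
+			return foo_port_stp_update(dev, attr->u.stp_state);
+		default:
+			return -EOPNOTSUPP;
+		}
+	}
+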
+Flooding L2 domain
+^^^^^^^^^^^^^^^^^^
+
+For a given L2 VLAN domain, the switch device should flood multicast/broadcast
+and unknown unicast packets to all ports in the domain, if allowed by the
+port's current STP state.  The switch driver, knowing which ports are within
+which VLAN L2 domain, can program the switch device for flooding.  The packet
+should also be sent to the port netdev for processing by the bridge driver.
+The bridge should not reflood the packet to the same ports the device flooded.
+XXX: the mechanism to avoid duplicate flood packets is being discussed.
+
+It is possible for the switch device to not handle flooding and to push the
+packets up to the bridge driver for flooding.  This is not ideal: as the
+number of ports in the L2 domain scales up, the device is much more efficient
+at flooding packets than software.
+
+IGMP Snooping
+^^^^^^^^^^^^^
+
+XXX: complete this section
+
+
+L3 Routing Offload
+------------------
+
+Offloading L3 routing requires that the device be programmed with FIB entries
+from the kernel, with the device doing the FIB lookup and forwarding.  The
+device does a longest prefix match (LPM) on FIB entries matching the route
+prefix and forwards the packet to the matching FIB entry's nexthop(s) egress
+ports.
+
+To program the device, the driver implements support for
+SWITCHDEV_OBJ_IPV[4|6]_FIB object using switchdev_port_obj_xxx ops.
+switchdev_port_obj_add is used both for adding a new FIB entry to the device
+and for modifying an existing entry on the device.
+
+XXX: Currently, only SWITCHDEV_OBJ_IPV4_FIB objects are supported.
+
+SWITCHDEV_OBJ_IPV4_FIB object passes:
+
+	struct switchdev_obj_ipv4_fib {         /* IPV4_FIB */
+		u32 dst;
+		int dst_len;
+		struct fib_info *fi;
+		u8 tos;
+		u8 type;
+		u32 nlflags;
+		u32 tb_id;
+	} ipv4_fib;
+
+to add/modify/delete the IPv4 dst/dst_len prefix on table tb_id.  The *fi
+structure holds details on the route and the route's nexthops.  *dev is one of
+the port netdevs mentioned in the route's nexthop list.  If the output port
+netdevs referenced in the route's nexthop list don't all have the same switch
+ID, the driver is not called to add/modify/delete the FIB entry.
+
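+A minimal sketch of the driver's obj add op for this object, ignoring the
+prepare/commit phases (foo_fib4_add() is a hypothetical helper that programs
+the device):
+
+	static int foo_port_obj_add(struct net_device *dev,
+				    struct switchdev_obj *obj)
+	{
+		struct switchdev_obj_ipv4_fib *fib4 = &obj->u.ipv4_fib;
+
+		switch (obj->id) {
+		case SWITCHDEV_OBJ_IPV4_FIB:
+			return foo_fib4_add(dev, fib4->dst, fib4->dst_len,
+					    fib4->fi, fib4->tb_id);
+		default:
+			return -EOPNOTSUPP;
+		}
+	}
+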
+Routes offloaded to the device are labeled with "offload" in the ip route
+listing:
+
+	$ ip route show
+	default via 192.168.0.2 dev eth0
+	11.0.0.0/30 dev sw1p1  proto kernel  scope link  src 11.0.0.2 offload
+	11.0.0.4/30 via 11.0.0.1 dev sw1p1  proto zebra  metric 20 offload
+	11.0.0.8/30 dev sw1p2  proto kernel  scope link  src 11.0.0.10 offload
+	11.0.0.12/30 via 11.0.0.9 dev sw1p2  proto zebra  metric 20 offload
+	12.0.0.2  proto zebra  metric 30 offload
+		nexthop via 11.0.0.1  dev sw1p1 weight 1
+		nexthop via 11.0.0.9  dev sw1p2 weight 1
+	12.0.0.3 via 11.0.0.1 dev sw1p1  proto zebra  metric 20 offload
+	12.0.0.4 via 11.0.0.9 dev sw1p2  proto zebra  metric 20 offload
+	192.168.0.0/24 dev eth0  proto kernel  scope link  src 192.168.0.15
+
+XXX: add/mod/del IPv6 FIB API
+
+Nexthop Resolution
+^^^^^^^^^^^^^^^^^^
+
+The FIB entry's nexthop list contains the nexthop tuple (gateway, dev), but for
+the switch device to forward the packet with the correct dst mac address, the
+nexthop gateways must be resolved to the neighbor's mac address.  Neighbor mac
+address discovery comes via the ARP (or ND) process and is available via the
+arp_tbl neighbor table.  To resolve the route's nexthop gateways, the driver
+should trigger the kernel's neighbor resolution process.  See the rocker
+driver's rocker_port_ipv4_resolve() for an example.
+
+The driver can monitor for updates to arp_tbl using the netevent notifier
+NETEVENT_NEIGH_UPDATE.  The device can be programmed with resolved nexthops
+for the routes as arp_tbl is updated.
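+
+A sketch of such monitoring (foo_neigh_update() is a hypothetical helper that
+programs the resolved nexthop into the device):
+
+	static int foo_netevent_event(struct notifier_block *nb,
+				      unsigned long event, void *ptr)
+	{
+		struct neighbour *n = ptr;
+
+		if (event == NETEVENT_NEIGH_UPDATE && n->tbl == &arp_tbl)
+			foo_neigh_update(n);
+
+		return NOTIFY_DONE;
+	}
+
+	static struct notifier_block foo_netevent_nb = {
+		.notifier_call = foo_netevent_event,
+	};
+
+	/* at driver init: */
+	register_netevent_notifier(&foo_netevent_nb);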
diff --git a/Documentation/networking/tc-actions-env-rules.txt b/Documentation/networking/tc-actions-env-rules.txt
index 70d6cf6..f378146 100644
--- a/Documentation/networking/tc-actions-env-rules.txt
+++ b/Documentation/networking/tc-actions-env-rules.txt
@@ -8,14 +8,8 @@
 or intentionally branches by redirecting a packet, then you need to
 clone the packet.
 
-There are certain fields in the skb tc_verd that need to be reset so we
-avoid loops, etc.  A few are generic enough that skb_act_clone()
-resets them for you, so invoke skb_act_clone() rather than skb_clone().
-
 2) If you munge any packet thou shalt call pskb_expand_head in the case
 someone else is referencing the skb. After that you "own" the skb.
-You must also tell us if it is ok to munge the packet (TC_OK2MUNGE),
-this way any action downstream can stomp on the packet.
 
 3) Dropping packets you don't own is a no-no. You simply return
 TC_ACT_SHOT to the caller and they will drop it.
diff --git a/Documentation/nfc/nfc-hci.txt b/Documentation/nfc/nfc-hci.txt
index 0686c9e..0dc078c 100644
--- a/Documentation/nfc/nfc-hci.txt
+++ b/Documentation/nfc/nfc-hci.txt
@@ -122,7 +122,7 @@
 PHY Management
 --------------
 
-The physical link (i2c, ...) management is defined by the following struture:
+The physical link (i2c, ...) management is defined by the following structure:
 
 struct nfc_phy_ops {
 	int (*write)(void *dev_id, struct sk_buff *skb);
diff --git a/Documentation/s390/qeth.txt b/Documentation/s390/qeth.txt
index 74122ad..aa06fcf 100644
--- a/Documentation/s390/qeth.txt
+++ b/Documentation/s390/qeth.txt
@@ -1,6 +1,6 @@
 IBM s390 QDIO Ethernet Driver
 
-HiperSockets Bridge Port Support
+OSA and HiperSockets Bridge Port Support
 
 Uevents
 
@@ -8,7 +8,7 @@
 a primary or a secondary Bridge Port. For more information, see
 "z/VM Connectivity, SC24-6174".
 
-When run on HiperSockets Bridge Capable Port hardware, and the state
+When run on an OSA or HiperSockets Bridge Capable Port hardware, and the state
 of some configured Bridge Port device on the channel changes, a udev
 event with ACTION=CHANGE is emitted on behalf of the corresponding
 ccwgroup device. The event has the following attributes:
diff --git a/MAINTAINERS b/MAINTAINERS
index 78a9174..7ee651e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -653,7 +653,6 @@
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/amd/xgbe/
-F:	drivers/net/phy/amd-xgbe-phy.c
 
 AMS (Apple Motion Sensor) DRIVER
 M:	Michael Hanselmann <linux-kernel@hansmi.ch>
@@ -923,6 +922,13 @@
 S:	Maintained
 F:	arch/arm/mach-cns3xxx/
 
+ARM/CAVIUM THUNDER NETWORK DRIVER
+M:	Sunil Goutham <sgoutham@cavium.com>
+M:	Robert Richter <rric@kernel.org>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Supported
+F:	drivers/net/ethernet/cavium/
+
 ARM/CIRRUS LOGIC CLPS711X ARM ARCHITECTURE
 M:	Alexander Shiyan <shc_work@mail.ru>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1873,6 +1879,14 @@
 S:	Supported
 F:	drivers/scsi/esas2r
 
+ATUSB IEEE 802.15.4 RADIO DRIVER
+M:	Stefan Schmidt <stefan@osg.samsung.com>
+L:	linux-wpan@vger.kernel.org
+S:	Maintained
+F:	drivers/net/ieee802154/atusb.c
+F:	drivers/net/ieee802154/atusb.h
+F:	drivers/net/ieee802154/at86rf230.h
+
 AUDIT SUBSYSTEM
 M:	Paul Moore <paul@paul-moore.com>
 M:	Eric Paris <eparis@redhat.com>
@@ -2453,6 +2467,17 @@
 F:	drivers/iio/light/cm*
 F:	Documentation/devicetree/bindings/i2c/trivial-devices.txt
 
+CAVIUM LIQUIDIO NETWORK DRIVER
+M:     Derek Chickles <derek.chickles@caviumnetworks.com>
+M:     Satanand Burla <satananda.burla@caviumnetworks.com>
+M:     Felix Manlunas <felix.manlunas@caviumnetworks.com>
+M:     Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
+L:     netdev@vger.kernel.org
+W:     http://www.cavium.com
+S:     Supported
+F:     drivers/net/ethernet/cavium/
+F:     drivers/net/ethernet/cavium/liquidio/
+
 CC2520 IEEE-802.15.4 RADIO DRIVER
 M:	Varka Bhadram <varkabhadram@gmail.com>
 L:	linux-wpan@vger.kernel.org
@@ -6411,6 +6436,12 @@
 F:	include/uapi/linux/ivtv*
 F:	include/uapi/linux/uvcvideo.h
 
+MEDIATEK MT7601U WIRELESS LAN DRIVER
+M:	Jakub Kicinski <kubakici@wp.pl>
+L:	linux-wireless@vger.kernel.org
+S:	Maintained
+F:	drivers/net/wireless/mediatek/mt7601u/
+
 MEGARAID SCSI/SAS DRIVERS
 M:	Kashyap Desai <kashyap.desai@avagotech.com>
 M:	Sumit Saxena <sumit.saxena@avagotech.com>
@@ -8197,8 +8228,6 @@
 M:	Stanislaw Gruszka <sgruszka@redhat.com>
 M:	Helmut Schaa <helmut.schaa@googlemail.com>
 L:	linux-wireless@vger.kernel.org
-L:	users@rt2x00.serialmonkey.com (moderated for non-subscribers)
-W:	http://rt2x00.serialmonkey.com/
 S:	Maintained
 F:	drivers/net/wireless/rt2x00/
 
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index e0e2358..4550d24 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -873,6 +873,16 @@
 			off = offsetof(struct sk_buff, queue_mapping);
 			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
 			break;
+		case BPF_LDX | BPF_W | BPF_ABS:
+			/*
+			 * load a 32bit word from struct seccomp_data.
+			 * seccomp_check_filter() will already have checked
+			 * that k is 32bit aligned and lies within the
+			 * struct seccomp_data.
+			 */
+			ctx->seen |= SEEN_SKB;
+			emit(ARM_LDR_I(r_A, r_skb, k), ctx);
+			break;
 		default:
 			return -1;
 		}
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
index de156ba..f6498ee 100644
--- a/arch/s390/net/bpf_jit.h
+++ b/arch/s390/net/bpf_jit.h
@@ -28,6 +28,9 @@
  *	      | old backchain |     |
  *	      +---------------+     |
  *	      |   r15 - r6    |     |
+ *	      +---------------+     |
+ *	      | 4 byte align  |     |
+ *	      | tail_call_cnt |     |
  * BFP	   -> +===============+     |
  *	      |		      |     |
  *	      |   BPF stack   |     |
@@ -46,14 +49,17 @@
  * R15	   -> +---------------+     + low
  *
  * We get 160 bytes stack space from calling function, but only use
- * 11 * 8 byte (old backchain + r15 - r6) for storing registers.
+ * 12 * 8 byte for old backchain, r15..r6, and tail_call_cnt.
  */
 #define STK_SPACE	(MAX_BPF_STACK + 8 + 4 + 4 + 160)
-#define STK_160_UNUSED	(160 - 11 * 8)
+#define STK_160_UNUSED	(160 - 12 * 8)
 #define STK_OFF		(STK_SPACE - STK_160_UNUSED)
 #define STK_OFF_TMP	160	/* Offset of tmp buffer on stack */
 #define STK_OFF_HLEN	168	/* Offset of SKB header length on stack */
 
+#define STK_OFF_R6	(160 - 11 * 8)	/* Offset of r6 on stack */
+#define STK_OFF_TCCNT	(160 - 12 * 8)	/* Offset of tail_call_cnt on stack */
+
 /* Offset to skip condition code check */
 #define OFF_OK		4
 
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 55423d8..d3766dd 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -21,6 +21,7 @@
 #include <linux/netdevice.h>
 #include <linux/filter.h>
 #include <linux/init.h>
+#include <linux/bpf.h>
 #include <asm/cacheflush.h>
 #include <asm/dis.h>
 #include "bpf_jit.h"
@@ -40,6 +41,8 @@
 	int base_ip;		/* Base address for literal pool */
 	int ret0_ip;		/* Address of return 0 */
 	int exit_ip;		/* Address of exit */
+	int tail_call_start;	/* Tail call start offset */
+	int labels[1];		/* Labels for local jumps */
 };
 
 #define BPF_SIZE_MAX	4096	/* Max size for program */
@@ -49,6 +52,7 @@
 #define SEEN_RET0	4	/* ret0_ip points to a valid return 0 */
 #define SEEN_LITERAL	8	/* code uses literals */
 #define SEEN_FUNC	16	/* calls C functions */
+#define SEEN_TAIL_CALL	32	/* code uses tail calls */
 #define SEEN_STACK	(SEEN_FUNC | SEEN_MEM | SEEN_SKB)
 
 /*
@@ -60,6 +64,7 @@
 #define REG_L		(__MAX_BPF_REG+3)	/* Literal pool register */
 #define REG_15		(__MAX_BPF_REG+4)	/* Register 15 */
 #define REG_0		REG_W0			/* Register 0 */
+#define REG_1		REG_W1			/* Register 1 */
 #define REG_2		BPF_REG_1		/* Register 2 */
 #define REG_14		BPF_REG_0		/* Register 14 */
 
@@ -223,6 +228,24 @@
 	REG_SET_SEEN(b3);					\
 })
 
+#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask)	\
+({								\
+	int rel = (jit->labels[label] - jit->prg) >> 1;		\
+	_EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff),	\
+	       op2 | mask << 12);				\
+	REG_SET_SEEN(b1);					\
+	REG_SET_SEEN(b2);					\
+})
+
+#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask)	\
+({								\
+	int rel = (jit->labels[label] - jit->prg) >> 1;		\
+	_EMIT6(op1 | (reg_high(b1) | mask) << 16 |		\
+		(rel & 0xffff), op2 | (imm & 0xff) << 8);	\
+	REG_SET_SEEN(b1);					\
+	BUILD_BUG_ON(((unsigned long) imm) > 0xff);		\
+})
+
 #define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)		\
 ({								\
 	/* Branch instruction needs 6 bytes */			\
@@ -286,7 +309,7 @@
  */
 static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
 {
-	u32 off = 72 + (rs - 6) * 8;
+	u32 off = STK_OFF_R6 + (rs - 6) * 8;
 
 	if (rs == re)
 		/* stg %rs,off(%r15) */
@@ -301,7 +324,7 @@
  */
 static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re)
 {
-	u32 off = 72 + (rs - 6) * 8;
+	u32 off = STK_OFF_R6 + (rs - 6) * 8;
 
 	if (jit->seen & SEEN_STACK)
 		off += STK_OFF;
@@ -374,6 +397,16 @@
  */
 static void bpf_jit_prologue(struct bpf_jit *jit)
 {
+	if (jit->seen & SEEN_TAIL_CALL) {
+		/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
+		_EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
+	} else {
+		/* j tail_call_start: NOP if no tail calls are used */
+		EMIT4_PCREL(0xa7f40000, 6);
+		_EMIT2(0);
+	}
+	/* Tail calls have to skip above initialization */
+	jit->tail_call_start = jit->prg;
 	/* Save registers */
 	save_restore_regs(jit, REGS_SAVE);
 	/* Setup literal pool */
@@ -951,6 +984,75 @@
 		EMIT4(0xb9040000, BPF_REG_0, REG_2);
 		break;
 	}
+	case BPF_JMP | BPF_CALL | BPF_X:
+		/*
+		 * Implicit input:
+		 *  B1: pointer to ctx
+		 *  B2: pointer to bpf_array
+		 *  B3: index in bpf_array
+		 */
+		jit->seen |= SEEN_TAIL_CALL;
+
+		/*
+		 * if (index >= array->map.max_entries)
+		 *         goto out;
+		 */
+
+		/* llgf %w1,map.max_entries(%b2) */
+		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
+			      offsetof(struct bpf_array, map.max_entries));
+		/* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
+		EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
+				  REG_W1, 0, 0xa);
+
+		/*
+		 * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
+		 *         goto out;
+		 */
+
+		if (jit->seen & SEEN_STACK)
+			off = STK_OFF_TCCNT + STK_OFF;
+		else
+			off = STK_OFF_TCCNT;
+		/* lhi %w0,1 */
+		EMIT4_IMM(0xa7080000, REG_W0, 1);
+		/* laal %w1,%w0,off(%r15) */
+		EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
+		/* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */
+		EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1,
+				      MAX_TAIL_CALL_CNT, 0, 0x2);
+
+		/*
+		 * prog = array->prog[index];
+		 * if (prog == NULL)
+		 *         goto out;
+		 */
+
+		/* sllg %r1,%b3,3: %r1 = index * 8 */
+		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
+		/* lg %r1,prog(%b2,%r1) */
+		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
+			      REG_1, offsetof(struct bpf_array, prog));
+		/* clgij %r1,0,0x8,label0 */
+		EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007d, REG_1, 0, 0, 0x8);
+
+		/*
+		 * Restore registers before calling function
+		 */
+		save_restore_regs(jit, REGS_RESTORE);
+
+		/*
+		 * goto *(prog->bpf_func + tail_call_start);
+		 */
+
+		/* lg %r1,bpf_func(%r1) */
+		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
+			      offsetof(struct bpf_prog, bpf_func));
+		/* bc 0xf,tail_call_start(%r1) */
+		_EMIT4(0x47f01000 + jit->tail_call_start);
+		/* out: */
+		jit->labels[0] = jit->prg;
+		break;
 	case BPF_JMP | BPF_EXIT: /* return b0 */
 		last = (i == fp->len - 1) ? 1 : 0;
 		if (last && !(jit->seen & SEEN_RET0))
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index ddeff48..579a8fd 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -12,6 +12,7 @@
 #include <linux/filter.h>
 #include <linux/if_vlan.h>
 #include <asm/cacheflush.h>
+#include <linux/bpf.h>
 
 int bpf_jit_enable __read_mostly;
 
@@ -37,7 +38,8 @@
 	return ptr + len;
 }
 
-#define EMIT(bytes, len)	do { prog = emit_code(prog, bytes, len); } while (0)
+#define EMIT(bytes, len) \
+	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
 
 #define EMIT1(b1)		EMIT(b1, 1)
 #define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
@@ -186,31 +188,31 @@
 #define BPF_MAX_INSN_SIZE	128
 #define BPF_INSN_SAFETY		64
 
-static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
-		  int oldproglen, struct jit_context *ctx)
+#define STACKSIZE \
+	(MAX_BPF_STACK + \
+	 32 /* space for rbx, r13, r14, r15 */ + \
+	 8 /* space for skb_copy_bits() buffer */)
+
+#define PROLOGUE_SIZE 51
+
+/* emit x64 prologue code for BPF program and check its size.
+ * bpf_tail_call helper will skip it while jumping into another program
+ */
+static void emit_prologue(u8 **pprog)
 {
-	struct bpf_insn *insn = bpf_prog->insnsi;
-	int insn_cnt = bpf_prog->len;
-	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
-	bool seen_exit = false;
-	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
-	int i;
-	int proglen = 0;
-	u8 *prog = temp;
-	int stacksize = MAX_BPF_STACK +
-		32 /* space for rbx, r13, r14, r15 */ +
-		8 /* space for skb_copy_bits() buffer */;
+	u8 *prog = *pprog;
+	int cnt = 0;
 
 	EMIT1(0x55); /* push rbp */
 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */
 
-	/* sub rsp, stacksize */
-	EMIT3_off32(0x48, 0x81, 0xEC, stacksize);
+	/* sub rsp, STACKSIZE */
+	EMIT3_off32(0x48, 0x81, 0xEC, STACKSIZE);
 
 	/* all classic BPF filters use R6(rbx) save it */
 
 	/* mov qword ptr [rbp-X],rbx */
-	EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);
+	EMIT3_off32(0x48, 0x89, 0x9D, -STACKSIZE);
 
 	/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
 	 * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
@@ -221,16 +223,112 @@
 	 */
 
 	/* mov qword ptr [rbp-X],r13 */
-	EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
+	EMIT3_off32(0x4C, 0x89, 0xAD, -STACKSIZE + 8);
 	/* mov qword ptr [rbp-X],r14 */
-	EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
+	EMIT3_off32(0x4C, 0x89, 0xB5, -STACKSIZE + 16);
 	/* mov qword ptr [rbp-X],r15 */
-	EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);
+	EMIT3_off32(0x4C, 0x89, 0xBD, -STACKSIZE + 24);
 
 	/* clear A and X registers */
 	EMIT2(0x31, 0xc0); /* xor eax, eax */
 	EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
 
+	/* clear tail_cnt: mov qword ptr [rbp-X], rax */
+	EMIT3_off32(0x48, 0x89, 0x85, -STACKSIZE + 32);
+
+	BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
+	*pprog = prog;
+}
+
+/* generate the following code:
+ * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
+ *   if (index >= array->map.max_entries)
+ *     goto out;
+ *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
+ *     goto out;
+ *   prog = array->prog[index];
+ *   if (prog == NULL)
+ *     goto out;
+ *   goto *(prog->bpf_func + prologue_size);
+ * out:
+ */
+static void emit_bpf_tail_call(u8 **pprog)
+{
+	u8 *prog = *pprog;
+	int label1, label2, label3;
+	int cnt = 0;
+
+	/* rdi - pointer to ctx
+	 * rsi - pointer to bpf_array
+	 * rdx - index in bpf_array
+	 */
+
+	/* if (index >= array->map.max_entries)
+	 *   goto out;
+	 */
+	EMIT4(0x48, 0x8B, 0x46,                   /* mov rax, qword ptr [rsi + 16] */
+	      offsetof(struct bpf_array, map.max_entries));
+	EMIT3(0x48, 0x39, 0xD0);                  /* cmp rax, rdx */
+#define OFFSET1 44 /* number of bytes to jump */
+	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
+	label1 = cnt;
+
+	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
+	 *   goto out;
+	 */
+	EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
+	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
+#define OFFSET2 33
+	EMIT2(X86_JA, OFFSET2);                   /* ja out */
+	label2 = cnt;
+	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
+	EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
+
+	/* prog = array->prog[index]; */
+	EMIT4(0x48, 0x8D, 0x44, 0xD6);            /* lea rax, [rsi + rdx * 8 + 0x50] */
+	EMIT1(offsetof(struct bpf_array, prog));
+	EMIT3(0x48, 0x8B, 0x00);                  /* mov rax, qword ptr [rax] */
+
+	/* if (prog == NULL)
+	 *   goto out;
+	 */
+	EMIT4(0x48, 0x83, 0xF8, 0x00);            /* cmp rax, 0 */
+#define OFFSET3 10
+	EMIT2(X86_JE, OFFSET3);                   /* je out */
+	label3 = cnt;
+
+	/* goto *(prog->bpf_func + prologue_size); */
+	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
+	      offsetof(struct bpf_prog, bpf_func));
+	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */
+
+	/* now we're ready to jump into next BPF program
+	 * rdi == ctx (1st arg)
+	 * rax == prog->bpf_func + prologue_size
+	 */
+	EMIT2(0xFF, 0xE0);                        /* jmp rax */
+
+	/* out: */
+	BUILD_BUG_ON(cnt - label1 != OFFSET1);
+	BUILD_BUG_ON(cnt - label2 != OFFSET2);
+	BUILD_BUG_ON(cnt - label3 != OFFSET3);
+	*pprog = prog;
+}
+
+static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+		  int oldproglen, struct jit_context *ctx)
+{
+	struct bpf_insn *insn = bpf_prog->insnsi;
+	int insn_cnt = bpf_prog->len;
+	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
+	bool seen_exit = false;
+	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
+	int i, cnt = 0;
+	int proglen = 0;
+	u8 *prog = temp;
+
+	emit_prologue(&prog);
+
 	if (seen_ld_abs) {
 		/* r9d : skb->len - skb->data_len (headlen)
 		 * r10 : skb->data
@@ -739,6 +837,10 @@
 			}
 			break;
 
+		case BPF_JMP | BPF_CALL | BPF_X:
+			emit_bpf_tail_call(&prog);
+			break;
+
 			/* cond jump */
 		case BPF_JMP | BPF_JEQ | BPF_X:
 		case BPF_JMP | BPF_JNE | BPF_X:
@@ -891,13 +993,13 @@
 			/* update cleanup_addr */
 			ctx->cleanup_addr = proglen;
 			/* mov rbx, qword ptr [rbp-X] */
-			EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
+			EMIT3_off32(0x48, 0x8B, 0x9D, -STACKSIZE);
 			/* mov r13, qword ptr [rbp-X] */
-			EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
+			EMIT3_off32(0x4C, 0x8B, 0xAD, -STACKSIZE + 8);
 			/* mov r14, qword ptr [rbp-X] */
-			EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
+			EMIT3_off32(0x4C, 0x8B, 0xB5, -STACKSIZE + 16);
 			/* mov r15, qword ptr [rbp-X] */
-			EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);
+			EMIT3_off32(0x4C, 0x8B, 0xBD, -STACKSIZE + 24);
 
 			EMIT1(0xC9); /* leave */
 			EMIT1(0xC3); /* ret */
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 2bc180e..a8e7aa3 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -247,7 +247,7 @@
 	if (!type)
 		goto unlock;
 
-	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto);
+	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, 0);
 	err = -ENOMEM;
 	if (!sk2)
 		goto unlock;
@@ -327,7 +327,7 @@
 		return -EPROTONOSUPPORT;
 
 	err = -ENOMEM;
-	sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto);
+	sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto, kern);
 	if (!sk)
 		goto out;
 
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 93dca2e..a8da3a5 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -116,8 +116,8 @@
 static short nvpibits = -1;
 static short nvcibits = -1;
 static short rx_skb_reserve = 16;
-static bool irq_coalesce = 1;
-static bool sdh = 0;
+static bool irq_coalesce = true;
+static bool sdh;
 
 /* Read from EEPROM = 0000 0011b */
 static unsigned int readtab[] = {
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index 909c95b..feb023d 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -306,14 +306,12 @@
 	if (start_timer) {
 		start_timer = 0;
                 
-		init_timer(&stats_timer);
+		setup_timer(&stats_timer, idt77105_stats_timer_func, 0UL);
 		stats_timer.expires = jiffies+IDT77105_STATS_TIMER_PERIOD;
-		stats_timer.function = idt77105_stats_timer_func;
 		add_timer(&stats_timer);
                 
-		init_timer(&restart_timer);
+		setup_timer(&restart_timer, idt77105_restart_timer_func, 0UL);
 		restart_timer.expires = jiffies+IDT77105_RESTART_TIMER_PERIOD;
-		restart_timer.function = idt77105_restart_timer_func;
 		add_timer(&restart_timer);
 	}
 	spin_unlock_irqrestore(&idt77105_priv_lock, flags);
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 924f8e2..65e6590 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2618,7 +2618,7 @@
         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
            iadev->close_pending++;
 	   prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
-	   schedule_timeout(50);
+	   schedule_timeout(msecs_to_jiffies(500));
 	   finish_wait(&iadev->timeout_wait, &wait);
            spin_lock_irqsave(&iadev->tx_lock, flags); 
            while((skb = skb_dequeue(&iadev->tx_backlog))) {
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index fc6ffcf..be5fffb 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -29,12 +29,6 @@
 	select BCMA_DRIVER_PCI
 	default y
 
-config BCMA_DRIVER_PCI_HOSTMODE
-	bool "Driver for PCI core working in hostmode"
-	depends on BCMA && MIPS && BCMA_HOST_PCI
-	help
-	  PCI core hostmode operation (external PCI bus).
-
 config BCMA_HOST_SOC
 	bool "Support for BCMA in a SoC"
 	depends on BCMA
@@ -61,6 +55,12 @@
 	  This driver is also prerequisite for a hostmode PCIe core
 	  support.
 
+config BCMA_DRIVER_PCI_HOSTMODE
+	bool "Driver for PCI core working in hostmode"
+	depends on BCMA && MIPS && BCMA_DRIVER_PCI
+	help
+	  PCI core hostmode operation (external PCI bus).
+
 config BCMA_DRIVER_MIPS
 	bool "BCMA Broadcom MIPS core driver"
 	depends on BCMA && MIPS
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 74ccb02..5f6018e 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -226,6 +226,7 @@
 		chip->of_node	= cc->core->dev.of_node;
 #endif
 	switch (bus->chipinfo.id) {
+	case BCMA_CHIP_ID_BCM4707:
 	case BCMA_CHIP_ID_BCM5357:
 	case BCMA_CHIP_ID_BCM53572:
 		chip->ngpio	= 32;
@@ -235,16 +236,17 @@
 	}
 
 	/*
-	 * On MIPS we register GPIO devices (LEDs, buttons) using absolute GPIO
-	 * pin numbers. We don't have Device Tree there and we can't really use
-	 * relative (per chip) numbers.
-	 * So let's use predictable base for BCM47XX and "random" for all other.
+	 * Register SoC GPIO devices with absolute GPIO pin base.
+	 * On MIPS, we don't have Device Tree and we can't use relative (per chip)
+	 * GPIO numbers.
+	 * On some ARM devices, user space may want to access some system GPIO
+	 * pins directly, which is easier to do with a predictable GPIO base.
 	 */
-#if IS_BUILTIN(CONFIG_BCM47XX)
-	chip->base		= bus->num * BCMA_GPIO_MAX_PINS;
-#else
-	chip->base		= -1;
-#endif
+	if (IS_BUILTIN(CONFIG_BCM47XX) ||
+	    cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
+		chip->base		= bus->num * BCMA_GPIO_MAX_PINS;
+	else
+		chip->base		= -1;
 
 	err = bcma_gpio_irq_domain_init(cc);
 	if (err)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index cee2035..c097909 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -598,7 +598,7 @@
 	memcpy(&peer_in6, &connection->peer_addr, peer_addr_len);
 
 	what = "sock_create_kern";
-	err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
+	err = sock_create_kern(&init_net, ((struct sockaddr *)&src_in6)->sa_family,
 			       SOCK_STREAM, IPPROTO_TCP, &sock);
 	if (err < 0) {
 		sock = NULL;
@@ -693,7 +693,7 @@
 	memcpy(&my_addr, &connection->my_addr, my_addr_len);
 
 	what = "sock_create_kern";
-	err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
+	err = sock_create_kern(&init_net, ((struct sockaddr *)&my_addr)->sa_family,
 			       SOCK_STREAM, IPPROTO_TCP, &s_listen);
 	if (err) {
 		s_listen = NULL;
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index ed5c273..2e77707 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -9,6 +9,10 @@
 	tristate
 	select FW_LOADER
 
+config BT_RTL
+	tristate
+	select FW_LOADER
+
 config BT_HCIBTUSB
 	tristate "HCI USB driver"
 	depends on USB
@@ -32,6 +36,17 @@
 
 	  Say Y here to compile support for Broadcom protocol.
 
+config BT_HCIBTUSB_RTL
+	bool "Realtek protocol support"
+	depends on BT_HCIBTUSB
+	select BT_RTL
+	default y
+	help
+	  The Realtek protocol support enables firmware and configuration
+	  download support for Realtek Bluetooth controllers.
+
+	  Say Y here to compile support for Realtek protocol.
+
 config BT_HCIBTSDIO
 	tristate "HCI SDIO driver"
 	depends on MMC
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index dd0d9c4..f40e194 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -21,6 +21,7 @@
 obj-$(CONFIG_BT_MRVL_SDIO)	+= btmrvl_sdio.o
 obj-$(CONFIG_BT_WILINK)		+= btwilink.o
 obj-$(CONFIG_BT_BCM)		+= btbcm.o
+obj-$(CONFIG_BT_RTL)		+= btrtl.o
 
 btmrvl-y			:= btmrvl_main.o
 btmrvl-$(CONFIG_DEBUG_FS)	+= btmrvl_debugfs.o
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 8c81af6..e527a3e 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -80,6 +80,7 @@
 	{ USB_DEVICE(0x0489, 0xe057) },
 	{ USB_DEVICE(0x0489, 0xe056) },
 	{ USB_DEVICE(0x0489, 0xe05f) },
+	{ USB_DEVICE(0x0489, 0xe076) },
 	{ USB_DEVICE(0x0489, 0xe078) },
 	{ USB_DEVICE(0x04c5, 0x1330) },
 	{ USB_DEVICE(0x04CA, 0x3004) },
@@ -88,6 +89,7 @@
 	{ USB_DEVICE(0x04CA, 0x3007) },
 	{ USB_DEVICE(0x04CA, 0x3008) },
 	{ USB_DEVICE(0x04CA, 0x300b) },
+	{ USB_DEVICE(0x04CA, 0x300d) },
 	{ USB_DEVICE(0x04CA, 0x300f) },
 	{ USB_DEVICE(0x04CA, 0x3010) },
 	{ USB_DEVICE(0x0930, 0x0219) },
@@ -113,6 +115,7 @@
 	{ USB_DEVICE(0x13d3, 0x3408) },
 	{ USB_DEVICE(0x13d3, 0x3423) },
 	{ USB_DEVICE(0x13d3, 0x3432) },
+	{ USB_DEVICE(0x13d3, 0x3474) },
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xE02C) },
@@ -137,6 +140,7 @@
 	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
@@ -145,6 +149,7 @@
 	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
@@ -170,6 +175,7 @@
 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
 
 	/* Atheros AR5BBU22 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 6de97b3..7aab654 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -202,9 +202,8 @@
 		/* Send frame */
 		len = bt3c_write(iobase, 256, skb->data, skb->len);
 
-		if (len != skb->len) {
+		if (len != skb->len)
 			BT_ERR("Very strange");
-		}
 
 		kfree_skb(skb);
 
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 4bba866..1e1a432 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -33,6 +33,7 @@
 #define VERSION "0.1"
 
 #define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}})
+#define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}})
 
 int btbcm_check_bdaddr(struct hci_dev *hdev)
 {
@@ -55,17 +56,19 @@
 	}
 
 	bda = (struct hci_rp_read_bd_addr *)skb->data;
-	if (bda->status) {
-		BT_ERR("%s: BCM: Device address result failed (%02x)",
-		       hdev->name, bda->status);
-		kfree_skb(skb);
-		return -bt_to_errno(bda->status);
-	}
 
-	/* The address 00:20:70:02:A0:00 indicates a BCM20702A0 controller
+	/* Check if the address indicates a controller with either an
+	 * invalid or default address. In both cases the device needs
+	 * to be marked as not having a valid address.
+	 *
+	 * The address 00:20:70:02:A0:00 indicates a BCM20702A0 controller
 	 * with no configured address.
+	 *
+	 * The address 43:24:B3:00:00:00 indicates a BCM4324B3 controller
+	 * in the waiting-for-configuration state.
 	 */
-	if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0)) {
+	if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0) ||
+	    !bacmp(&bda->bdaddr, BDADDR_BCM4324B3)) {
 		BT_INFO("%s: BCM: Using default device address (%pMR)",
 			hdev->name, &bda->bdaddr);
 		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
@@ -95,21 +98,14 @@
 }
 EXPORT_SYMBOL_GPL(btbcm_set_bdaddr);
 
-int btbcm_patchram(struct hci_dev *hdev, const char *firmware)
+int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw)
 {
 	const struct hci_command_hdr *cmd;
-	const struct firmware *fw;
 	const u8 *fw_ptr;
 	size_t fw_size;
 	struct sk_buff *skb;
 	u16 opcode;
-	int err;
-
-	err = request_firmware(&fw, firmware, &hdev->dev);
-	if (err < 0) {
-		BT_INFO("%s: BCM: Patch %s not found", hdev->name, firmware);
-		return err;
-	}
+	int err = 0;
 
 	/* Start Download */
 	skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT);
@@ -135,8 +131,7 @@
 		fw_size -= sizeof(*cmd);
 
 		if (fw_size < cmd->plen) {
-			BT_ERR("%s: BCM: Patch %s is corrupted", hdev->name,
-			       firmware);
+			BT_ERR("%s: BCM: Patch is corrupted", hdev->name);
 			err = -EINVAL;
 			goto done;
 		}
@@ -162,7 +157,6 @@
 	msleep(250);
 
 done:
-	release_firmware(fw);
 	return err;
 }
 EXPORT_SYMBOL(btbcm_patchram);
@@ -248,9 +242,101 @@
 	const char *name;
 } bcm_uart_subver_table[] = {
 	{ 0x410e, "BCM43341B0"	},	/* 002.001.014 */
+	{ 0x4406, "BCM4324B3"	},	/* 002.004.006 */
+	{ 0x610c, "BCM4354"	},	/* 003.001.012 */
 	{ }
 };
 
+int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len)
+{
+	u16 subver, rev;
+	const char *hw_name = NULL;
+	struct sk_buff *skb;
+	struct hci_rp_read_local_version *ver;
+	int i, err;
+
+	/* Reset */
+	err = btbcm_reset(hdev);
+	if (err)
+		return err;
+
+	/* Read Local Version Info */
+	skb = btbcm_read_local_version(hdev);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	ver = (struct hci_rp_read_local_version *)skb->data;
+	rev = le16_to_cpu(ver->hci_rev);
+	subver = le16_to_cpu(ver->lmp_subver);
+	kfree_skb(skb);
+
+	/* Read Verbose Config Version Info */
+	skb = btbcm_read_verbose_config(hdev);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
+	kfree_skb(skb);
+
+	switch ((rev & 0xf000) >> 12) {
+	case 0:
+	case 1:
+	case 3:
+		for (i = 0; bcm_uart_subver_table[i].name; i++) {
+			if (subver == bcm_uart_subver_table[i].subver) {
+				hw_name = bcm_uart_subver_table[i].name;
+				break;
+			}
+		}
+
+		snprintf(fw_name, len, "brcm/%s.hcd", hw_name ? : "BCM");
+		break;
+	default:
+		return 0;
+	}
+
+	BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
+		hw_name ? : "BCM", (subver & 0x7000) >> 13,
+		(subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(btbcm_initialize);
+
+int btbcm_finalize(struct hci_dev *hdev)
+{
+	struct sk_buff *skb;
+	struct hci_rp_read_local_version *ver;
+	u16 subver, rev;
+	int err;
+
+	/* Reset */
+	err = btbcm_reset(hdev);
+	if (err)
+		return err;
+
+	/* Read Local Version Info */
+	skb = btbcm_read_local_version(hdev);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	ver = (struct hci_rp_read_local_version *)skb->data;
+	rev = le16_to_cpu(ver->hci_rev);
+	subver = le16_to_cpu(ver->lmp_subver);
+	kfree_skb(skb);
+
+	BT_INFO("%s: BCM (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
+		(subver & 0x7000) >> 13, (subver & 0x1f00) >> 8,
+		(subver & 0x00ff), rev & 0x0fff);
+
+	btbcm_check_bdaddr(hdev);
+
+	set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(btbcm_finalize);
+
 static const struct {
 	u16 subver;
 	const char *name;
@@ -271,6 +357,7 @@
 int btbcm_setup_patchram(struct hci_dev *hdev)
 {
 	char fw_name[64];
+	const struct firmware *fw;
 	u16 subver, rev, pid, vid;
 	const char *hw_name = NULL;
 	struct sk_buff *skb;
@@ -302,6 +389,7 @@
 
 	switch ((rev & 0xf000) >> 12) {
 	case 0:
+	case 3:
 		for (i = 0; bcm_uart_subver_table[i].name; i++) {
 			if (subver == bcm_uart_subver_table[i].subver) {
 				hw_name = bcm_uart_subver_table[i].name;
@@ -341,9 +429,15 @@
 		hw_name ? : "BCM", (subver & 0x7000) >> 13,
 		(subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
 
-	err = btbcm_patchram(hdev, fw_name);
-	if (err == -ENOENT)
+	err = request_firmware(&fw, fw_name, &hdev->dev);
+	if (err < 0) {
+		BT_INFO("%s: BCM: Patch %s not found", hdev->name, fw_name);
 		return 0;
+	}
+
+	btbcm_patchram(hdev, fw);
+
+	release_firmware(fw);
 
 	/* Reset */
 	err = btbcm_reset(hdev);
diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h
index eb6ab5f..d9e6b41 100644
--- a/drivers/bluetooth/btbcm.h
+++ b/drivers/bluetooth/btbcm.h
@@ -21,15 +21,61 @@
  *
  */
 
+#define BCM_UART_CLOCK_48MHZ	0x01
+#define BCM_UART_CLOCK_24MHZ	0x02
+
+struct bcm_update_uart_baud_rate {
+	__le16 zero;
+	__le32 baud_rate;
+} __packed;
+
+struct bcm_write_uart_clock_setting {
+	__u8 type;
+} __packed;
+
+struct bcm_set_sleep_mode {
+	__u8 sleep_mode;
+	__u8 idle_host;
+	__u8 idle_dev;
+	__u8 bt_wake_active;
+	__u8 host_wake_active;
+	__u8 allow_host_sleep;
+	__u8 combine_modes;
+	__u8 tristate_control;
+	__u8 usb_auto_sleep;
+	__u8 usb_resume_timeout;
+	__u8 pulsed_host_wake;
+	__u8 break_to_host;
+} __packed;
+
+struct bcm_set_pcm_int_params {
+	__u8 routing;
+	__u8 rate;
+	__u8 frame_sync;
+	__u8 sync_mode;
+	__u8 clock_mode;
+} __packed;
+
+struct bcm_set_pcm_format_params {
+	__u8 lsb_first;
+	__u8 fill_value;
+	__u8 fill_method;
+	__u8 fill_num;
+	__u8 right_justify;
+} __packed;
+
 #if IS_ENABLED(CONFIG_BT_BCM)
 
 int btbcm_check_bdaddr(struct hci_dev *hdev);
 int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
-int btbcm_patchram(struct hci_dev *hdev, const char *firmware);
+int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw);
 
 int btbcm_setup_patchram(struct hci_dev *hdev);
 int btbcm_setup_apple(struct hci_dev *hdev);
 
+int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len);
+int btbcm_finalize(struct hci_dev *hdev);
+
 #else
 
 static inline int btbcm_check_bdaddr(struct hci_dev *hdev)
@@ -42,7 +88,7 @@
 	return -EOPNOTSUPP;
 }
 
-static inline int btbcm_patchram(struct hci_dev *hdev, const char *firmware)
+static inline int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw)
 {
 	return -EOPNOTSUPP;
 }
@@ -57,4 +103,15 @@
 	return 0;
 }
 
+static inline int btbcm_initialize(struct hci_dev *hdev, char *fw_name,
+				   size_t len)
+{
+	return 0;
+}
+
+static inline int btbcm_finalize(struct hci_dev *hdev)
+{
+	return 0;
+}
+
 #endif
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 2d43d42..828f2f8 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -53,12 +53,6 @@
 	}
 
 	bda = (struct hci_rp_read_bd_addr *)skb->data;
-	if (bda->status) {
-		BT_ERR("%s: Intel device address result failed (%02x)",
-		       hdev->name, bda->status);
-		kfree_skb(skb);
-		return -bt_to_errno(bda->status);
-	}
 
 	/* For some Intel based controllers, the default Bluetooth device
 	 * address 00:03:19:9E:8B:00 can be found. These controllers are
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 01d6da5..b9a8119 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -1217,7 +1217,7 @@
 	unsigned int reg, reg_start, reg_end;
 	enum rdwr_status stat;
 	u8 *dbg_ptr, *end_ptr, *fw_dump_data, *fw_dump_ptr;
-	u8 dump_num, idx, i, read_reg, doneflag = 0;
+	u8 dump_num = 0, idx, i, read_reg, doneflag = 0;
 	u32 memory_size, fw_dump_len = 0;
 
 	/* dump sdio register first */
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
new file mode 100644
index 0000000..8428893
--- /dev/null
+++ b/drivers/bluetooth/btrtl.c
@@ -0,0 +1,390 @@
+/*
+ *  Bluetooth support for Realtek devices
+ *
+ *  Copyright (C) 2015 Endless Mobile, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <asm/unaligned.h>
+#include <linux/usb.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btrtl.h"
+
+#define VERSION "0.1"
+
+#define RTL_EPATCH_SIGNATURE	"Realtech"
+#define RTL_ROM_LMP_3499	0x3499
+#define RTL_ROM_LMP_8723A	0x1200
+#define RTL_ROM_LMP_8723B	0x8723
+#define RTL_ROM_LMP_8821A	0x8821
+#define RTL_ROM_LMP_8761A	0x8761
+
+static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
+{
+	struct rtl_rom_version_evt *rom_version;
+	struct sk_buff *skb;
+
+	/* Read RTL ROM version command */
+	skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		BT_ERR("%s: Read ROM version failed (%ld)",
+		       hdev->name, PTR_ERR(skb));
+		return PTR_ERR(skb);
+	}
+
+	if (skb->len != sizeof(*rom_version)) {
+		BT_ERR("%s: RTL version event length mismatch", hdev->name);
+		kfree_skb(skb);
+		return -EIO;
+	}
+
+	rom_version = (struct rtl_rom_version_evt *)skb->data;
+	BT_INFO("%s: rom_version status=%x version=%x",
+		hdev->name, rom_version->status, rom_version->version);
+
+	*version = rom_version->version;
+
+	kfree_skb(skb);
+	return 0;
+}
+
+static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
+				   const struct firmware *fw,
+				   unsigned char **_buf)
+{
+	const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
+	struct rtl_epatch_header *epatch_info;
+	unsigned char *buf;
+	int i, ret, len;
+	size_t min_size;
+	u8 opcode, length, data, rom_version = 0;
+	int project_id = -1;
+	const unsigned char *fwptr, *chip_id_base;
+	const unsigned char *patch_length_base, *patch_offset_base;
+	u32 patch_offset = 0;
+	u16 patch_length, num_patches;
+	const u16 project_id_to_lmp_subver[] = {
+		RTL_ROM_LMP_8723A,
+		RTL_ROM_LMP_8723B,
+		RTL_ROM_LMP_8821A,
+		RTL_ROM_LMP_8761A
+	};
+
+	ret = rtl_read_rom_version(hdev, &rom_version);
+	if (ret)
+		return ret;
+
+	min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
+	if (fw->size < min_size)
+		return -EINVAL;
+
+	fwptr = fw->data + fw->size - sizeof(extension_sig);
+	if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) {
+		BT_ERR("%s: extension section signature mismatch", hdev->name);
+		return -EINVAL;
+	}
+
+	/* Loop from the end of the firmware parsing instructions, until
+	 * we find an instruction that identifies the "project ID" for the
+	 * hardware supported by this firmware file.
+	 * Once we have that, we double-check that the project_id is suitable
+	 * for the hardware we are working with.
+	 */
+	while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) {
+		opcode = *--fwptr;
+		length = *--fwptr;
+		data = *--fwptr;
+
+		BT_DBG("check op=%x len=%x data=%x", opcode, length, data);
+
+		if (opcode == 0xff) /* EOF */
+			break;
+
+		if (length == 0) {
+			BT_ERR("%s: found instruction with length 0",
+			       hdev->name);
+			return -EINVAL;
+		}
+
+		if (opcode == 0 && length == 1) {
+			project_id = data;
+			break;
+		}
+
+		fwptr -= length;
+	}
+
+	if (project_id < 0) {
+		BT_ERR("%s: failed to find version instruction", hdev->name);
+		return -EINVAL;
+	}
+
+	if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) {
+		BT_ERR("%s: unknown project id %d", hdev->name, project_id);
+		return -EINVAL;
+	}
+
+	if (lmp_subver != project_id_to_lmp_subver[project_id]) {
+		BT_ERR("%s: firmware is for %x but this is a %x", hdev->name,
+		       project_id_to_lmp_subver[project_id], lmp_subver);
+		return -EINVAL;
+	}
+
+	epatch_info = (struct rtl_epatch_header *)fw->data;
+	if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) {
+		BT_ERR("%s: bad EPATCH signature", hdev->name);
+		return -EINVAL;
+	}
+
+	num_patches = le16_to_cpu(epatch_info->num_patches);
+	BT_DBG("fw_version=%x, num_patches=%d",
+	       le32_to_cpu(epatch_info->fw_version), num_patches);
+
+	/* After the rtl_epatch_header there is a funky patch metadata section.
+	 * Assuming 2 patches, the layout is:
+	 * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2
+	 *
+	 * Find the right patch for this chip.
+	 */
+	min_size += 8 * num_patches;
+	if (fw->size < min_size)
+		return -EINVAL;
+
+	chip_id_base = fw->data + sizeof(struct rtl_epatch_header);
+	patch_length_base = chip_id_base + (sizeof(u16) * num_patches);
+	patch_offset_base = patch_length_base + (sizeof(u16) * num_patches);
+	for (i = 0; i < num_patches; i++) {
+		u16 chip_id = get_unaligned_le16(chip_id_base +
+						 (i * sizeof(u16)));
+		if (chip_id == rom_version + 1) {
+			patch_length = get_unaligned_le16(patch_length_base +
+							  (i * sizeof(u16)));
+			patch_offset = get_unaligned_le32(patch_offset_base +
+							  (i * sizeof(u32)));
+			break;
+		}
+	}
+
+	if (!patch_offset) {
+		BT_ERR("%s: didn't find patch for chip id %d",
+		       hdev->name, rom_version);
+		return -EINVAL;
+	}
+
+	BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i);
+	min_size = patch_offset + patch_length;
+	if (fw->size < min_size)
+		return -EINVAL;
+
+	/* Copy the firmware into a new buffer and write the version at
+	 * the end.
+	 */
+	len = patch_length;
+	buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
+
+	*_buf = buf;
+	return len;
+}
+
+static int rtl_download_firmware(struct hci_dev *hdev,
+				 const unsigned char *data, int fw_len)
+{
+	struct rtl_download_cmd *dl_cmd;
+	int frag_num = fw_len / RTL_FRAG_LEN + 1;
+	int frag_len = RTL_FRAG_LEN;
+	int ret = 0;
+	int i;
+
+	dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL);
+	if (!dl_cmd)
+		return -ENOMEM;
+
+	for (i = 0; i < frag_num; i++) {
+		struct sk_buff *skb;
+
+		BT_DBG("download fw (%d/%d)", i, frag_num);
+
+		dl_cmd->index = i;
+		if (i == (frag_num - 1)) {
+			dl_cmd->index |= 0x80; /* data end */
+			frag_len = fw_len % RTL_FRAG_LEN;
+		}
+		memcpy(dl_cmd->data, data, frag_len);
+
+		/* Send download command */
+		skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd,
+				     HCI_INIT_TIMEOUT);
+		if (IS_ERR(skb)) {
+			BT_ERR("%s: download fw command failed (%ld)",
+			       hdev->name, PTR_ERR(skb));
+			ret = -PTR_ERR(skb);
+			goto out;
+		}
+
+		if (skb->len != sizeof(struct rtl_download_response)) {
+			BT_ERR("%s: download fw event length mismatch",
+			       hdev->name);
+			kfree_skb(skb);
+			ret = -EIO;
+			goto out;
+		}
+
+		kfree_skb(skb);
+		data += RTL_FRAG_LEN;
+	}
+
+out:
+	kfree(dl_cmd);
+	return ret;
+}
+
+static int btrtl_setup_rtl8723a(struct hci_dev *hdev)
+{
+	const struct firmware *fw;
+	int ret;
+
+	BT_INFO("%s: rtl: loading rtl_bt/rtl8723a_fw.bin", hdev->name);
+	ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &hdev->dev);
+	if (ret < 0) {
+		BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name);
+		return ret;
+	}
+
+	if (fw->size < 8) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Check that the firmware doesn't have the epatch signature
+	 * (which is only for RTL8723B and newer).
+	 */
+	if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) {
+		BT_ERR("%s: unexpected EPATCH signature!", hdev->name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = rtl_download_firmware(hdev, fw->data, fw->size);
+
+out:
+	release_firmware(fw);
+	return ret;
+}
+
+static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
+				const char *fw_name)
+{
+	unsigned char *fw_data = NULL;
+	const struct firmware *fw;
+	int ret;
+
+	BT_INFO("%s: rtl: loading %s", hdev->name, fw_name);
+	ret = request_firmware(&fw, fw_name, &hdev->dev);
+	if (ret < 0) {
+		BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
+		return ret;
+	}
+
+	ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data);
+	if (ret < 0)
+		goto out;
+
+	ret = rtl_download_firmware(hdev, fw_data, ret);
+	kfree(fw_data);
+	if (ret < 0)
+		goto out;
+
+out:
+	release_firmware(fw);
+	return ret;
+}
+
+static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev)
+{
+	struct sk_buff *skb;
+
+	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
+			     HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
+		       hdev->name, PTR_ERR(skb));
+		return skb;
+	}
+
+	if (skb->len != sizeof(struct hci_rp_read_local_version)) {
+		BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
+		       hdev->name);
+		kfree_skb(skb);
+		return ERR_PTR(-EIO);
+	}
+
+	return skb;
+}
+
+int btrtl_setup_realtek(struct hci_dev *hdev)
+{
+	struct sk_buff *skb;
+	struct hci_rp_read_local_version *resp;
+	u16 lmp_subver;
+
+	skb = btrtl_read_local_version(hdev);
+	if (IS_ERR(skb))
+		return -PTR_ERR(skb);
+
+	resp = (struct hci_rp_read_local_version *)skb->data;
+	BT_INFO("%s: rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
+		"lmp_subver=%04x", hdev->name, resp->hci_ver, resp->hci_rev,
+		resp->lmp_ver, resp->lmp_subver);
+
+	lmp_subver = le16_to_cpu(resp->lmp_subver);
+	kfree_skb(skb);
+
+	/* Match a set of subver values that correspond to stock firmware,
+	 * which is not compatible with standard btusb.
+	 * If matched, upload an alternative firmware that does conform to
+	 * standard btusb. Once that firmware is uploaded, the subver changes
+	 * to a different value.
+	 */
+	switch (lmp_subver) {
+	case RTL_ROM_LMP_8723A:
+	case RTL_ROM_LMP_3499:
+		return btrtl_setup_rtl8723a(hdev);
+	case RTL_ROM_LMP_8723B:
+		return btrtl_setup_rtl8723b(hdev, lmp_subver,
+					    "rtl_bt/rtl8723b_fw.bin");
+	case RTL_ROM_LMP_8821A:
+		return btrtl_setup_rtl8723b(hdev, lmp_subver,
+					    "rtl_bt/rtl8821a_fw.bin");
+	case RTL_ROM_LMP_8761A:
+		return btrtl_setup_rtl8723b(hdev, lmp_subver,
+					    "rtl_bt/rtl8761a_fw.bin");
+	default:
+		BT_INFO("rtl: assuming no firmware upload needed.");
+		return 0;
+	}
+}
+EXPORT_SYMBOL_GPL(btrtl_setup_realtek);
+
+MODULE_AUTHOR("Daniel Drake <drake@endlessm.com>");
+MODULE_DESCRIPTION("Bluetooth support for Realtek devices ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h
new file mode 100644
index 0000000..38ffe48
--- /dev/null
+++ b/drivers/bluetooth/btrtl.h
@@ -0,0 +1,52 @@
+/*
+ *  Bluetooth support for Realtek devices
+ *
+ *  Copyright (C) 2015 Endless Mobile, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ */
+
+#define RTL_FRAG_LEN 252
+
+struct rtl_download_cmd {
+	__u8 index;
+	__u8 data[RTL_FRAG_LEN];
+} __packed;
+
+struct rtl_download_response {
+	__u8 status;
+	__u8 index;
+} __packed;
+
+struct rtl_rom_version_evt {
+	__u8 status;
+	__u8 version;
+} __packed;
+
+struct rtl_epatch_header {
+	__u8 signature[8];
+	__le32 fw_version;
+	__le16 num_patches;
+} __packed;
+
+#if IS_ENABLED(CONFIG_BT_RTL)
+
+int btrtl_setup_realtek(struct hci_dev *hdev);
+
+#else
+
+static inline int btrtl_setup_realtek(struct hci_dev *hdev)
+{
+	return -EOPNOTSUPP;
+}
+
+#endif
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3c10d4d..b4cf8d9 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -31,13 +31,14 @@
 
 #include "btintel.h"
 #include "btbcm.h"
+#include "btrtl.h"
 
 #define VERSION "0.8"
 
 static bool disable_scofix;
 static bool force_scofix;
 
-static bool reset = 1;
+static bool reset = true;
 
 static struct usb_driver btusb_driver;
 
@@ -178,6 +179,7 @@
 	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
@@ -186,6 +188,7 @@
 	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
@@ -211,6 +214,7 @@
 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -265,7 +269,7 @@
 	{ USB_DEVICE(0x0e5e, 0x6622), .driver_info = BTUSB_BROKEN_ISOC },
 
 	/* Roper Class 1 Bluetooth Dongle (Silicon Wave based) */
-	{ USB_DEVICE(0x1300, 0x0001), .driver_info = BTUSB_SWAVE },
+	{ USB_DEVICE(0x1310, 0x0001), .driver_info = BTUSB_SWAVE },
 
 	/* Digianswer devices */
 	{ USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER },
@@ -330,6 +334,7 @@
 #define BTUSB_FIRMWARE_LOADED	7
 #define BTUSB_FIRMWARE_FAILED	8
 #define BTUSB_BOOTING		9
+#define BTUSB_RESET_RESUME	10
 
 struct btusb_data {
 	struct hci_dev       *hdev;
@@ -1298,28 +1303,6 @@
 	usb_autopm_put_interface(data->intf);
 }
 
-static struct sk_buff *btusb_read_local_version(struct hci_dev *hdev)
-{
-	struct sk_buff *skb;
-
-	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
-			     HCI_INIT_TIMEOUT);
-	if (IS_ERR(skb)) {
-		BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
-		       hdev->name, PTR_ERR(skb));
-		return skb;
-	}
-
-	if (skb->len != sizeof(struct hci_rp_read_local_version)) {
-		BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
-		       hdev->name);
-		kfree_skb(skb);
-		return ERR_PTR(-EIO);
-	}
-
-	return skb;
-}
-
 static int btusb_setup_bcm92035(struct hci_dev *hdev)
 {
 	struct sk_buff *skb;
@@ -1340,408 +1323,40 @@
 {
 	struct hci_rp_read_local_version *rp;
 	struct sk_buff *skb;
-	int ret;
 
 	BT_DBG("%s", hdev->name);
 
-	skb = btusb_read_local_version(hdev);
-	if (IS_ERR(skb))
-		return -PTR_ERR(skb);
-
-	rp = (struct hci_rp_read_local_version *)skb->data;
-
-	if (!rp->status) {
-		if (le16_to_cpu(rp->manufacturer) != 10) {
-			/* Clear the reset quirk since this is not an actual
-			 * early Bluetooth 1.1 device from CSR.
-			 */
-			clear_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
-
-			/* These fake CSR controllers have all a broken
-			 * stored link key handling and so just disable it.
-			 */
-			set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY,
-				&hdev->quirks);
-		}
-	}
-
-	ret = -bt_to_errno(rp->status);
-
-	kfree_skb(skb);
-
-	return ret;
-}
-
-#define RTL_FRAG_LEN 252
-
-struct rtl_download_cmd {
-	__u8 index;
-	__u8 data[RTL_FRAG_LEN];
-} __packed;
-
-struct rtl_download_response {
-	__u8 status;
-	__u8 index;
-} __packed;
-
-struct rtl_rom_version_evt {
-	__u8 status;
-	__u8 version;
-} __packed;
-
-struct rtl_epatch_header {
-	__u8 signature[8];
-	__le32 fw_version;
-	__le16 num_patches;
-} __packed;
-
-#define RTL_EPATCH_SIGNATURE	"Realtech"
-#define RTL_ROM_LMP_3499	0x3499
-#define RTL_ROM_LMP_8723A	0x1200
-#define RTL_ROM_LMP_8723B	0x8723
-#define RTL_ROM_LMP_8821A	0x8821
-#define RTL_ROM_LMP_8761A	0x8761
-
-static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
-{
-	struct rtl_rom_version_evt *rom_version;
-	struct sk_buff *skb;
-	int ret;
-
-	/* Read RTL ROM version command */
-	skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT);
+	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
+			     HCI_INIT_TIMEOUT);
 	if (IS_ERR(skb)) {
-		BT_ERR("%s: Read ROM version failed (%ld)",
-		       hdev->name, PTR_ERR(skb));
-		return PTR_ERR(skb);
+		int err = PTR_ERR(skb);
+		BT_ERR("%s: CSR: Local version failed (%d)", hdev->name, err);
+		return err;
 	}
 
-	if (skb->len != sizeof(*rom_version)) {
-		BT_ERR("%s: RTL version event length mismatch", hdev->name);
+	if (skb->len != sizeof(struct hci_rp_read_local_version)) {
+		BT_ERR("%s: CSR: Local version length mismatch", hdev->name);
 		kfree_skb(skb);
 		return -EIO;
 	}
 
-	rom_version = (struct rtl_rom_version_evt *)skb->data;
-	BT_INFO("%s: rom_version status=%x version=%x",
-		hdev->name, rom_version->status, rom_version->version);
+	rp = (struct hci_rp_read_local_version *)skb->data;
 
-	ret = rom_version->status;
-	if (ret == 0)
-		*version = rom_version->version;
+	if (le16_to_cpu(rp->manufacturer) != 10) {
+		/* Clear the reset quirk since this is not an actual
+		 * early Bluetooth 1.1 device from CSR.
+		 */
+		clear_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+
+		/* These fake CSR controllers all have broken stored
+		 * link key handling, so just disable it.
+		 */
+		set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+	}
 
 	kfree_skb(skb);
-	return ret;
-}
 
-static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
-				   const struct firmware *fw,
-				   unsigned char **_buf)
-{
-	const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
-	struct rtl_epatch_header *epatch_info;
-	unsigned char *buf;
-	int i, ret, len;
-	size_t min_size;
-	u8 opcode, length, data, rom_version = 0;
-	int project_id = -1;
-	const unsigned char *fwptr, *chip_id_base;
-	const unsigned char *patch_length_base, *patch_offset_base;
-	u32 patch_offset = 0;
-	u16 patch_length, num_patches;
-	const u16 project_id_to_lmp_subver[] = {
-		RTL_ROM_LMP_8723A,
-		RTL_ROM_LMP_8723B,
-		RTL_ROM_LMP_8821A,
-		RTL_ROM_LMP_8761A
-	};
-
-	ret = rtl_read_rom_version(hdev, &rom_version);
-	if (ret)
-		return -bt_to_errno(ret);
-
-	min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
-	if (fw->size < min_size)
-		return -EINVAL;
-
-	fwptr = fw->data + fw->size - sizeof(extension_sig);
-	if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) {
-		BT_ERR("%s: extension section signature mismatch", hdev->name);
-		return -EINVAL;
-	}
-
-	/* Loop from the end of the firmware parsing instructions, until
-	 * we find an instruction that identifies the "project ID" for the
-	 * hardware supported by this firwmare file.
-	 * Once we have that, we double-check that that project_id is suitable
-	 * for the hardware we are working with.
-	 */
-	while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) {
-		opcode = *--fwptr;
-		length = *--fwptr;
-		data = *--fwptr;
-
-		BT_DBG("check op=%x len=%x data=%x", opcode, length, data);
-
-		if (opcode == 0xff) /* EOF */
-			break;
-
-		if (length == 0) {
-			BT_ERR("%s: found instruction with length 0",
-			       hdev->name);
-			return -EINVAL;
-		}
-
-		if (opcode == 0 && length == 1) {
-			project_id = data;
-			break;
-		}
-
-		fwptr -= length;
-	}
-
-	if (project_id < 0) {
-		BT_ERR("%s: failed to find version instruction", hdev->name);
-		return -EINVAL;
-	}
-
-	if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) {
-		BT_ERR("%s: unknown project id %d", hdev->name, project_id);
-		return -EINVAL;
-	}
-
-	if (lmp_subver != project_id_to_lmp_subver[project_id]) {
-		BT_ERR("%s: firmware is for %x but this is a %x", hdev->name,
-		       project_id_to_lmp_subver[project_id], lmp_subver);
-		return -EINVAL;
-	}
-
-	epatch_info = (struct rtl_epatch_header *)fw->data;
-	if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) {
-		BT_ERR("%s: bad EPATCH signature", hdev->name);
-		return -EINVAL;
-	}
-
-	num_patches = le16_to_cpu(epatch_info->num_patches);
-	BT_DBG("fw_version=%x, num_patches=%d",
-	       le32_to_cpu(epatch_info->fw_version), num_patches);
-
-	/* After the rtl_epatch_header there is a funky patch metadata section.
-	 * Assuming 2 patches, the layout is:
-	 * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2
-	 *
-	 * Find the right patch for this chip.
-	 */
-	min_size += 8 * num_patches;
-	if (fw->size < min_size)
-		return -EINVAL;
-
-	chip_id_base = fw->data + sizeof(struct rtl_epatch_header);
-	patch_length_base = chip_id_base + (sizeof(u16) * num_patches);
-	patch_offset_base = patch_length_base + (sizeof(u16) * num_patches);
-	for (i = 0; i < num_patches; i++) {
-		u16 chip_id = get_unaligned_le16(chip_id_base +
-						 (i * sizeof(u16)));
-		if (chip_id == rom_version + 1) {
-			patch_length = get_unaligned_le16(patch_length_base +
-							  (i * sizeof(u16)));
-			patch_offset = get_unaligned_le32(patch_offset_base +
-							  (i * sizeof(u32)));
-			break;
-		}
-	}
-
-	if (!patch_offset) {
-		BT_ERR("%s: didn't find patch for chip id %d",
-		       hdev->name, rom_version);
-		return -EINVAL;
-	}
-
-	BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i);
-	min_size = patch_offset + patch_length;
-	if (fw->size < min_size)
-		return -EINVAL;
-
-	/* Copy the firmware into a new buffer and write the version at
-	 * the end.
-	 */
-	len = patch_length;
-	buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
-
-	*_buf = buf;
-	return len;
-}
-
-static int rtl_download_firmware(struct hci_dev *hdev,
-				 const unsigned char *data, int fw_len)
-{
-	struct rtl_download_cmd *dl_cmd;
-	int frag_num = fw_len / RTL_FRAG_LEN + 1;
-	int frag_len = RTL_FRAG_LEN;
-	int ret = 0;
-	int i;
-
-	dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL);
-	if (!dl_cmd)
-		return -ENOMEM;
-
-	for (i = 0; i < frag_num; i++) {
-		struct rtl_download_response *dl_resp;
-		struct sk_buff *skb;
-
-		BT_DBG("download fw (%d/%d)", i, frag_num);
-
-		dl_cmd->index = i;
-		if (i == (frag_num - 1)) {
-			dl_cmd->index |= 0x80; /* data end */
-			frag_len = fw_len % RTL_FRAG_LEN;
-		}
-		memcpy(dl_cmd->data, data, frag_len);
-
-		/* Send download command */
-		skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd,
-				     HCI_INIT_TIMEOUT);
-		if (IS_ERR(skb)) {
-			BT_ERR("%s: download fw command failed (%ld)",
-			       hdev->name, PTR_ERR(skb));
-			ret = -PTR_ERR(skb);
-			goto out;
-		}
-
-		if (skb->len != sizeof(*dl_resp)) {
-			BT_ERR("%s: download fw event length mismatch",
-			       hdev->name);
-			kfree_skb(skb);
-			ret = -EIO;
-			goto out;
-		}
-
-		dl_resp = (struct rtl_download_response *)skb->data;
-		if (dl_resp->status != 0) {
-			kfree_skb(skb);
-			ret = bt_to_errno(dl_resp->status);
-			goto out;
-		}
-
-		kfree_skb(skb);
-		data += RTL_FRAG_LEN;
-	}
-
-out:
-	kfree(dl_cmd);
-	return ret;
-}
-
-static int btusb_setup_rtl8723a(struct hci_dev *hdev)
-{
-	struct btusb_data *data = dev_get_drvdata(&hdev->dev);
-	struct usb_device *udev = interface_to_usbdev(data->intf);
-	const struct firmware *fw;
-	int ret;
-
-	BT_INFO("%s: rtl: loading rtl_bt/rtl8723a_fw.bin", hdev->name);
-	ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &udev->dev);
-	if (ret < 0) {
-		BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name);
-		return ret;
-	}
-
-	if (fw->size < 8) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	/* Check that the firmware doesn't have the epatch signature
-	 * (which is only for RTL8723B and newer).
-	 */
-	if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) {
-		BT_ERR("%s: unexpected EPATCH signature!", hdev->name);
-		ret = -EINVAL;
-		goto out;
-	}
-
-	ret = rtl_download_firmware(hdev, fw->data, fw->size);
-
-out:
-	release_firmware(fw);
-	return ret;
-}
-
-static int btusb_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
-				const char *fw_name)
-{
-	struct btusb_data *data = dev_get_drvdata(&hdev->dev);
-	struct usb_device *udev = interface_to_usbdev(data->intf);
-	unsigned char *fw_data = NULL;
-	const struct firmware *fw;
-	int ret;
-
-	BT_INFO("%s: rtl: loading %s", hdev->name, fw_name);
-	ret = request_firmware(&fw, fw_name, &udev->dev);
-	if (ret < 0) {
-		BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
-		return ret;
-	}
-
-	ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data);
-	if (ret < 0)
-		goto out;
-
-	ret = rtl_download_firmware(hdev, fw_data, ret);
-	kfree(fw_data);
-	if (ret < 0)
-		goto out;
-
-out:
-	release_firmware(fw);
-	return ret;
-}
-
-static int btusb_setup_realtek(struct hci_dev *hdev)
-{
-	struct sk_buff *skb;
-	struct hci_rp_read_local_version *resp;
-	u16 lmp_subver;
-
-	skb = btusb_read_local_version(hdev);
-	if (IS_ERR(skb))
-		return -PTR_ERR(skb);
-
-	resp = (struct hci_rp_read_local_version *)skb->data;
-	BT_INFO("%s: rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
-		"lmp_subver=%04x", hdev->name, resp->hci_ver, resp->hci_rev,
-		resp->lmp_ver, resp->lmp_subver);
-
-	lmp_subver = le16_to_cpu(resp->lmp_subver);
-	kfree_skb(skb);
-
-	/* Match a set of subver values that correspond to stock firmware,
-	 * which is not compatible with standard btusb.
-	 * If matched, upload an alternative firmware that does conform to
-	 * standard btusb. Once that firmware is uploaded, the subver changes
-	 * to a different value.
-	 */
-	switch (lmp_subver) {
-	case RTL_ROM_LMP_8723A:
-	case RTL_ROM_LMP_3499:
-		return btusb_setup_rtl8723a(hdev);
-	case RTL_ROM_LMP_8723B:
-		return btusb_setup_rtl8723b(hdev, lmp_subver,
-					    "rtl_bt/rtl8723b_fw.bin");
-	case RTL_ROM_LMP_8821A:
-		return btusb_setup_rtl8723b(hdev, lmp_subver,
-					    "rtl_bt/rtl8821a_fw.bin");
-	case RTL_ROM_LMP_8761A:
-		return btusb_setup_rtl8723b(hdev, lmp_subver,
-					    "rtl_bt/rtl8761a_fw.bin");
-	default:
-		BT_INFO("rtl: assuming no firmware upload needed.");
-		return 0;
-	}
+	return 0;
 }
 
 static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
@@ -1951,12 +1566,6 @@
 	}
 
 	ver = (struct intel_version *)skb->data;
-	if (ver->status) {
-		BT_ERR("%s Intel fw version event failed (%02x)", hdev->name,
-		       ver->status);
-		kfree_skb(skb);
-		return -bt_to_errno(ver->status);
-	}
 
 	BT_INFO("%s: read Intel version: %02x%02x%02x%02x%02x%02x%02x%02x%02x",
 		hdev->name, ver->hw_platform, ver->hw_variant,
@@ -1990,6 +1599,8 @@
 	}
 	fw_ptr = fw->data;
 
+	kfree_skb(skb);
+
 	/* This Intel specific command enables the manufacturer mode of the
 	 * controller.
 	 *
@@ -2004,15 +1615,6 @@
 		return PTR_ERR(skb);
 	}
 
-	if (skb->data[0]) {
-		u8 evt_status = skb->data[0];
-
-		BT_ERR("%s enable Intel manufacturer mode event failed (%02x)",
-		       hdev->name, evt_status);
-		kfree_skb(skb);
-		release_firmware(fw);
-		return -bt_to_errno(evt_status);
-	}
 	kfree_skb(skb);
 
 	disable_patch = 1;
@@ -2331,6 +1933,7 @@
 	struct intel_boot_params *params;
 	const struct firmware *fw;
 	const u8 *fw_ptr;
+	u32 frag_len;
 	char fwname[64];
 	ktime_t calltime, delta, rettime;
 	unsigned long long duration;
@@ -2358,13 +1961,6 @@
 	}
 
 	ver = (struct intel_version *)skb->data;
-	if (ver->status) {
-		BT_ERR("%s: Intel version command failure (%02x)",
-		       hdev->name, ver->status);
-		err = -bt_to_errno(ver->status);
-		kfree_skb(skb);
-		return err;
-	}
 
 	/* The hardware platform number has a fixed value of 0x37 and
 	 * for now only accept this single value.
@@ -2439,13 +2035,6 @@
 	}
 
 	params = (struct intel_boot_params *)skb->data;
-	if (params->status) {
-		BT_ERR("%s: Intel boot parameters command failure (%02x)",
-		       hdev->name, params->status);
-		err = -bt_to_errno(params->status);
-		kfree_skb(skb);
-		return err;
-	}
 
 	BT_INFO("%s: Device revision is %u", hdev->name,
 		le16_to_cpu(params->dev_revid));
@@ -2495,6 +2084,12 @@
 
 	BT_INFO("%s: Found device firmware: %s", hdev->name, fwname);
 
+	/* Save the DDC file name so that it can be applied once the
+	 * firmware download is done.
+	 */
+	snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.ddc",
+		 le16_to_cpu(params->dev_revid));
+
 	kfree_skb(skb);
 
 	if (fw->size < 644) {
@@ -2537,24 +2132,33 @@
 	}
 
 	fw_ptr = fw->data + 644;
+	frag_len = 0;
 
 	while (fw_ptr - fw->data < fw->size) {
-		struct hci_command_hdr *cmd = (void *)fw_ptr;
-		u8 cmd_len;
+		struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
 
-		cmd_len = sizeof(*cmd) + cmd->plen;
+		frag_len += sizeof(*cmd) + cmd->plen;
 
-		/* Send each command from the firmware data buffer as
-		 * a single Data fragment.
+		/* The parameter length of the secure send command requires
+		 * a 4 byte alignment. Conveniently, the firmware file
+		 * contains proper Intel_NOP commands to pad the fragments
+		 * as needed.
+		 *
+		 * Send each set of 4 byte aligned commands from the
+		 * firmware data buffer as a single Data fragment.
 		 */
-		err = btusb_intel_secure_send(hdev, 0x01, cmd_len, fw_ptr);
-		if (err < 0) {
-			BT_ERR("%s: Failed to send firmware data (%d)",
-			       hdev->name, err);
-			goto done;
-		}
+		if (!(frag_len % 4)) {
+			err = btusb_intel_secure_send(hdev, 0x01, frag_len,
+						      fw_ptr);
+			if (err < 0) {
+				BT_ERR("%s: Failed to send firmware data (%d)",
+				       hdev->name, err);
+				goto done;
+			}
 
-		fw_ptr += cmd_len;
+			fw_ptr += frag_len;
+			frag_len = 0;
+		}
 	}
 
 	set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
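
The rewritten loop batches complete HCI commands until the accumulated length reaches a 4 byte boundary before each secure-send. A condensed sketch of the same accumulation, outside the driver (fw/fw_size and send() are stand-ins; the real code calls btusb_intel_secure_send() with type 0x01):

static int sketch_send_aligned(const __u8 *fw, size_t fw_size,
			       int (*send)(const __u8 *, size_t))
{
	const __u8 *ptr = fw;
	size_t frag_len = 0;

	while ((size_t)(ptr - fw) < fw_size) {
		const struct hci_command_hdr *cmd =
			(const void *)(ptr + frag_len);

		/* 3 byte command header plus parameters */
		frag_len += sizeof(*cmd) + cmd->plen;

		/* The Intel_NOP padding in the file guarantees this
		 * eventually hits a multiple of four.
		 */
		if (!(frag_len % 4)) {
			int err = send(ptr, frag_len);

			if (err < 0)
				return err;
			ptr += frag_len;
			frag_len = 0;
		}
	}

	return 0;
}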
@@ -2647,6 +2251,43 @@
 
 	clear_bit(BTUSB_BOOTLOADER, &data->flags);
 
+	/* Once the device is running in operational mode, it needs to apply
+	 * the device configuration (DDC) parameters.
+	 *
+	 * The device can work without DDC parameters, so even if it fails
+	 * to load the file, no need to fail the setup.
+	 */
+	err = request_firmware_direct(&fw, fwname, &hdev->dev);
+	if (err < 0)
+		return 0;
+
+	BT_INFO("%s: Found Intel DDC parameters: %s", hdev->name, fwname);
+
+	fw_ptr = fw->data;
+
+	/* The DDC file contains one or more DDC structures, each with a
+	 * Length (1 byte), DDC ID (2 bytes) and DDC value (Length - 2).
+	 */
+	while (fw->size > fw_ptr - fw->data) {
+		u8 cmd_plen = fw_ptr[0] + sizeof(u8);
+
+		skb = __hci_cmd_sync(hdev, 0xfc8b, cmd_plen, fw_ptr,
+				     HCI_INIT_TIMEOUT);
+		if (IS_ERR(skb)) {
+			BT_ERR("%s: Failed to send Intel_Write_DDC (%ld)",
+			       hdev->name, PTR_ERR(skb));
+			release_firmware(fw);
+			return PTR_ERR(skb);
+		}
+
+		fw_ptr += cmd_plen;
+		kfree_skb(skb);
+	}
+
+	release_firmware(fw);
+
+	BT_INFO("%s: Applying Intel DDC parameters completed", hdev->name);
+
 	return 0;
 }
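
Each DDC entry is thus a small TLV: the leading Length byte counts the 2 byte ID plus the value, and cmd_plen adds one more for the Length byte itself, so the whole entry is handed to the Intel_Write_DDC command (0xfc8b). A hedged stand-alone sketch of the same walk (buf/len are hypothetical; ID byte order assumed little-endian for illustration):

static void sketch_walk_ddc(const __u8 *buf, size_t len)
{
	size_t off = 0;

	while (off < len) {
		__u8 entry_len = buf[off];	/* covers ID + value */
		__u16 ddc_id = buf[off + 1] | (buf[off + 2] << 8);

		pr_debug("ddc id=0x%04x value_len=%u\n",
			 ddc_id, entry_len - 2);

		/* The command takes the whole entry, Length byte
		 * included: entry_len + 1 bytes, like cmd_plen above.
		 */
		off += entry_len + 1;
	}
}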
 
@@ -2678,13 +2319,6 @@
 		return;
 	}
 
-	if (skb->data[0] != 0x00) {
-		BT_ERR("%s: Exception info command failure (%02x)",
-		       hdev->name, skb->data[0]);
-		kfree_skb(skb);
-		return;
-	}
-
 	BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
 
 	kfree_skb(skb);
@@ -2792,6 +2426,7 @@
 static const struct qca_device_info qca_devices_table[] = {
 	{ 0x00000100, 20, 4, 10 }, /* Rome 1.0 */
 	{ 0x00000101, 20, 4, 10 }, /* Rome 1.1 */
+	{ 0x00000200, 28, 4, 18 }, /* Rome 2.0 */
 	{ 0x00000201, 28, 4, 18 }, /* Rome 2.1 */
 	{ 0x00000300, 28, 4, 18 }, /* Rome 3.0 */
 	{ 0x00000302, 28, 4, 18 }, /* Rome 3.2 */
@@ -3175,8 +2810,17 @@
 		hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
 	}
 
-	if (id->driver_info & BTUSB_REALTEK)
-		hdev->setup = btusb_setup_realtek;
+#ifdef CONFIG_BT_HCIBTUSB_RTL
+	if (id->driver_info & BTUSB_REALTEK) {
+		hdev->setup = btrtl_setup_realtek;
+
+		/* Realtek devices lose their updated firmware over suspend,
+		 * but the USB hub doesn't notice any status change.
+		 * Explicitly request a device reset on resume.
+		 */
+		set_bit(BTUSB_RESET_RESUME, &data->flags);
+	}
+#endif
 
 	if (id->driver_info & BTUSB_AMP) {
 		/* AMP controllers do not support SCO packets */
@@ -3308,6 +2952,14 @@
 	btusb_stop_traffic(data);
 	usb_kill_anchored_urbs(&data->tx_anchor);
 
+	/* Optionally request a device reset on resume, but only when
+	 * wakeups are disabled. If wakeups are enabled we assume the
+	 * device will stay powered up throughout suspend.
+	 */
+	if (test_bit(BTUSB_RESET_RESUME, &data->flags) &&
+	    !device_may_wakeup(&data->udev->dev))
+		data->udev->reset_resume = 1;
+
 	return 0;
 }
 
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 55c135b..7a722df 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -22,7 +22,7 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  *
  */
-#define DEBUG
+
 #include <linux/platform_device.h>
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index ec8fa0e..6da5e4c 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -192,6 +192,7 @@
 	if (IS_ERR(ath->rx_skb)) {
 		int err = PTR_ERR(ath->rx_skb);
 		BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+		ath->rx_skb = NULL;
 		return err;
 	}
 
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 1ec0b4a..23523e1 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -24,6 +24,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
+#include <linux/firmware.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -36,6 +37,55 @@
 	struct sk_buff_head txq;
 };
 
+static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
+{
+	struct hci_dev *hdev = hu->hdev;
+	struct sk_buff *skb;
+	struct bcm_update_uart_baud_rate param;
+
+	if (speed > 3000000) {
+		struct bcm_write_uart_clock_setting clock;
+
+		clock.type = BCM_UART_CLOCK_48MHZ;
+
+		BT_DBG("%s: Set Controller clock (%d)", hdev->name, clock.type);
+
+		/* This Broadcom specific command changes the controller's
+		 * UART clock for baud rates above 3000000.
+		 */
+		skb = __hci_cmd_sync(hdev, 0xfc45, 1, &clock, HCI_INIT_TIMEOUT);
+		if (IS_ERR(skb)) {
+			int err = PTR_ERR(skb);
+			BT_ERR("%s: BCM: failed to write clock command (%d)",
+			       hdev->name, err);
+			return err;
+		}
+
+		kfree_skb(skb);
+	}
+
+	BT_DBG("%s: Set Controller UART speed to %d bit/s", hdev->name, speed);
+
+	param.zero = cpu_to_le16(0);
+	param.baud_rate = cpu_to_le32(speed);
+
+	/* This Broadcom specific command changes the controller's UART
+	 * baud rate.
+	 */
+	skb = __hci_cmd_sync(hdev, 0xfc18, sizeof(param), &param,
+			     HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		int err = PTR_ERR(skb);
+		BT_ERR("%s: BCM: failed to write update baudrate command (%d)",
+		       hdev->name, err);
+		return err;
+	}
+
+	kfree_skb(skb);
+
+	return 0;
+}
+
 static int bcm_open(struct hci_uart *hu)
 {
 	struct bcm_data *bcm;
@@ -79,11 +129,62 @@
 
 static int bcm_setup(struct hci_uart *hu)
 {
+	char fw_name[64];
+	const struct firmware *fw;
+	unsigned int speed;
+	int err;
+
 	BT_DBG("hu %p", hu);
 
 	hu->hdev->set_bdaddr = btbcm_set_bdaddr;
 
-	return btbcm_setup_patchram(hu->hdev);
+	err = btbcm_initialize(hu->hdev, fw_name, sizeof(fw_name));
+	if (err)
+		return err;
+
+	err = request_firmware(&fw, fw_name, &hu->hdev->dev);
+	if (err < 0) {
+		BT_INFO("%s: BCM: Patch %s not found", hu->hdev->name, fw_name);
+		return 0;
+	}
+
+	err = btbcm_patchram(hu->hdev, fw);
+	if (err) {
+		BT_INFO("%s: BCM: Patch failed (%d)", hu->hdev->name, err);
+		goto finalize;
+	}
+
+	/* Init speed if any */
+	if (hu->init_speed)
+		speed = hu->init_speed;
+	else if (hu->proto->init_speed)
+		speed = hu->proto->init_speed;
+	else
+		speed = 0;
+
+	if (speed)
+		hci_uart_set_baudrate(hu, speed);
+
+	/* Operational speed if any */
+	if (hu->oper_speed)
+		speed = hu->oper_speed;
+	else if (hu->proto->oper_speed)
+		speed = hu->proto->oper_speed;
+	else
+		speed = 0;
+
+	if (speed) {
+		err = bcm_set_baudrate(hu, speed);
+		if (!err)
+			hci_uart_set_baudrate(hu, speed);
+	}
+
+finalize:
+	release_firmware(fw);
+
+	err = btbcm_finalize(hu->hdev);
+
+	return err;
 }
 
 static const struct h4_recv_pkt bcm_recv_pkts[] = {
@@ -104,6 +205,7 @@
 	if (IS_ERR(bcm->rx_skb)) {
 		int err = PTR_ERR(bcm->rx_skb);
 		BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+		bcm->rx_skb = NULL;
 		return err;
 	}
 
@@ -133,10 +235,13 @@
 static const struct hci_uart_proto bcm_proto = {
 	.id		= HCI_UART_BCM,
 	.name		= "BCM",
+	.init_speed	= 115200,
+	.oper_speed	= 4000000,
 	.open		= bcm_open,
 	.close		= bcm_close,
 	.flush		= bcm_flush,
 	.setup		= bcm_setup,
+	.set_baudrate	= bcm_set_baudrate,
 	.recv		= bcm_recv,
 	.enqueue	= bcm_enqueue,
 	.dequeue	= bcm_dequeue,
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index dc8e3d4..d0b615a 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -47,8 +47,8 @@
 
 #include "hci_uart.h"
 
-static bool txcrc = 1;
-static bool hciextn = 1;
+static bool txcrc = true;
+static bool hciextn = true;
 
 #define BCSP_TXWINSIZE	4
 
@@ -436,7 +436,7 @@
 			break;
 		default:
 			memcpy(skb_put(bcsp->rx_skb, 1), &byte, 1);
-			if ((bcsp->rx_skb-> data[0] & 0x40) != 0 && 
+			if ((bcsp->rx_skb->data[0] & 0x40) != 0 &&
 					bcsp->rx_state != BCSP_W4_CRC)
 				bcsp_crc_update(&bcsp->message_crc, byte);
 			bcsp->rx_count--;
@@ -447,24 +447,24 @@
 		switch (byte) {
 		case 0xdc:
 			memcpy(skb_put(bcsp->rx_skb, 1), &c0, 1);
-			if ((bcsp->rx_skb-> data[0] & 0x40) != 0 && 
+			if ((bcsp->rx_skb->data[0] & 0x40) != 0 &&
 					bcsp->rx_state != BCSP_W4_CRC)
-				bcsp_crc_update(&bcsp-> message_crc, 0xc0);
+				bcsp_crc_update(&bcsp->message_crc, 0xc0);
 			bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC;
 			bcsp->rx_count--;
 			break;
 
 		case 0xdd:
 			memcpy(skb_put(bcsp->rx_skb, 1), &db, 1);
-			if ((bcsp->rx_skb-> data[0] & 0x40) != 0 && 
+			if ((bcsp->rx_skb->data[0] & 0x40) != 0 &&
 					bcsp->rx_state != BCSP_W4_CRC) 
-				bcsp_crc_update(&bcsp-> message_crc, 0xdb);
+				bcsp_crc_update(&bcsp->message_crc, 0xdb);
 			bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC;
 			bcsp->rx_count--;
 			break;
 
 		default:
-			BT_ERR ("Invalid byte %02x after esc byte", byte);
+			BT_ERR("Invalid byte %02x after esc byte", byte);
 			kfree_skb(bcsp->rx_skb);
 			bcsp->rx_skb = NULL;
 			bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
@@ -527,7 +527,7 @@
 
 				hci_recv_frame(hu->hdev, bcsp->rx_skb);
 			} else {
-				BT_ERR ("Packet for unknown channel (%u %s)",
+				BT_ERR("Packet for unknown channel (%u %s)",
 					bcsp->rx_skb->data[1] & 0x0f,
 					bcsp->rx_skb->data[0] & 0x80 ? 
 					"reliable" : "unreliable");
@@ -587,7 +587,7 @@
 			}
 			if (bcsp->rx_skb->data[0] & 0x80	/* reliable pkt */
 			    		&& (bcsp->rx_skb->data[0] & 0x07) != bcsp->rxseq_txack) {
-				BT_ERR ("Out-of-order packet arrived, got %u expected %u",
+				BT_ERR("Out-of-order packet arrived, got %u expected %u",
 					bcsp->rx_skb->data[0] & 0x07, bcsp->rxseq_txack);
 
 				kfree_skb(bcsp->rx_skb);
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index f7190f0..57faddc 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -133,6 +133,7 @@
 	if (IS_ERR(h4->rx_skb)) {
 		int err = PTR_ERR(h4->rx_skb);
 		BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+		h4->rx_skb = NULL;
 		return err;
 	}
 
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 5c9a73f..177dd69 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -40,6 +40,7 @@
 #include <linux/signal.h>
 #include <linux/ioctl.h>
 #include <linux/skbuff.h>
+#include <linux/firmware.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -265,11 +266,133 @@
 	return 0;
 }
 
+/* Flow-control (pause) or un-flow-control (resume) the device */
+void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
+{
+	struct tty_struct *tty = hu->tty;
+	struct ktermios ktermios;
+	int status;
+	unsigned int set = 0;
+	unsigned int clear = 0;
+
+	if (enable) {
+		/* Disable hardware flow control */
+		ktermios = tty->termios;
+		ktermios.c_cflag &= ~CRTSCTS;
+		status = tty_set_termios(tty, &ktermios);
+		BT_DBG("Disabling hardware flow control: %s",
+		       status ? "failed" : "success");
+
+		/* Clear RTS to prevent the device from sending */
+		/* Most UARTs need OUT2 to enable interrupts */
+		status = tty->driver->ops->tiocmget(tty);
+		BT_DBG("Current tiocm 0x%x", status);
+
+		set &= ~(TIOCM_OUT2 | TIOCM_RTS);
+		clear = ~set;
+		set &= TIOCM_DTR | TIOCM_RTS | TIOCM_OUT1 |
+		       TIOCM_OUT2 | TIOCM_LOOP;
+		clear &= TIOCM_DTR | TIOCM_RTS | TIOCM_OUT1 |
+			 TIOCM_OUT2 | TIOCM_LOOP;
+		status = tty->driver->ops->tiocmset(tty, set, clear);
+		BT_DBG("Clearing RTS: %s", status ? "failed" : "success");
+	} else {
+		/* Set RTS to allow the device to send again */
+		status = tty->driver->ops->tiocmget(tty);
+		BT_DBG("Current tiocm 0x%x", status);
+
+		set |= (TIOCM_OUT2 | TIOCM_RTS);
+		clear = ~set;
+		set &= TIOCM_DTR | TIOCM_RTS | TIOCM_OUT1 |
+		       TIOCM_OUT2 | TIOCM_LOOP;
+		clear &= TIOCM_DTR | TIOCM_RTS | TIOCM_OUT1 |
+			 TIOCM_OUT2 | TIOCM_LOOP;
+		status = tty->driver->ops->tiocmset(tty, set, clear);
+		BT_DBG("Setting RTS: %s", status ? "failed" : "success");
+
+		/* Re-enable hardware flow control */
+		ktermios = tty->termios;
+		ktermios.c_cflag |= CRTSCTS;
+		status = tty_set_termios(tty, &ktermios);
+		BT_DBG("Enabling hardware flow control: %s",
+		       status ? "failed" : "success");
+	}
+}
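
The mask arithmetic in hci_uart_set_flow_control() is compact enough to be easy to misread. The following stand-alone sketch (a hedged illustration, not kernel code; the TIOCM_* values match the Linux ABI constants) reproduces it so the resulting tiocmset() arguments can be checked directly: with enable == true, set stays 0 and clear ends up covering all five modem-control lines; with enable == false, set raises OUT2 | RTS and clear drops the remaining three.

#include <stdio.h>
#include <stdbool.h>

#define TIOCM_DTR	0x002
#define TIOCM_RTS	0x004
#define TIOCM_OUT1	0x2000
#define TIOCM_OUT2	0x4000
#define TIOCM_LOOP	0x8000

int main(void)
{
	const unsigned int mask = TIOCM_DTR | TIOCM_RTS | TIOCM_OUT1 |
				  TIOCM_OUT2 | TIOCM_LOOP;
	bool enable = true;
	unsigned int set = 0, clear;

	if (enable)
		set &= ~(TIOCM_OUT2 | TIOCM_RTS);	/* still 0 */
	else
		set |= TIOCM_OUT2 | TIOCM_RTS;

	clear = ~set;
	set &= mask;	/* lines to raise */
	clear &= mask;	/* lines to drop */

	printf("set=0x%04x clear=0x%04x\n", set, clear);
	return 0;
}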
+
+void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
+			 unsigned int oper_speed)
+{
+	hu->init_speed = init_speed;
+	hu->oper_speed = oper_speed;
+}
+
+void hci_uart_init_tty(struct hci_uart *hu)
+{
+	struct tty_struct *tty = hu->tty;
+	struct ktermios ktermios;
+
+	/* Bring the UART into a known state: 8 bits, no parity, hw flow control */
+	ktermios = tty->termios;
+	ktermios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP |
+			      INLCR | IGNCR | ICRNL | IXON);
+	ktermios.c_oflag &= ~OPOST;
+	ktermios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
+	ktermios.c_cflag &= ~(CSIZE | PARENB);
+	ktermios.c_cflag |= CS8;
+	ktermios.c_cflag |= CRTSCTS;
+
+	/* The tty_set_termios() return value is not checked as it is always 0 */
+	tty_set_termios(tty, &ktermios);
+}
+
+void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed)
+{
+	struct tty_struct *tty = hu->tty;
+	struct ktermios ktermios;
+
+	ktermios = tty->termios;
+	ktermios.c_cflag &= ~CBAUD;
+	tty_termios_encode_baud_rate(&ktermios, speed, speed);
+
+	/* The tty_set_termios() return value is not checked as it is always 0 */
+	tty_set_termios(tty, &ktermios);
+
+	BT_DBG("%s: New tty speeds: %d/%d", hu->hdev->name,
+	       tty->termios.c_ispeed, tty->termios.c_ospeed);
+}
+
 static int hci_uart_setup(struct hci_dev *hdev)
 {
 	struct hci_uart *hu = hci_get_drvdata(hdev);
 	struct hci_rp_read_local_version *ver;
 	struct sk_buff *skb;
+	unsigned int speed;
+	int err;
+
+	/* Init speed if any */
+	if (hu->init_speed)
+		speed = hu->init_speed;
+	else if (hu->proto->init_speed)
+		speed = hu->proto->init_speed;
+	else
+		speed = 0;
+
+	if (speed)
+		hci_uart_set_baudrate(hu, speed);
+
+	/* Operational speed if any */
+	if (hu->oper_speed)
+		speed = hu->oper_speed;
+	else if (hu->proto->oper_speed)
+		speed = hu->proto->oper_speed;
+	else
+		speed = 0;
+
+	if (hu->proto->set_baudrate && speed) {
+		err = hu->proto->set_baudrate(hu, speed);
+		if (!err)
+			hci_uart_set_baudrate(hu, speed);
+	}
 
 	if (hu->proto->setup)
 		return hu->proto->setup(hu);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 72120a5..ce9c670 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -58,10 +58,13 @@
 struct hci_uart_proto {
 	unsigned int id;
 	const char *name;
+	unsigned int init_speed;
+	unsigned int oper_speed;
 	int (*open)(struct hci_uart *hu);
 	int (*close)(struct hci_uart *hu);
 	int (*flush)(struct hci_uart *hu);
 	int (*setup)(struct hci_uart *hu);
+	int (*set_baudrate)(struct hci_uart *hu, unsigned int speed);
 	int (*recv)(struct hci_uart *hu, const void *data, int len);
 	int (*enqueue)(struct hci_uart *hu, struct sk_buff *skb);
 	struct sk_buff *(*dequeue)(struct hci_uart *hu);
@@ -82,6 +85,9 @@
 	struct sk_buff		*tx_skb;
 	unsigned long		tx_state;
 	spinlock_t		rx_lock;
+
+	unsigned int init_speed;
+	unsigned int oper_speed;
 };
 
 /* HCI_UART proto flag bits */
@@ -96,6 +102,11 @@
 int hci_uart_unregister_proto(const struct hci_uart_proto *p);
 int hci_uart_tx_wakeup(struct hci_uart *hu);
 int hci_uart_init_ready(struct hci_uart *hu);
+void hci_uart_init_tty(struct hci_uart *hu);
+void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed);
+void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
+void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
+			 unsigned int oper_speed);
 
 #ifdef CONFIG_BT_HCIUART_H4
 int h4_init(void);
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 6653473..78653db 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -366,7 +366,7 @@
 	.llseek		= no_llseek,
 };
 
-static struct miscdevice vhci_miscdev= {
+static struct miscdevice vhci_miscdev = {
 	.name	= "vhci",
 	.fops	= &vhci_fops,
 	.minor	= VHCI_MINOR,
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index a50936a..5639699 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -140,12 +140,47 @@
 	},
 };
 
+#define NSS_PLL_RATE(f, _l, _m, _n, i) \
+	{  \
+		.freq = f,  \
+		.l = _l, \
+		.m = _m, \
+		.n = _n, \
+		.ibits = i, \
+	}
+
+static struct pll_freq_tbl pll18_freq_tbl[] = {
+	NSS_PLL_RATE(550000000, 44, 0, 1, 0x01495625),
+	NSS_PLL_RATE(733000000, 58, 16, 25, 0x014b5625),
+};
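
The two table entries pin down how l/m/n map to a rate: assuming the 25 MHz pxo reference used on ipq806x and a divide-by-two post-divider (suggested by pll18's single-bit post_div field below), rate = pxo * (L + M/N) / 2 reproduces both frequencies exactly. The formula is inferred from these values, not from documentation; a throwaway userspace check:

#include <stdio.h>

/* Inferred: rate = parent * (L + M/N) / 2, in 64-bit integer math. */
static unsigned long long pll_rate(unsigned long long parent,
				   unsigned l, unsigned m, unsigned n)
{
	return (parent * l + parent * m / n) / 2;
}

int main(void)
{
	printf("%llu\n", pll_rate(25000000, 44, 0, 1));   /* 550000000 */
	printf("%llu\n", pll_rate(25000000, 58, 16, 25)); /* 733000000 */
	return 0;
}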
+
+static struct clk_pll pll18 = {
+	.l_reg = 0x31a4,
+	.m_reg = 0x31a8,
+	.n_reg = 0x31ac,
+	.config_reg = 0x31b4,
+	.mode_reg = 0x31a0,
+	.status_reg = 0x31b8,
+	.status_bit = 16,
+	.post_div_shift = 16,
+	.post_div_width = 1,
+	.freq_tbl = pll18_freq_tbl,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pll18",
+		.parent_names = (const char *[]){ "pxo" },
+		.num_parents = 1,
+		.ops = &clk_pll_ops,
+	},
+};
+
 enum {
 	P_PXO,
 	P_PLL8,
 	P_PLL3,
 	P_PLL0,
 	P_CXO,
+	P_PLL14,
+	P_PLL18,
 };
 
 static const struct parent_map gcc_pxo_pll8_map[] = {
@@ -197,6 +232,22 @@
 	"pll0_vote",
 };
 
+static const struct parent_map gcc_pxo_pll8_pll14_pll18_pll0_map[] = {
+	{ P_PXO, 0 },
+	{ P_PLL8, 4 },
+	{ P_PLL0, 2 },
+	{ P_PLL14, 5 },
+	{ P_PLL18, 1 }
+};
+
+static const char *gcc_pxo_pll8_pll14_pll18_pll0[] = {
+	"pxo",
+	"pll8_vote",
+	"pll0_vote",
+	"pll14",
+	"pll18",
+};
+
 static struct freq_tbl clk_tbl_gsbi_uart[] = {
 	{  1843200, P_PLL8, 2,  6, 625 },
 	{  3686400, P_PLL8, 2, 12, 625 },
@@ -2202,6 +2253,472 @@
 	},
 };
 
+static const struct freq_tbl clk_tbl_gmac[] = {
+	{ 133000000, P_PLL0, 1,  50, 301 },
+	{ 266000000, P_PLL0, 1, 127, 382 },
+	{ }
+};
+
+static struct clk_dyn_rcg gmac_core1_src = {
+	.ns_reg[0] = 0x3cac,
+	.ns_reg[1] = 0x3cb0,
+	.md_reg[0] = 0x3ca4,
+	.md_reg[1] = 0x3ca8,
+	.bank_reg = 0x3ca0,
+	.mn[0] = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.mn[1] = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.s[0] = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+	},
+	.s[1] = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+	},
+	.p[0] = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.p[1] = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.mux_sel_bit = 0,
+	.freq_tbl = clk_tbl_gmac,
+	.clkr = {
+		.enable_reg = 0x3ca0,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gmac_core1_src",
+			.parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+			.num_parents = 5,
+			.ops = &clk_dyn_rcg_ops,
+		},
+	},
+};
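
gmac_core1_src (like the other NSS sources below) is a dual-banked "dynamic" RCG: the ns/md, mn, s and p fields all come in pairs, one per register bank, and mux_sel_bit picks which bank currently drives the output. The usual point of this layout is glitch-free reprogramming — write the idle bank, then flip the mux — though that rationale is inferred from the structure, not stated by this patch. Conceptually:

/* Conceptual sketch only, not the clk_dyn_rcg implementation. */
static void sketch_dyn_rcg_switch(struct regmap *map, u32 bank_reg,
				  u32 mux_sel_bit, bool use_bank1)
{
	/* 1. NS/MD of the idle bank are programmed first (not shown);
	 * 2. then the selector flips so the new bank takes over.
	 */
	regmap_update_bits(map, bank_reg, BIT(mux_sel_bit),
			   use_bank1 ? BIT(mux_sel_bit) : 0);
}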
+
+static struct clk_branch gmac_core1_clk = {
+	.halt_reg = 0x3c20,
+	.halt_bit = 4,
+	.hwcg_reg = 0x3cb4,
+	.hwcg_bit = 6,
+	.clkr = {
+		.enable_reg = 0x3cb4,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "gmac_core1_clk",
+			.parent_names = (const char *[]){
+				"gmac_core1_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_dyn_rcg gmac_core2_src = {
+	.ns_reg[0] = 0x3ccc,
+	.ns_reg[1] = 0x3cd0,
+	.md_reg[0] = 0x3cc4,
+	.md_reg[1] = 0x3cc8,
+	.bank_reg = 0x3ca0,
+	.mn[0] = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.mn[1] = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.s[0] = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+	},
+	.s[1] = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+	},
+	.p[0] = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.p[1] = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.mux_sel_bit = 0,
+	.freq_tbl = clk_tbl_gmac,
+	.clkr = {
+		.enable_reg = 0x3cc0,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gmac_core2_src",
+			.parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+			.num_parents = 5,
+			.ops = &clk_dyn_rcg_ops,
+		},
+	},
+};
+
+static struct clk_branch gmac_core2_clk = {
+	.halt_reg = 0x3c20,
+	.halt_bit = 5,
+	.hwcg_reg = 0x3cd4,
+	.hwcg_bit = 6,
+	.clkr = {
+		.enable_reg = 0x3cd4,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "gmac_core2_clk",
+			.parent_names = (const char *[]){
+				"gmac_core2_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_dyn_rcg gmac_core3_src = {
+	.ns_reg[0] = 0x3cec,
+	.ns_reg[1] = 0x3cf0,
+	.md_reg[0] = 0x3ce4,
+	.md_reg[1] = 0x3ce8,
+	.bank_reg = 0x3ce0,
+	.mn[0] = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.mn[1] = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.s[0] = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+	},
+	.s[1] = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+	},
+	.p[0] = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.p[1] = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.mux_sel_bit = 0,
+	.freq_tbl = clk_tbl_gmac,
+	.clkr = {
+		.enable_reg = 0x3ce0,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gmac_core3_src",
+			.parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+			.num_parents = 5,
+			.ops = &clk_dyn_rcg_ops,
+		},
+	},
+};
+
+static struct clk_branch gmac_core3_clk = {
+	.halt_reg = 0x3c20,
+	.halt_bit = 6,
+	.hwcg_reg = 0x3cf4,
+	.hwcg_bit = 6,
+	.clkr = {
+		.enable_reg = 0x3cf4,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "gmac_core3_clk",
+			.parent_names = (const char *[]){
+				"gmac_core3_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_dyn_rcg gmac_core4_src = {
+	.ns_reg[0] = 0x3d0c,
+	.ns_reg[1] = 0x3d10,
+	.md_reg[0] = 0x3d04,
+	.md_reg[1] = 0x3d08,
+	.bank_reg = 0x3d00,
+	.mn[0] = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.mn[1] = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.s[0] = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+	},
+	.s[1] = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+	},
+	.p[0] = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.p[1] = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.mux_sel_bit = 0,
+	.freq_tbl = clk_tbl_gmac,
+	.clkr = {
+		.enable_reg = 0x3d00,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gmac_core4_src",
+			.parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+			.num_parents = 5,
+			.ops = &clk_dyn_rcg_ops,
+		},
+	},
+};
+
+static struct clk_branch gmac_core4_clk = {
+	.halt_reg = 0x3c20,
+	.halt_bit = 7,
+	.hwcg_reg = 0x3d14,
+	.hwcg_bit = 6,
+	.clkr = {
+		.enable_reg = 0x3d14,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "gmac_core4_clk",
+			.parent_names = (const char *[]){
+				"gmac_core4_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static const struct freq_tbl clk_tbl_nss_tcm[] = {
+	{ 266000000, P_PLL0, 3, 0, 0 },
+	{ 400000000, P_PLL0, 2, 0, 0 },
+	{ }
+};
+
+static struct clk_dyn_rcg nss_tcm_src = {
+	.ns_reg[0] = 0x3dc4,
+	.ns_reg[1] = 0x3dc8,
+	.bank_reg = 0x3dc0,
+	.s[0] = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+	},
+	.s[1] = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+	},
+	.p[0] = {
+		.pre_div_shift = 3,
+		.pre_div_width = 4,
+	},
+	.p[1] = {
+		.pre_div_shift = 3,
+		.pre_div_width = 4,
+	},
+	.mux_sel_bit = 0,
+	.freq_tbl = clk_tbl_nss_tcm,
+	.clkr = {
+		.enable_reg = 0x3dc0,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_tcm_src",
+			.parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+			.num_parents = 5,
+			.ops = &clk_dyn_rcg_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_tcm_clk = {
+	.halt_reg = 0x3c20,
+	.halt_bit = 14,
+	.clkr = {
+		.enable_reg = 0x3dd0,
+		.enable_mask = BIT(6) | BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_tcm_clk",
+			.parent_names = (const char *[]){
+				"nss_tcm_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static const struct freq_tbl clk_tbl_nss[] = {
+	{ 110000000, P_PLL18, 1, 1, 5 },
+	{ 275000000, P_PLL18, 2, 0, 0 },
+	{ 550000000, P_PLL18, 1, 0, 0 },
+	{ 733000000, P_PLL18, 1, 0, 0 },
+	{ }
+};
+
+static struct clk_dyn_rcg ubi32_core1_src_clk = {
+	.ns_reg[0] = 0x3d2c,
+	.ns_reg[1] = 0x3d30,
+	.md_reg[0] = 0x3d24,
+	.md_reg[1] = 0x3d28,
+	.bank_reg = 0x3d20,
+	.mn[0] = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.mn[1] = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.s[0] = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+	},
+	.s[1] = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+	},
+	.p[0] = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.p[1] = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.mux_sel_bit = 0,
+	.freq_tbl = clk_tbl_nss,
+	.clkr = {
+		.enable_reg = 0x3d20,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "ubi32_core1_src_clk",
+			.parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+			.num_parents = 5,
+			.ops = &clk_dyn_rcg_ops,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		},
+	},
+};
+
+static struct clk_dyn_rcg ubi32_core2_src_clk = {
+	.ns_reg[0] = 0x3d4c,
+	.ns_reg[1] = 0x3d50,
+	.md_reg[0] = 0x3d44,
+	.md_reg[1] = 0x3d48,
+	.bank_reg = 0x3d40,
+	.mn[0] = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.mn[1] = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.s[0] = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+	},
+	.s[1] = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+	},
+	.p[0] = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.p[1] = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.mux_sel_bit = 0,
+	.freq_tbl = clk_tbl_nss,
+	.clkr = {
+		.enable_reg = 0x3d40,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "ubi32_core2_src_clk",
+			.parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+			.num_parents = 5,
+			.ops = &clk_dyn_rcg_ops,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		},
+	},
+};
+
 static struct clk_regmap *gcc_ipq806x_clks[] = {
 	[PLL0] = &pll0.clkr,
 	[PLL0_VOTE] = &pll0_vote,
@@ -2211,6 +2728,7 @@
 	[PLL8_VOTE] = &pll8_vote,
 	[PLL14] = &pll14.clkr,
 	[PLL14_VOTE] = &pll14_vote,
+	[PLL18] = &pll18.clkr,
 	[GSBI1_UART_SRC] = &gsbi1_uart_src.clkr,
 	[GSBI1_UART_CLK] = &gsbi1_uart_clk.clkr,
 	[GSBI2_UART_SRC] = &gsbi2_uart_src.clkr,
@@ -2307,6 +2825,18 @@
 	[USB_FS1_SYSTEM_CLK] = &usb_fs1_sys_clk.clkr,
 	[EBI2_CLK] = &ebi2_clk.clkr,
 	[EBI2_AON_CLK] = &ebi2_aon_clk.clkr,
+	[GMAC_CORE1_CLK_SRC] = &gmac_core1_src.clkr,
+	[GMAC_CORE1_CLK] = &gmac_core1_clk.clkr,
+	[GMAC_CORE2_CLK_SRC] = &gmac_core2_src.clkr,
+	[GMAC_CORE2_CLK] = &gmac_core2_clk.clkr,
+	[GMAC_CORE3_CLK_SRC] = &gmac_core3_src.clkr,
+	[GMAC_CORE3_CLK] = &gmac_core3_clk.clkr,
+	[GMAC_CORE4_CLK_SRC] = &gmac_core4_src.clkr,
+	[GMAC_CORE4_CLK] = &gmac_core4_clk.clkr,
+	[UBI32_CORE1_CLK_SRC] = &ubi32_core1_src_clk.clkr,
+	[UBI32_CORE2_CLK_SRC] = &ubi32_core2_src_clk.clkr,
+	[NSSTCM_CLK_SRC] = &nss_tcm_src.clkr,
+	[NSSTCM_CLK] = &nss_tcm_clk.clkr,
 };
 
 static const struct qcom_reset_map gcc_ipq806x_resets[] = {
@@ -2425,6 +2955,48 @@
 	[USB30_1_PHY_RESET] = { 0x3b58, 0 },
 	[NSSFB0_RESET] = { 0x3b60, 6 },
 	[NSSFB1_RESET] = { 0x3b60, 7 },
+	[UBI32_CORE1_CLKRST_CLAMP_RESET] = { 0x3d3c, 3 },
+	[UBI32_CORE1_CLAMP_RESET] = { 0x3d3c, 2 },
+	[UBI32_CORE1_AHB_RESET] = { 0x3d3c, 1 },
+	[UBI32_CORE1_AXI_RESET] = { 0x3d3c, 0 },
+	[UBI32_CORE2_CLKRST_CLAMP_RESET] = { 0x3d5c, 3 },
+	[UBI32_CORE2_CLAMP_RESET] = { 0x3d5c, 2 },
+	[UBI32_CORE2_AHB_RESET] = { 0x3d5c, 1 },
+	[UBI32_CORE2_AXI_RESET] = { 0x3d5c, 0 },
+	[GMAC_CORE1_RESET] = { 0x3cbc, 0 },
+	[GMAC_CORE2_RESET] = { 0x3cdc, 0 },
+	[GMAC_CORE3_RESET] = { 0x3cfc, 0 },
+	[GMAC_CORE4_RESET] = { 0x3d1c, 0 },
+	[GMAC_AHB_RESET] = { 0x3e24, 0 },
+	[NSS_CH0_RST_RX_CLK_N_RESET] = { 0x3b60, 0 },
+	[NSS_CH0_RST_TX_CLK_N_RESET] = { 0x3b60, 1 },
+	[NSS_CH0_RST_RX_125M_N_RESET] = { 0x3b60, 2 },
+	[NSS_CH0_HW_RST_RX_125M_N_RESET] = { 0x3b60, 3 },
+	[NSS_CH0_RST_TX_125M_N_RESET] = { 0x3b60, 4 },
+	[NSS_CH1_RST_RX_CLK_N_RESET] = { 0x3b60, 5 },
+	[NSS_CH1_RST_TX_CLK_N_RESET] = { 0x3b60, 6 },
+	[NSS_CH1_RST_RX_125M_N_RESET] = { 0x3b60, 7 },
+	[NSS_CH1_HW_RST_RX_125M_N_RESET] = { 0x3b60, 8 },
+	[NSS_CH1_RST_TX_125M_N_RESET] = { 0x3b60, 9 },
+	[NSS_CH2_RST_RX_CLK_N_RESET] = { 0x3b60, 10 },
+	[NSS_CH2_RST_TX_CLK_N_RESET] = { 0x3b60, 11 },
+	[NSS_CH2_RST_RX_125M_N_RESET] = { 0x3b60, 12 },
+	[NSS_CH2_HW_RST_RX_125M_N_RESET] = { 0x3b60, 13 },
+	[NSS_CH2_RST_TX_125M_N_RESET] = { 0x3b60, 14 },
+	[NSS_CH3_RST_RX_CLK_N_RESET] = { 0x3b60, 15 },
+	[NSS_CH3_RST_TX_CLK_N_RESET] = { 0x3b60, 16 },
+	[NSS_CH3_RST_RX_125M_N_RESET] = { 0x3b60, 17 },
+	[NSS_CH3_HW_RST_RX_125M_N_RESET] = { 0x3b60, 18 },
+	[NSS_CH3_RST_TX_125M_N_RESET] = { 0x3b60, 19 },
+	[NSS_RST_RX_250M_125M_N_RESET] = { 0x3b60, 20 },
+	[NSS_RST_TX_250M_125M_N_RESET] = { 0x3b60, 21 },
+	[NSS_QSGMII_TXPI_RST_N_RESET] = { 0x3b60, 22 },
+	[NSS_QSGMII_CDR_RST_N_RESET] = { 0x3b60, 23 },
+	[NSS_SGMII2_CDR_RST_N_RESET] = { 0x3b60, 24 },
+	[NSS_SGMII3_CDR_RST_N_RESET] = { 0x3b60, 25 },
+	[NSS_CAL_PRBS_RST_N_RESET] = { 0x3b60, 26 },
+	[NSS_LCKDT_RST_N_RESET] = { 0x3b60, 27 },
+	[NSS_SRDS_N_RESET] = { 0x3b60, 28 },
 };
 
 static const struct regmap_config gcc_ipq806x_regmap_config = {
@@ -2453,6 +3025,8 @@
 {
 	struct clk *clk;
 	struct device *dev = &pdev->dev;
+	struct regmap *regmap;
+	int ret;
 
 	/* Temporary until RPM clocks supported */
 	clk = clk_register_fixed_rate(dev, "cxo", NULL, CLK_IS_ROOT, 25000000);
@@ -2463,7 +3037,25 @@
 	if (IS_ERR(clk))
 		return PTR_ERR(clk);
 
-	return qcom_cc_probe(pdev, &gcc_ipq806x_desc);
+	ret = qcom_cc_probe(pdev, &gcc_ipq806x_desc);
+	if (ret)
+		return ret;
+
+	regmap = dev_get_regmap(dev, NULL);
+	if (!regmap)
+		return -ENODEV;
+
+	/* Setup PLL18 static bits */
+	regmap_update_bits(regmap, 0x31a4, 0xffffffc0, 0x40000400);
+	regmap_write(regmap, 0x31b0, 0x3080);
+
+	/* Set GMAC footswitch sleep/wakeup values */
+	regmap_write(regmap, 0x3cb8, 8);
+	regmap_write(regmap, 0x3cd8, 8);
+	regmap_write(regmap, 0x3cf8, 8);
+	regmap_write(regmap, 0x3d18, 8);
+
+	return 0;
 }
 
 static int gcc_ipq806x_remove(struct platform_device *pdev)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 62c816a..6eee3d3 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -453,10 +453,10 @@
 
 	cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
 	memset(stats, 0, sizeof *stats);
-	stats->iw.tcpInSegs = v4.tcpInSegs + v6.tcpInSegs;
-	stats->iw.tcpOutSegs = v4.tcpOutSegs + v6.tcpOutSegs;
-	stats->iw.tcpRetransSegs = v4.tcpRetransSegs + v6.tcpRetransSegs;
-	stats->iw.tcpOutRsts = v4.tcpOutRsts + v6.tcpOutSegs;
+	stats->iw.tcpInSegs = v4.tcp_in_segs + v6.tcp_in_segs;
+	stats->iw.tcpOutSegs = v4.tcp_out_segs + v6.tcp_out_segs;
+	stats->iw.tcpRetransSegs = v4.tcp_retrans_segs + v6.tcp_retrans_segs;
+	stats->iw.tcpOutRsts = v4.tcp_out_rsts + v6.tcp_out_rsts;
 
 	return 0;
 }
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 0f00204..21cb41a 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -189,7 +189,7 @@
 {
 	int i;
 	u64 guid_indexes;
-	int slave_id;
+	int slave_id, slave_port;
 	enum slave_port_state new_state;
 	enum slave_port_state prev_state;
 	__be64 tmp_cur_ag, form_cache_ag;
@@ -217,6 +217,11 @@
 		slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
 		if (slave_id >= dev->dev->persist->num_vfs + 1)
 			return;
+
+		slave_port = mlx4_phys_to_slave_port(dev->dev, slave_id, port_num);
+		if (slave_port < 0) /* this port isn't available for the VF */
+			continue;
+
 		tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
 		form_cache_ag = get_cached_alias_guid(dev, port_num,
 					(NUM_ALIAS_GUID_IN_REC * block_num) + i);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 3e2dee4..85a50df 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -64,14 +64,6 @@
 #define GUID_TBL_BLK_NUM_ENTRIES 8
 #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
 
-/* Counters should be saturate once they reach their maximum value */
-#define ASSIGN_32BIT_COUNTER(counter, value) do {\
-	if ((value) > U32_MAX)			 \
-		counter = cpu_to_be32(U32_MAX); \
-	else					 \
-		counter = cpu_to_be32(value);	 \
-} while (0)
-
 struct mlx4_mad_rcv_buf {
 	struct ib_grh grh;
 	u8 payload[256];
@@ -830,31 +822,25 @@
 			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
 			const struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
-	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_counter counter_stats;
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
 	int err;
-	u32 inmod = dev->counters[port_num - 1] & 0xffff;
-	u8 mode;
 
 	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
 		return -EINVAL;
 
-	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
-	if (IS_ERR(mailbox))
-		return IB_MAD_RESULT_FAILURE;
-
-	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
-			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
-			   MLX4_CMD_WRAPPED);
+	memset(&counter_stats, 0, sizeof(counter_stats));
+	err = mlx4_get_counter_stats(dev->dev,
+				     dev->counters[port_num - 1].index,
+				     &counter_stats, 0);
 	if (err)
 		err = IB_MAD_RESULT_FAILURE;
 	else {
 		memset(out_mad->data, 0, sizeof out_mad->data);
-		mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
-		switch (mode & 0xf) {
+		switch (counter_stats.counter_mode & 0xf) {
 		case 0:
-			edit_counter(mailbox->buf,
-						(void *)(out_mad->data + 40));
+			edit_counter(&counter_stats,
+				     (void *)(out_mad->data + 40));
 			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 			break;
 		default:
@@ -862,8 +848,6 @@
 		}
 	}
 
-	mlx4_free_cmd_mailbox(dev->dev, mailbox);
-
 	return err;
 }
 
@@ -873,6 +857,7 @@
 			struct ib_mad_hdr *out, size_t *out_mad_size,
 			u16 *out_mad_pkey_index)
 {
+	struct mlx4_ib_dev *dev = to_mdev(ibdev);
 	const struct ib_mad *in_mad = (const struct ib_mad *)in;
 	struct ib_mad *out_mad = (struct ib_mad *)out;
 
@@ -881,8 +866,9 @@
 
 	switch (rdma_port_get_link_layer(ibdev, port_num)) {
 	case IB_LINK_LAYER_INFINIBAND:
-		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
-				      in_grh, in_mad, out_mad);
+		if (!mlx4_is_slave(dev->dev))
+			return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
+					      in_grh, in_mad, out_mad);
 	case IB_LINK_LAYER_ETHERNET:
 		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
 					  in_grh, in_mad, out_mad);
@@ -1375,14 +1361,17 @@
 	 * stadard address handle by decoding the tunnelled mlx4_ah fields */
 	memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
 	ah.ibah.device = ctx->ib_dev;
+
+	port = be32_to_cpu(ah.av.ib.port_pd) >> 24;
+	port = mlx4_slave_convert_port(dev->dev, slave, port);
+	if (port < 0)
+		return;
+	ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
+
 	mlx4_ib_query_ah(&ah.ibah, &ah_attr);
 	if (ah_attr.ah_flags & IB_AH_GRH)
 		fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
 
-	port = mlx4_slave_convert_port(dev->dev, slave, ah_attr.port_num);
-	if (port < 0)
-		return;
-	ah_attr.port_num = port;
 	memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
 	ah_attr.vlan_id = be16_to_cpu(tunnel->hdr.vlan);
 	/* if slave have default vlan use it */
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 166da78..067a691 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1146,7 +1146,7 @@
 
 	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
 			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
-			   MLX4_CMD_NATIVE);
+			   MLX4_CMD_WRAPPED);
 	if (ret == -ENOMEM)
 		pr_err("mcg table is full. Fail to register network rule.\n");
 	else if (ret == -ENXIO)
@@ -1163,7 +1163,7 @@
 	int err;
 	err = mlx4_cmd(dev, reg_id, 0, 0,
 		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
-		       MLX4_CMD_NATIVE);
+		       MLX4_CMD_WRAPPED);
 	if (err)
 		pr_err("Fail to detach network rule. registration id = 0x%llx\n",
 		       reg_id);
@@ -2098,77 +2098,52 @@
 
 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 {
-	char name[80];
-	int eq_per_port = 0;
-	int added_eqs = 0;
-	int total_eqs = 0;
-	int i, j, eq;
+	int i, j, eq = 0, total_eqs = 0;
 
-	/* Legacy mode or comp_pool is not large enough */
-	if (dev->caps.comp_pool == 0 ||
-	    dev->caps.num_ports > dev->caps.comp_pool)
-		return;
-
-	eq_per_port = dev->caps.comp_pool / dev->caps.num_ports;
-
-	/* Init eq table */
-	added_eqs = 0;
-	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
-		added_eqs += eq_per_port;
-
-	total_eqs = dev->caps.num_comp_vectors + added_eqs;
-
-	ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
+	ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
+				  sizeof(ibdev->eq_table[0]), GFP_KERNEL);
 	if (!ibdev->eq_table)
 		return;
 
-	ibdev->eq_added = added_eqs;
-
-	eq = 0;
-	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
-		for (j = 0; j < eq_per_port; j++) {
-			snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
-				 i, j, dev->persist->pdev->bus->name);
-			/* Set IRQ for specific name (per ring) */
-			if (mlx4_assign_eq(dev, name, NULL,
-					   &ibdev->eq_table[eq])) {
-				/* Use legacy (same as mlx4_en driver) */
-				pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
-				ibdev->eq_table[eq] =
-					(eq % dev->caps.num_comp_vectors);
-			}
-			eq++;
+	for (i = 1; i <= dev->caps.num_ports; i++) {
+		for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
+		     j++, total_eqs++) {
+			if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
+				continue;
+			ibdev->eq_table[eq] = total_eqs;
+			if (!mlx4_assign_eq(dev, i,
+					    &ibdev->eq_table[eq]))
+				eq++;
+			else
+				ibdev->eq_table[eq] = -1;
 		}
 	}
 
-	/* Fill the reset of the vector with legacy EQ */
-	for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
-		ibdev->eq_table[eq++] = i;
+	for (i = eq; i < dev->caps.num_comp_vectors;
+	     ibdev->eq_table[i++] = -1)
+		;
 
 	/* Advertise the new number of EQs to clients */
-	ibdev->ib_dev.num_comp_vectors = total_eqs;
+	ibdev->ib_dev.num_comp_vectors = eq;
 }
 
 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 {
 	int i;
+	int total_eqs = ibdev->ib_dev.num_comp_vectors;
 
-	/* no additional eqs were added */
+	/* no eqs were allocated */
 	if (!ibdev->eq_table)
 		return;
 
 	/* Reset the advertised EQ number */
-	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
+	ibdev->ib_dev.num_comp_vectors = 0;
 
-	/* Free only the added eqs */
-	for (i = 0; i < ibdev->eq_added; i++) {
-		/* Don't free legacy eqs if used */
-		if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
-			continue;
+	for (i = 0; i < total_eqs; i++)
 		mlx4_release_eq(dev, ibdev->eq_table[i]);
-	}
 
 	kfree(ibdev->eq_table);
+	ibdev->eq_table = NULL;
 }
 
 static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
@@ -2203,6 +2178,8 @@
 	struct mlx4_ib_iboe *iboe;
 	int ib_num_ports = 0;
 	int num_req_counters;
+	int allocated;
+	u32 counter_index;
 
 	pr_info_once("%s", mlx4_ib_version);
 
@@ -2373,19 +2350,31 @@
 	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
 	for (i = 0; i < num_req_counters; ++i) {
 		mutex_init(&ibdev->qp1_proxy_lock[i]);
+		allocated = 0;
 		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
 						IB_LINK_LAYER_ETHERNET) {
-			err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
+			err = mlx4_counter_alloc(ibdev->dev, &counter_index);
+			/* if we failed to allocate a new counter, use the default */
 			if (err)
-				ibdev->counters[i] = -1;
-		} else {
-			ibdev->counters[i] = -1;
+				counter_index =
+					mlx4_get_default_counter_index(dev,
+								       i + 1);
+			else
+				allocated = 1;
+		} else { /* IB_LINK_LAYER_INFINIBAND uses the default counter */
+			counter_index = mlx4_get_default_counter_index(dev,
+								       i + 1);
 		}
+		ibdev->counters[i].index = counter_index;
+		ibdev->counters[i].allocated = allocated;
+		pr_info("counter index %d for port %d allocated %d\n",
+			counter_index, i + 1, allocated);
 	}
 	if (mlx4_is_bonded(dev))
-		for (i = 1; i < ibdev->num_ports ; ++i)
-			ibdev->counters[i] = ibdev->counters[0];
-
+		for (i = 1; i < ibdev->num_ports ; ++i) {
+			ibdev->counters[i].index = ibdev->counters[0].index;
+			ibdev->counters[i].allocated = 0;
+		}
 
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
 		ib_num_ports++;
@@ -2525,10 +2514,12 @@
 		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
 				      ibdev->steer_qpn_count);
 err_counter:
-	for (; i; --i)
-		if (ibdev->counters[i - 1] != -1)
-			mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
-
+	for (i = 0; i < ibdev->num_ports; ++i) {
+		if (ibdev->counters[i].index != -1 &&
+		    ibdev->counters[i].allocated)
+			mlx4_counter_free(ibdev->dev,
+					  ibdev->counters[i].index);
+	}
 err_map:
 	iounmap(ibdev->uar_map);
 
@@ -2645,8 +2636,9 @@
 
 	iounmap(ibdev->uar_map);
 	for (p = 0; p < ibdev->num_ports; ++p)
-		if (ibdev->counters[p] != -1)
-			mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
+		if (ibdev->counters[p].index != -1 &&
+		    ibdev->counters[p].allocated)
+			mlx4_counter_free(ibdev->dev, ibdev->counters[p].index);
 	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
 		mlx4_CLOSE_PORT(dev, p);
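
The reworked EQ allocation above requests vectors per port, skips vectors
already shared with another port, and pads the tail of eq_table with -1 so
unused slots are recognizable at teardown.  A minimal sketch of that fill
pattern, with eqs_per_port() and assign_eq() as stand-ins for the mlx4
helpers (illustration only, not part of this patch):

	static int eqs_per_port(int port) { (void)port; return 2; }	/* assumed */
	static int assign_eq(int vector)  { (void)vector; return 0; }	/* 0 == ok */

	static void fill_eq_table(int *table, int len, int num_ports)
	{
		int eq = 0, total = 0, i, j;

		for (i = 1; i <= num_ports; i++)
			for (j = 0; j < eqs_per_port(i); j++, total++)
				if (assign_eq(total) == 0)
					table[eq++] = total;
		while (eq < len)
			table[eq++] = -1;	/* mark unused slots */
	}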
 
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 7933adf..334387f 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -504,6 +504,11 @@
 	struct mlx4_ib_iov_sysfs_attr mcg_dentry;
 };
 
+struct counter_index {
+	u32		index;
+	u8		allocated;
+};
+
 struct mlx4_ib_dev {
 	struct ib_device	ib_dev;
 	struct mlx4_dev	       *dev;
@@ -522,9 +527,8 @@
 	struct mutex		cap_mask_mutex;
 	bool			ib_active;
 	struct mlx4_ib_iboe	iboe;
-	int			counters[MLX4_MAX_PORTS];
+	struct counter_index    counters[MLX4_MAX_PORTS];
 	int		       *eq_table;
-	int			eq_added;
 	struct kobject	       *iov_parent;
 	struct kobject	       *ports_parent;
 	struct kobject	       *dev_ports_parent[MLX4_MFUNC_MAX];
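
The new struct counter_index pairs the counter number with an "allocated"
flag so teardown frees only counters the driver actually obtained from
mlx4_counter_alloc(); ports that fell back to the device default counter,
and bonded slaves that share port 0's counter, must leave it alone.  A
sketch of that ownership rule, with hypothetical hw_* helpers standing in
for the mlx4 calls:

	#include <stdbool.h>

	struct counter_index_model { unsigned int index; bool allocated; };

	/* hypothetical stand-ins for mlx4_counter_alloc()/_free() and the
	 * per-port default counter (illustration only) */
	static int  hw_counter_alloc(unsigned int *idx) { *idx = 7; return 0; }
	static void hw_counter_free(unsigned int idx) { (void)idx; }
	static unsigned int hw_default_counter(int port) { return (unsigned int)port; }

	static void counter_setup(struct counter_index_model *c, int port)
	{
		if (hw_counter_alloc(&c->index) == 0) {
			c->allocated = true;	/* ours: free at teardown */
		} else {
			c->index = hw_default_counter(port);
			c->allocated = false;	/* shared default: never free */
		}
	}

	static void counter_teardown(struct counter_index_model *c)
	{
		if (c->allocated)
			hw_counter_free(c->index);
	}
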
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 02fc91c6..c5a3a5f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1539,12 +1539,13 @@
 	}
 
 	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
-		if (dev->counters[qp->port - 1] != -1) {
+		if (dev->counters[qp->port - 1].index != -1) {
 			context->pri_path.counter_index =
-						dev->counters[qp->port - 1];
+					dev->counters[qp->port - 1].index;
 			optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
 		} else
-			context->pri_path.counter_index = 0xff;
+			context->pri_path.counter_index =
+				MLX4_SINK_COUNTER_INDEX(dev->dev);
 
 		if (qp->flags & MLX4_IB_QP_NETIF) {
 			mlx4_ib_steer_qp_reg(dev, qp, 1);
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
index 10df386..bce263b 100644
--- a/drivers/infiniband/hw/mlx5/Kconfig
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -1,8 +1,6 @@
 config MLX5_INFINIBAND
 	tristate "Mellanox Connect-IB HCA support"
-	depends on NETDEVICES && ETHERNET && PCI
-	select NET_VENDOR_MELLANOX
-	select MLX5_CORE
+	depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
 	---help---
 	  This driver provides low-level InfiniBand support for
 	  Mellanox Connect-IB PCI Express host channel adapters (HCAs).
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 09fbae6..5c9eeea 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -590,8 +590,7 @@
 {
 	int err;
 
-	err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
-			     PAGE_SIZE * 2, &buf->buf);
+	err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf);
 	if (err)
 		return err;
 
@@ -760,7 +759,7 @@
 		return ERR_PTR(-EINVAL);
 
 	entries = roundup_pow_of_two(entries + 1);
-	if (entries > dev->mdev->caps.gen.max_cqes)
+	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
 		return ERR_PTR(-EINVAL);
 
 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -927,7 +926,7 @@
 	int err;
 	u32 fsel;
 
-	if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
 		return -ENOSYS;
 
 	in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -1082,7 +1081,7 @@
 	int uninitialized_var(cqe_size);
 	unsigned long flags;
 
-	if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
 		pr_info("Firmware does not support resize CQ\n");
 		return -ENOSYS;
 	}
@@ -1091,7 +1090,7 @@
 		return -EINVAL;
 
 	entries = roundup_pow_of_two(entries + 1);
-	if (entries > dev->mdev->caps.gen.max_cqes + 1)
+	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
 		return -EINVAL;
 
 	if (entries == ibcq->cqe + 1)
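
The conversions above replace fields cached in dev->mdev->caps.gen with
direct MLX5_CAP_GEN() reads of the firmware capability layout; most size
limits are encoded as log2 values, so the usable maximum is 1 << log_cap.
A tiny worked example (the capability value is assumed):

	#include <stdio.h>

	int main(void)
	{
		unsigned int log_max_cq_sz = 16;	/* as read via MLX5_CAP_GEN() */
		unsigned int max_cqes = 1u << log_max_cq_sz;

		printf("max CQ entries: %u\n", max_cqes);	/* 65536 */
		return 0;
	}
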
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 8e45714..01fc97d 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -136,7 +136,7 @@
 
 	packet_error = be16_to_cpu(out_mad->status);
 
-	dev->mdev->caps.gen.ext_port_cap[port - 1] = (!err && !packet_error) ?
+	dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
 		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
 
 out:
@@ -144,3 +144,300 @@
 	kfree(out_mad);
 	return err;
 }
+
+int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
+					  struct ib_smp *out_mad)
+{
+	struct ib_smp *in_mad = NULL;
+	int err = -ENOMEM;
+
+	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+	if (!in_mad)
+		return -ENOMEM;
+
+	init_query_mad(in_mad);
+	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
+			   out_mad);
+
+	kfree(in_mad);
+	return err;
+}
+
+int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
+					 __be64 *sys_image_guid)
+{
+	struct ib_smp *out_mad = NULL;
+	int err = -ENOMEM;
+
+	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+	if (!out_mad)
+		return -ENOMEM;
+
+	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
+	if (err)
+		goto out;
+
+	memcpy(sys_image_guid, out_mad->data + 4, 8);
+
+out:
+	kfree(out_mad);
+
+	return err;
+}
+
+int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
+				 u16 *max_pkeys)
+{
+	struct ib_smp *out_mad = NULL;
+	int err = -ENOMEM;
+
+	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+	if (!out_mad)
+		return -ENOMEM;
+
+	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
+	if (err)
+		goto out;
+
+	*max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
+
+out:
+	kfree(out_mad);
+
+	return err;
+}
+
+int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
+				 u32 *vendor_id)
+{
+	struct ib_smp *out_mad = NULL;
+	int err = -ENOMEM;
+
+	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+	if (!out_mad)
+		return -ENOMEM;
+
+	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
+	if (err)
+		goto out;
+
+	*vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;
+
+out:
+	kfree(out_mad);
+
+	return err;
+}
+
+int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
+{
+	struct ib_smp *in_mad  = NULL;
+	struct ib_smp *out_mad = NULL;
+	int err = -ENOMEM;
+
+	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+	if (!in_mad || !out_mad)
+		goto out;
+
+	init_query_mad(in_mad);
+	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
+
+	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+	if (err)
+		goto out;
+
+	memcpy(node_desc, out_mad->data, 64);
+out:
+	kfree(in_mad);
+	kfree(out_mad);
+	return err;
+}
+
+int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
+{
+	struct ib_smp *in_mad  = NULL;
+	struct ib_smp *out_mad = NULL;
+	int err = -ENOMEM;
+
+	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+	if (!in_mad || !out_mad)
+		goto out;
+
+	init_query_mad(in_mad);
+	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+	if (err)
+		goto out;
+
+	memcpy(node_guid, out_mad->data + 12, 8);
+out:
+	kfree(in_mad);
+	kfree(out_mad);
+	return err;
+}
+
+int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
+			    u16 *pkey)
+{
+	struct ib_smp *in_mad  = NULL;
+	struct ib_smp *out_mad = NULL;
+	int err = -ENOMEM;
+
+	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+	if (!in_mad || !out_mad)
+		goto out;
+
+	init_query_mad(in_mad);
+	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
+	in_mad->attr_mod = cpu_to_be32(index / 32);
+
+	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
+			   out_mad);
+	if (err)
+		goto out;
+
+	*pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);
+
+out:
+	kfree(in_mad);
+	kfree(out_mad);
+	return err;
+}
+
+int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
+			    union ib_gid *gid)
+{
+	struct ib_smp *in_mad  = NULL;
+	struct ib_smp *out_mad = NULL;
+	int err = -ENOMEM;
+
+	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+	if (!in_mad || !out_mad)
+		goto out;
+
+	init_query_mad(in_mad);
+	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
+	in_mad->attr_mod = cpu_to_be32(port);
+
+	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
+			   out_mad);
+	if (err)
+		goto out;
+
+	memcpy(gid->raw, out_mad->data + 8, 8);
+
+	init_query_mad(in_mad);
+	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
+	in_mad->attr_mod = cpu_to_be32(index / 8);
+
+	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
+			   out_mad);
+	if (err)
+		goto out;
+
+	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
+
+out:
+	kfree(in_mad);
+	kfree(out_mad);
+	return err;
+}
+
+int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
+			    struct ib_port_attr *props)
+{
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	struct mlx5_core_dev *mdev = dev->mdev;
+	struct ib_smp *in_mad  = NULL;
+	struct ib_smp *out_mad = NULL;
+	int ext_active_speed;
+	int err = -ENOMEM;
+
+	if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
+		mlx5_ib_warn(dev, "invalid port number %d\n", port);
+		return -EINVAL;
+	}
+
+	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+	if (!in_mad || !out_mad)
+		goto out;
+
+	memset(props, 0, sizeof(*props));
+
+	init_query_mad(in_mad);
+	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
+	in_mad->attr_mod = cpu_to_be32(port);
+
+	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
+	if (err) {
+		mlx5_ib_warn(dev, "err %d\n", err);
+		goto out;
+	}
+
+	props->lid		= be16_to_cpup((__be16 *)(out_mad->data + 16));
+	props->lmc		= out_mad->data[34] & 0x7;
+	props->sm_lid		= be16_to_cpup((__be16 *)(out_mad->data + 18));
+	props->sm_sl		= out_mad->data[36] & 0xf;
+	props->state		= out_mad->data[32] & 0xf;
+	props->phys_state	= out_mad->data[33] >> 4;
+	props->port_cap_flags	= be32_to_cpup((__be32 *)(out_mad->data + 20));
+	props->gid_tbl_len	= out_mad->data[50];
+	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
+	props->pkey_tbl_len	= mdev->port_caps[port - 1].pkey_table_len;
+	props->bad_pkey_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 46));
+	props->qkey_viol_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 48));
+	props->active_width	= out_mad->data[31] & 0xf;
+	props->active_speed	= out_mad->data[35] >> 4;
+	props->max_mtu		= out_mad->data[41] & 0xf;
+	props->active_mtu	= out_mad->data[36] >> 4;
+	props->subnet_timeout	= out_mad->data[51] & 0x1f;
+	props->max_vl_num	= out_mad->data[37] >> 4;
+	props->init_type_reply	= out_mad->data[41] >> 4;
+
+	/* Check if extended speeds (EDR/FDR/...) are supported */
+	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
+		ext_active_speed = out_mad->data[62] >> 4;
+
+		switch (ext_active_speed) {
+		case 1:
+			props->active_speed = 16; /* FDR */
+			break;
+		case 2:
+			props->active_speed = 32; /* EDR */
+			break;
+		}
+	}
+
+	/* If reported active speed is QDR, check if it is FDR-10 */
+	if (props->active_speed == 4) {
+		if (mdev->port_caps[port - 1].ext_port_cap &
+		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
+			init_query_mad(in_mad);
+			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
+			in_mad->attr_mod = cpu_to_be32(port);
+
+			err = mlx5_MAD_IFC(dev, 1, 1, port,
+					   NULL, NULL, in_mad, out_mad);
+			if (err)
+				goto out;
+
+			/* Checking LinkSpeedActive for FDR-10 */
+			if (out_mad->data[15] & 0x1)
+				props->active_speed = 8;
+		}
+	}
+
+out:
+	kfree(in_mad);
+	kfree(out_mad);
+
+	return err;
+}
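
Every helper added to mad.c above follows the same shape: zero an input
MAD, set attr_id (and attr_mod where the attribute is indexed), issue
mlx5_MAD_IFC(), then decode fixed byte offsets out of out_mad->data.  A
schematic reduction of that pattern (the struct layout and mad_ifc() are
simplified stand-ins, not the real API):

	struct mad_model {
		unsigned short attr_id;
		unsigned int attr_mod;
		unsigned char data[232];
	};

	static int mad_ifc(const struct mad_model *in, struct mad_model *out)
	{
		(void)in; (void)out;	/* stand-in for mlx5_MAD_IFC() */
		return 0;
	}

	static int query_attr(unsigned short attr_id, unsigned int attr_mod,
			      struct mad_model *out)
	{
		struct mad_model in = { .attr_id = attr_id, .attr_mod = attr_mod };

		/* caller decodes fixed offsets from out->data on success */
		return mad_ifc(&in, out);
	}
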
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c6cb26e..085c24b 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -40,6 +40,7 @@
 #include <linux/io-mapping.h>
 #include <linux/sched.h>
 #include <rdma/ib_user_verbs.h>
+#include <linux/mlx5/vport.h>
 #include <rdma/ib_smi.h>
 #include <rdma/ib_umem.h>
 #include "user.h"
@@ -62,36 +63,172 @@
 	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
 	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
 
+static enum rdma_link_layer
+mlx5_ib_port_link_layer(struct ib_device *device)
+{
+	struct mlx5_ib_dev *dev = to_mdev(device);
+
+	switch (MLX5_CAP_GEN(dev->mdev, port_type)) {
+	case MLX5_CAP_PORT_TYPE_IB:
+		return IB_LINK_LAYER_INFINIBAND;
+	case MLX5_CAP_PORT_TYPE_ETH:
+		return IB_LINK_LAYER_ETHERNET;
+	default:
+		return IB_LINK_LAYER_UNSPECIFIED;
+	}
+}
+
+static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
+{
+	return !dev->mdev->issi;
+}
+
+enum {
+	MLX5_VPORT_ACCESS_METHOD_MAD,
+	MLX5_VPORT_ACCESS_METHOD_HCA,
+	MLX5_VPORT_ACCESS_METHOD_NIC,
+};
+
+static int mlx5_get_vport_access_method(struct ib_device *ibdev)
+{
+	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
+		return MLX5_VPORT_ACCESS_METHOD_MAD;
+
+	if (mlx5_ib_port_link_layer(ibdev) ==
+	    IB_LINK_LAYER_ETHERNET)
+		return MLX5_VPORT_ACCESS_METHOD_NIC;
+
+	return MLX5_VPORT_ACCESS_METHOD_HCA;
+}
+
+static int mlx5_query_system_image_guid(struct ib_device *ibdev,
+					__be64 *sys_image_guid)
+{
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	struct mlx5_core_dev *mdev = dev->mdev;
+	u64 tmp;
+	int err;
+
+	switch (mlx5_get_vport_access_method(ibdev)) {
+	case MLX5_VPORT_ACCESS_METHOD_MAD:
+		return mlx5_query_mad_ifc_system_image_guid(ibdev,
+							    sys_image_guid);
+
+	case MLX5_VPORT_ACCESS_METHOD_HCA:
+		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
+		if (!err)
+			*sys_image_guid = cpu_to_be64(tmp);
+		return err;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static int mlx5_query_max_pkeys(struct ib_device *ibdev,
+				u16 *max_pkeys)
+{
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	struct mlx5_core_dev *mdev = dev->mdev;
+
+	switch (mlx5_get_vport_access_method(ibdev)) {
+	case MLX5_VPORT_ACCESS_METHOD_MAD:
+		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
+
+	case MLX5_VPORT_ACCESS_METHOD_HCA:
+	case MLX5_VPORT_ACCESS_METHOD_NIC:
+		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
+						pkey_table_size));
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static int mlx5_query_vendor_id(struct ib_device *ibdev,
+				u32 *vendor_id)
+{
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+
+	switch (mlx5_get_vport_access_method(ibdev)) {
+	case MLX5_VPORT_ACCESS_METHOD_MAD:
+		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
+
+	case MLX5_VPORT_ACCESS_METHOD_HCA:
+	case MLX5_VPORT_ACCESS_METHOD_NIC:
+		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
+				__be64 *node_guid)
+{
+	u64 tmp;
+	int err;
+
+	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
+	case MLX5_VPORT_ACCESS_METHOD_MAD:
+		return mlx5_query_mad_ifc_node_guid(dev, node_guid);
+
+	case MLX5_VPORT_ACCESS_METHOD_HCA:
+		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
+		if (!err)
+			*node_guid = cpu_to_be64(tmp);
+		return err;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+struct mlx5_reg_node_desc {
+	u8	desc[64];
+};
+
+static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
+{
+	struct mlx5_reg_node_desc in;
+
+	if (mlx5_use_mad_ifc(dev))
+		return mlx5_query_mad_ifc_node_desc(dev, node_desc);
+
+	memset(&in, 0, sizeof(in));
+
+	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
+				    sizeof(struct mlx5_reg_node_desc),
+				    MLX5_REG_NODE_DESC, 0, 0);
+}
+
 static int mlx5_ib_query_device(struct ib_device *ibdev,
 				struct ib_device_attr *props,
 				struct ib_udata *uhw)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-	struct ib_smp *in_mad  = NULL;
-	struct ib_smp *out_mad = NULL;
-	struct mlx5_general_caps *gen;
+	struct mlx5_core_dev *mdev = dev->mdev;
 	int err = -ENOMEM;
 	int max_rq_sg;
 	int max_sq_sg;
-	u64 flags;
 
 	if (uhw->inlen || uhw->outlen)
 		return -EINVAL;
 
-	gen = &dev->mdev->caps.gen;
-	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
-	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
-	if (!in_mad || !out_mad)
-		goto out;
-
-	init_query_mad(in_mad);
-	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
-
-	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
-	if (err)
-		goto out;
-
 	memset(props, 0, sizeof(*props));
+	err = mlx5_query_system_image_guid(ibdev,
+					   &props->sys_image_guid);
+	if (err)
+		return err;
+
+	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
+	if (err)
+		return err;
+
+	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
+	if (err)
+		return err;
 
 	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
 		(fw_rev_min(dev->mdev) << 16) |
@@ -100,18 +237,18 @@
 		IB_DEVICE_PORT_ACTIVE_EVENT		|
 		IB_DEVICE_SYS_IMAGE_GUID		|
 		IB_DEVICE_RC_RNR_NAK_GEN;
-	flags = gen->flags;
-	if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
+
+	if (MLX5_CAP_GEN(mdev, pkv))
 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
-	if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
+	if (MLX5_CAP_GEN(mdev, qkv))
 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
-	if (flags & MLX5_DEV_CAP_FLAG_APM)
+	if (MLX5_CAP_GEN(mdev, apm))
 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
 	props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
-	if (flags & MLX5_DEV_CAP_FLAG_XRC)
+	if (MLX5_CAP_GEN(mdev, xrc))
 		props->device_cap_flags |= IB_DEVICE_XRC;
 	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
-	if (flags & MLX5_DEV_CAP_FLAG_SIG_HAND_OVER) {
+	if (MLX5_CAP_GEN(mdev, sho)) {
 		props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
 		/* At this stage no support for signature handover */
 		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
@@ -120,222 +257,271 @@
 		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
 				       IB_GUARD_T10DIF_CSUM;
 	}
-	if (flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)
+	if (MLX5_CAP_GEN(mdev, block_lb_mc))
 		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
 
-	props->vendor_id	   = be32_to_cpup((__be32 *)(out_mad->data + 36)) &
-		0xffffff;
-	props->vendor_part_id	   = be16_to_cpup((__be16 *)(out_mad->data + 30));
-	props->hw_ver		   = be32_to_cpup((__be32 *)(out_mad->data + 32));
-	memcpy(&props->sys_image_guid, out_mad->data +	4, 8);
+	props->vendor_part_id	   = mdev->pdev->device;
+	props->hw_ver		   = mdev->pdev->revision;
 
 	props->max_mr_size	   = ~0ull;
-	props->page_size_cap	   = gen->min_page_sz;
-	props->max_qp		   = 1 << gen->log_max_qp;
-	props->max_qp_wr	   = gen->max_wqes;
-	max_rq_sg = gen->max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
-	max_sq_sg = (gen->max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
-		sizeof(struct mlx5_wqe_data_seg);
+	props->page_size_cap	   = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
+	props->max_qp		   = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
+	props->max_qp_wr	   = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+	max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
+		     sizeof(struct mlx5_wqe_data_seg);
+	max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
+		     sizeof(struct mlx5_wqe_ctrl_seg)) /
+		     sizeof(struct mlx5_wqe_data_seg);
 	props->max_sge = min(max_rq_sg, max_sq_sg);
-	props->max_cq		   = 1 << gen->log_max_cq;
-	props->max_cqe		   = gen->max_cqes - 1;
-	props->max_mr		   = 1 << gen->log_max_mkey;
-	props->max_pd		   = 1 << gen->log_max_pd;
-	props->max_qp_rd_atom	   = 1 << gen->log_max_ra_req_qp;
-	props->max_qp_init_rd_atom = 1 << gen->log_max_ra_res_qp;
-	props->max_srq		   = 1 << gen->log_max_srq;
-	props->max_srq_wr	   = gen->max_srq_wqes - 1;
-	props->local_ca_ack_delay  = gen->local_ca_ack_delay;
+	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
+	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_eq_sz)) - 1;
+	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
+	props->max_pd		   = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
+	props->max_qp_rd_atom	   = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
+	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
+	props->max_srq		   = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
+	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
+	props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
 	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
 	props->max_srq_sge	   = max_rq_sg - 1;
 	props->max_fast_reg_page_list_len = (unsigned int)-1;
-	props->local_ca_ack_delay  = gen->local_ca_ack_delay;
 	props->atomic_cap	   = IB_ATOMIC_NONE;
 	props->masked_atomic_cap   = IB_ATOMIC_NONE;
-	props->max_pkeys	   = be16_to_cpup((__be16 *)(out_mad->data + 28));
-	props->max_mcast_grp	   = 1 << gen->log_max_mcg;
-	props->max_mcast_qp_attach = gen->max_qp_mcg;
+	props->max_mcast_grp	   = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
+	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 					   props->max_mcast_grp;
 	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	if (dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+	if (MLX5_CAP_GEN(mdev, pg))
 		props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
 	props->odp_caps = dev->odp_caps;
 #endif
 
-out:
-	kfree(in_mad);
-	kfree(out_mad);
+	return 0;
+}
 
+enum mlx5_ib_width {
+	MLX5_IB_WIDTH_1X	= 1 << 0,
+	MLX5_IB_WIDTH_2X	= 1 << 1,
+	MLX5_IB_WIDTH_4X	= 1 << 2,
+	MLX5_IB_WIDTH_8X	= 1 << 3,
+	MLX5_IB_WIDTH_12X	= 1 << 4
+};
+
+static int translate_active_width(struct ib_device *ibdev, u8 active_width,
+				  u8 *ib_width)
+{
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	int err = 0;
+
+	if (active_width & MLX5_IB_WIDTH_1X) {
+		*ib_width = IB_WIDTH_1X;
+	} else if (active_width & MLX5_IB_WIDTH_2X) {
+		mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
+			    (int)active_width);
+		err = -EINVAL;
+	} else if (active_width & MLX5_IB_WIDTH_4X) {
+		*ib_width = IB_WIDTH_4X;
+	} else if (active_width & MLX5_IB_WIDTH_8X) {
+		*ib_width = IB_WIDTH_8X;
+	} else if (active_width & MLX5_IB_WIDTH_12X) {
+		*ib_width = IB_WIDTH_12X;
+	} else {
+		mlx5_ib_dbg(dev, "Invalid active_width %d\n",
+			    (int)active_width);
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+static int mlx5_mtu_to_ib_mtu(int mtu)
+{
+	switch (mtu) {
+	case 256: return 1;
+	case 512: return 2;
+	case 1024: return 3;
+	case 2048: return 4;
+	case 4096: return 5;
+	default:
+		pr_warn("invalid mtu\n");
+		return -1;
+	}
+}
+
+enum ib_max_vl_num {
+	__IB_MAX_VL_0		= 1,
+	__IB_MAX_VL_0_1		= 2,
+	__IB_MAX_VL_0_3		= 3,
+	__IB_MAX_VL_0_7		= 4,
+	__IB_MAX_VL_0_14	= 5,
+};
+
+enum mlx5_vl_hw_cap {
+	MLX5_VL_HW_0	= 1,
+	MLX5_VL_HW_0_1	= 2,
+	MLX5_VL_HW_0_2	= 3,
+	MLX5_VL_HW_0_3	= 4,
+	MLX5_VL_HW_0_4	= 5,
+	MLX5_VL_HW_0_5	= 6,
+	MLX5_VL_HW_0_6	= 7,
+	MLX5_VL_HW_0_7	= 8,
+	MLX5_VL_HW_0_14	= 15
+};
+
+static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
+				u8 *max_vl_num)
+{
+	switch (vl_hw_cap) {
+	case MLX5_VL_HW_0:
+		*max_vl_num = __IB_MAX_VL_0;
+		break;
+	case MLX5_VL_HW_0_1:
+		*max_vl_num = __IB_MAX_VL_0_1;
+		break;
+	case MLX5_VL_HW_0_3:
+		*max_vl_num = __IB_MAX_VL_0_3;
+		break;
+	case MLX5_VL_HW_0_7:
+		*max_vl_num = __IB_MAX_VL_0_7;
+		break;
+	case MLX5_VL_HW_0_14:
+		*max_vl_num = __IB_MAX_VL_0_14;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
+			       struct ib_port_attr *props)
+{
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	struct mlx5_core_dev *mdev = dev->mdev;
+	struct mlx5_hca_vport_context *rep;
+	int max_mtu;
+	int oper_mtu;
+	int err;
+	u8 ib_link_width_oper;
+	u8 vl_hw_cap;
+
+	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+	if (!rep) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	memset(props, 0, sizeof(*props));
+
+	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
+	if (err)
+		goto out;
+
+	props->lid		= rep->lid;
+	props->lmc		= rep->lmc;
+	props->sm_lid		= rep->sm_lid;
+	props->sm_sl		= rep->sm_sl;
+	props->state		= rep->vport_state;
+	props->phys_state	= rep->port_physical_state;
+	props->port_cap_flags	= rep->cap_mask1;
+	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
+	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
+	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
+	props->bad_pkey_cntr	= rep->pkey_violation_counter;
+	props->qkey_viol_cntr	= rep->qkey_violation_counter;
+	props->subnet_timeout	= rep->subnet_timeout;
+	props->init_type_reply	= rep->init_type_reply;
+
+	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
+	if (err)
+		goto out;
+
+	err = translate_active_width(ibdev, ib_link_width_oper,
+				     &props->active_width);
+	if (err)
+		goto out;
+	err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB,
+					 port);
+	if (err)
+		goto out;
+
+	mlx5_query_port_max_mtu(mdev, &max_mtu, port);
+
+	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
+
+	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
+
+	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
+
+	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
+	if (err)
+		goto out;
+
+	err = translate_max_vl_num(ibdev, vl_hw_cap,
+				   &props->max_vl_num);
+out:
+	kfree(rep);
 	return err;
 }
 
 int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 		       struct ib_port_attr *props)
 {
-	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-	struct ib_smp *in_mad  = NULL;
-	struct ib_smp *out_mad = NULL;
-	struct mlx5_general_caps *gen;
-	int ext_active_speed;
-	int err = -ENOMEM;
+	switch (mlx5_get_vport_access_method(ibdev)) {
+	case MLX5_VPORT_ACCESS_METHOD_MAD:
+		return mlx5_query_mad_ifc_port(ibdev, port, props);
 
-	gen = &dev->mdev->caps.gen;
-	if (port < 1 || port > gen->num_ports) {
-		mlx5_ib_warn(dev, "invalid port number %d\n", port);
+	case MLX5_VPORT_ACCESS_METHOD_HCA:
+		return mlx5_query_hca_port(ibdev, port, props);
+
+	default:
 		return -EINVAL;
 	}
-
-	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
-	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
-	if (!in_mad || !out_mad)
-		goto out;
-
-	memset(props, 0, sizeof(*props));
-
-	init_query_mad(in_mad);
-	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
-	in_mad->attr_mod = cpu_to_be32(port);
-
-	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
-	if (err) {
-		mlx5_ib_warn(dev, "err %d\n", err);
-		goto out;
-	}
-
-
-	props->lid		= be16_to_cpup((__be16 *)(out_mad->data + 16));
-	props->lmc		= out_mad->data[34] & 0x7;
-	props->sm_lid		= be16_to_cpup((__be16 *)(out_mad->data + 18));
-	props->sm_sl		= out_mad->data[36] & 0xf;
-	props->state		= out_mad->data[32] & 0xf;
-	props->phys_state	= out_mad->data[33] >> 4;
-	props->port_cap_flags	= be32_to_cpup((__be32 *)(out_mad->data + 20));
-	props->gid_tbl_len	= out_mad->data[50];
-	props->max_msg_sz	= 1 << gen->log_max_msg;
-	props->pkey_tbl_len	= gen->port[port - 1].pkey_table_len;
-	props->bad_pkey_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 46));
-	props->qkey_viol_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 48));
-	props->active_width	= out_mad->data[31] & 0xf;
-	props->active_speed	= out_mad->data[35] >> 4;
-	props->max_mtu		= out_mad->data[41] & 0xf;
-	props->active_mtu	= out_mad->data[36] >> 4;
-	props->subnet_timeout	= out_mad->data[51] & 0x1f;
-	props->max_vl_num	= out_mad->data[37] >> 4;
-	props->init_type_reply	= out_mad->data[41] >> 4;
-
-	/* Check if extended speeds (EDR/FDR/...) are supported */
-	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
-		ext_active_speed = out_mad->data[62] >> 4;
-
-		switch (ext_active_speed) {
-		case 1:
-			props->active_speed = 16; /* FDR */
-			break;
-		case 2:
-			props->active_speed = 32; /* EDR */
-			break;
-		}
-	}
-
-	/* If reported active speed is QDR, check if is FDR-10 */
-	if (props->active_speed == 4) {
-		if (gen->ext_port_cap[port - 1] &
-		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
-			init_query_mad(in_mad);
-			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
-			in_mad->attr_mod = cpu_to_be32(port);
-
-			err = mlx5_MAD_IFC(dev, 1, 1, port,
-					   NULL, NULL, in_mad, out_mad);
-			if (err)
-				goto out;
-
-			/* Checking LinkSpeedActive for FDR-10 */
-			if (out_mad->data[15] & 0x1)
-				props->active_speed = 8;
-		}
-	}
-
-out:
-	kfree(in_mad);
-	kfree(out_mad);
-
-	return err;
 }
 
 static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
 			     union ib_gid *gid)
 {
-	struct ib_smp *in_mad  = NULL;
-	struct ib_smp *out_mad = NULL;
-	int err = -ENOMEM;
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	struct mlx5_core_dev *mdev = dev->mdev;
 
-	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
-	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
-	if (!in_mad || !out_mad)
-		goto out;
+	switch (mlx5_get_vport_access_method(ibdev)) {
+	case MLX5_VPORT_ACCESS_METHOD_MAD:
+		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
 
-	init_query_mad(in_mad);
-	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
-	in_mad->attr_mod = cpu_to_be32(port);
+	case MLX5_VPORT_ACCESS_METHOD_HCA:
+		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
 
-	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
-	if (err)
-		goto out;
+	default:
+		return -EINVAL;
+	}
 
-	memcpy(gid->raw, out_mad->data + 8, 8);
-
-	init_query_mad(in_mad);
-	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
-	in_mad->attr_mod = cpu_to_be32(index / 8);
-
-	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
-	if (err)
-		goto out;
-
-	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
-
-out:
-	kfree(in_mad);
-	kfree(out_mad);
-	return err;
 }
 
 static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 			      u16 *pkey)
 {
-	struct ib_smp *in_mad  = NULL;
-	struct ib_smp *out_mad = NULL;
-	int err = -ENOMEM;
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	struct mlx5_core_dev *mdev = dev->mdev;
 
-	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
-	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
-	if (!in_mad || !out_mad)
-		goto out;
+	switch (mlx5_get_vport_access_method(ibdev)) {
+	case MLX5_VPORT_ACCESS_METHOD_MAD:
+		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
 
-	init_query_mad(in_mad);
-	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
-	in_mad->attr_mod = cpu_to_be32(index / 32);
-
-	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
-	if (err)
-		goto out;
-
-	*pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);
-
-out:
-	kfree(in_mad);
-	kfree(out_mad);
-	return err;
+	case MLX5_VPORT_ACCESS_METHOD_HCA:
+	case MLX5_VPORT_ACCESS_METHOD_NIC:
+		return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
+						 pkey);
+	default:
+		return -EINVAL;
+	}
 }
 
-struct mlx5_reg_node_desc {
-	u8	desc[64];
-};
-
 static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
 				 struct ib_device_modify *props)
 {
@@ -396,7 +582,6 @@
 	struct mlx5_ib_alloc_ucontext_req_v2 req;
 	struct mlx5_ib_alloc_ucontext_resp resp;
 	struct mlx5_ib_ucontext *context;
-	struct mlx5_general_caps *gen;
 	struct mlx5_uuar_info *uuari;
 	struct mlx5_uar *uars;
 	int gross_uuars;
@@ -407,7 +592,6 @@
 	int i;
 	size_t reqlen;
 
-	gen = &dev->mdev->caps.gen;
 	if (!dev->ib_active)
 		return ERR_PTR(-EAGAIN);
 
@@ -440,14 +624,14 @@
 
 	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
 	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
-	resp.qp_tab_size      = 1 << gen->log_max_qp;
-	resp.bf_reg_size      = gen->bf_reg_size;
-	resp.cache_line_size  = L1_CACHE_BYTES;
-	resp.max_sq_desc_sz = gen->max_sq_desc_sz;
-	resp.max_rq_desc_sz = gen->max_rq_desc_sz;
-	resp.max_send_wqebb = gen->max_wqes;
-	resp.max_recv_wr = gen->max_wqes;
-	resp.max_srq_recv_wr = gen->max_srq_wqes;
+	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
+	resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
+	resp.cache_line_size = L1_CACHE_BYTES;
+	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
+	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
+	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
+	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
+	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
 
 	context = kzalloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
@@ -497,7 +681,7 @@
 	mutex_init(&context->db_page_mutex);
 
 	resp.tot_uuars = req.total_num_uuars;
-	resp.num_ports = gen->num_ports;
+	resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
 	err = ib_copy_to_udata(udata, &resp,
 			       sizeof(resp) - sizeof(resp.reserved));
 	if (err)
@@ -735,37 +919,15 @@
 
 static int init_node_data(struct mlx5_ib_dev *dev)
 {
-	struct ib_smp *in_mad  = NULL;
-	struct ib_smp *out_mad = NULL;
-	int err = -ENOMEM;
+	int err;
 
-	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
-	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
-	if (!in_mad || !out_mad)
-		goto out;
-
-	init_query_mad(in_mad);
-	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
-
-	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
 	if (err)
-		goto out;
+		return err;
 
-	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
+	dev->mdev->rev_id = dev->mdev->pdev->revision;
 
-	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
-
-	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
-	if (err)
-		goto out;
-
-	dev->mdev->rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
-	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
-
-out:
-	kfree(in_mad);
-	kfree(out_mad);
-	return err;
+	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
 }
 
 static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
@@ -899,11 +1061,9 @@
 
 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
 {
-	struct mlx5_general_caps *gen;
 	int port;
 
-	gen = &dev->mdev->caps.gen;
-	for (port = 1; port <= gen->num_ports; port++)
+	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
 		mlx5_query_ext_port_caps(dev, port);
 }
 
@@ -911,12 +1071,10 @@
 {
 	struct ib_device_attr *dprops = NULL;
 	struct ib_port_attr *pprops = NULL;
-	struct mlx5_general_caps *gen;
 	int err = -ENOMEM;
 	int port;
 	struct ib_udata uhw = {.inlen = 0, .outlen = 0};
 
-	gen = &dev->mdev->caps.gen;
 	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
 	if (!pprops)
 		goto out;
@@ -931,14 +1089,17 @@
 		goto out;
 	}
 
-	for (port = 1; port <= gen->num_ports; port++) {
+	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
 		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
 		if (err) {
-			mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err);
+			mlx5_ib_warn(dev, "query_port %d failed %d\n",
+				     port, err);
 			break;
 		}
-		gen->port[port - 1].pkey_table_len = dprops->max_pkeys;
-		gen->port[port - 1].gid_table_len = pprops->gid_tbl_len;
+		dev->mdev->port_caps[port - 1].pkey_table_len =
+						dprops->max_pkeys;
+		dev->mdev->port_caps[port - 1].gid_table_len =
+						pprops->gid_tbl_len;
 		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
 			    dprops->max_pkeys, pprops->gid_tbl_len);
 	}
@@ -1167,8 +1328,29 @@
 	atomic_inc(&devr->p0->usecnt);
 	atomic_set(&devr->s0->usecnt, 0);
 
+	memset(&attr, 0, sizeof(attr));
+	attr.attr.max_sge = 1;
+	attr.attr.max_wr = 1;
+	attr.srq_type = IB_SRQT_BASIC;
+	devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
+	if (IS_ERR(devr->s1)) {
+		ret = PTR_ERR(devr->s1);
+		goto error5;
+	}
+	devr->s1->device	= &dev->ib_dev;
+	devr->s1->pd		= devr->p0;
+	devr->s1->uobject       = NULL;
+	devr->s1->event_handler = NULL;
+	devr->s1->srq_context   = NULL;
+	devr->s1->srq_type      = IB_SRQT_BASIC;
+	devr->s1->ext.xrc.cq	= devr->c0;
+	atomic_inc(&devr->p0->usecnt);
+	atomic_set(&devr->s0->usecnt, 0);
+
 	return 0;
 
+error5:
+	mlx5_ib_destroy_srq(devr->s0);
 error4:
 	mlx5_ib_dealloc_xrcd(devr->x1);
 error3:
@@ -1183,6 +1365,7 @@
 
 static void destroy_dev_resources(struct mlx5_ib_resources *devr)
 {
+	mlx5_ib_destroy_srq(devr->s1);
 	mlx5_ib_destroy_srq(devr->s0);
 	mlx5_ib_dealloc_xrcd(devr->x0);
 	mlx5_ib_dealloc_xrcd(devr->x1);
@@ -1214,6 +1397,10 @@
 	int err;
 	int i;
 
+	/* don't create IB instance over Eth ports, no RoCE yet! */
+	if (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
+		return NULL;
+
 	printk_once(KERN_INFO "%s", mlx5_version);
 
 	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
@@ -1226,15 +1413,16 @@
 	if (err)
 		goto err_dealloc;
 
-	get_ext_port_caps(dev);
+	if (mlx5_use_mad_ifc(dev))
+		get_ext_port_caps(dev);
 
 	MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
 
 	strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
 	dev->ib_dev.owner		= THIS_MODULE;
 	dev->ib_dev.node_type		= RDMA_NODE_IB_CA;
-	dev->ib_dev.local_dma_lkey	= mdev->caps.gen.reserved_lkey;
-	dev->num_ports		= mdev->caps.gen.num_ports;
+	dev->ib_dev.local_dma_lkey	= 0 /* not supported for now */;
+	dev->num_ports		= MLX5_CAP_GEN(mdev, num_ports);
 	dev->ib_dev.phys_port_cnt     = dev->num_ports;
 	dev->ib_dev.num_comp_vectors    =
 		dev->mdev->priv.eq_table.num_comp_vectors;
@@ -1313,9 +1501,9 @@
 	dev->ib_dev.check_mr_status	= mlx5_ib_check_mr_status;
 	dev->ib_dev.get_port_immutable  = mlx5_port_immutable;
 
-	mlx5_ib_internal_query_odp_caps(dev);
+	mlx5_ib_internal_fill_odp_caps(dev);
 
-	if (mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_XRC) {
+	if (MLX5_CAP_GEN(mdev, xrc)) {
 		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
 		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
 		dev->ib_dev.uverbs_cmd_mask |=
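
The central refactor in main.c: every query entry point now branches once
on mlx5_get_vport_access_method() -- MAD commands while ISSI is 0, HCA
vport context commands otherwise, with a NIC method reserved for Ethernet
ports.  A schematic of that dispatch shape (enum names and return codes
are illustrative):

	enum access_method { VIA_MAD, VIA_HCA, VIA_NIC };

	/* one switch per query; unsupported combinations fail with -EINVAL
	 * (modeled as -22 here), mirroring the pattern above */
	static int query_port_model(enum access_method how)
	{
		switch (how) {
		case VIA_MAD:
			return 1;	/* firmware MAD_IFC path */
		case VIA_HCA:
			return 2;	/* HCA vport context path */
		default:
			return -22;	/* no Ethernet query path yet */
		}
	}
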
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 178314e..7cae098 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -415,6 +415,7 @@
 	struct ib_xrcd	*x1;
 	struct ib_pd	*p0;
 	struct ib_srq	*s0;
+	struct ib_srq	*s1;
 };
 
 struct mlx5_ib_dev {
@@ -597,6 +598,22 @@
 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
 int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
 int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
+int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
+					  struct ib_smp *out_mad);
+int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
+					 __be64 *sys_image_guid);
+int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
+				 u16 *max_pkeys);
+int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
+				 u32 *vendor_id);
+int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
+int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
+int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
+			    u16 *pkey);
+int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
+			    union ib_gid *gid);
+int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
+			    struct ib_port_attr *props);
 int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 		       struct ib_port_attr *props);
 int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
@@ -620,7 +637,7 @@
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 extern struct workqueue_struct *mlx5_ib_page_fault_wq;
 
-int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev);
+void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
 void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
 			       struct mlx5_ib_pfault *pfault);
 void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
@@ -634,9 +651,9 @@
 			      unsigned long end);
 
 #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
-static inline int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
+static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 {
-	return 0;
+	return;
 }
 
 static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)		{}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 71c5935..bc9a0de 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -975,8 +975,7 @@
 	struct mlx5_ib_mr *mr;
 	int inlen;
 	int err;
-	bool pg_cap = !!(dev->mdev->caps.gen.flags &
-			 MLX5_DEV_CAP_FLAG_ON_DMND_PG);
+	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
 
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
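
The pg_cap conversion above also shows the kernel's "!!" idiom: masking a
capability word yields an arbitrary nonzero value, and "!!" normalizes it
to 0 or 1 before it is stored in a bool.  A standalone illustration:

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int caps = 0x40;		/* some capability word */
		bool pg_cap = !!(caps & 0x40);		/* !! folds nonzero to 1 */

		printf("%d\n", pg_cap);			/* prints 1 */
		return 0;
	}
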
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 5099db0..aa8391e 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -109,40 +109,33 @@
 	ib_umem_odp_unmap_dma_pages(umem, start, end);
 }
 
-#define COPY_ODP_BIT_MLX_TO_IB(reg, ib_caps, field_name, bit_name) do {	\
-	if (be32_to_cpu(reg.field_name) & MLX5_ODP_SUPPORT_##bit_name)	\
-		ib_caps->field_name |= IB_ODP_SUPPORT_##bit_name;	\
-} while (0)
-
-int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
+void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 {
-	int err;
-	struct mlx5_odp_caps hw_caps;
 	struct ib_odp_caps *caps = &dev->odp_caps;
 
 	memset(caps, 0, sizeof(*caps));
 
-	if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
-		return 0;
-
-	err = mlx5_query_odp_caps(dev->mdev, &hw_caps);
-	if (err)
-		goto out;
+	if (!MLX5_CAP_GEN(dev->mdev, pg))
+		return;
 
 	caps->general_caps = IB_ODP_SUPPORT;
-	COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.ud_odp_caps,
-			       SEND);
-	COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
-			       SEND);
-	COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
-			       RECV);
-	COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
-			       WRITE);
-	COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
-			       READ);
 
-out:
-	return err;
+	if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
+		caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
+
+	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
+		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
+
+	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
+		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
+
+	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
+		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
+
+	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
+		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
+
+	return;
 }
 
 static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
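
odp.c drops the COPY_ODP_BIT_MLX_TO_IB macro in favor of explicit
MLX5_CAP_ODP() tests, and the query becomes infallible (void) since it now
reads cached capabilities.  The translation itself is plain bit-mapping; a
sketch with illustrative bit names (not the mlx5 or IB layouts):

	#include <stdint.h>

	#define HW_SEND (1u << 0)	/* illustrative bits only */
	#define HW_RECV (1u << 1)
	#define IB_SEND (1u << 4)
	#define IB_RECV (1u << 5)

	static uint32_t hw_to_ib_caps(uint32_t hw)
	{
		uint32_t ib = 0;

		if (hw & HW_SEND)
			ib |= IB_SEND;
		if (hw & HW_RECV)
			ib |= IB_RECV;
		return ib;
	}
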
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index d35f62d..203c8a4 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -220,13 +220,11 @@
 static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
 		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
 {
-	struct mlx5_general_caps *gen;
 	int wqe_size;
 	int wq_size;
 
-	gen = &dev->mdev->caps.gen;
 	/* Sanity check RQ size before proceeding */
-	if (cap->max_recv_wr  > gen->max_wqes)
+	if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
 		return -EINVAL;
 
 	if (!has_rq) {
@@ -246,10 +244,11 @@
 			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
 			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
 			qp->rq.wqe_cnt = wq_size / wqe_size;
-			if (wqe_size > gen->max_rq_desc_sz) {
+			if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
 				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
 					    wqe_size,
-					    gen->max_rq_desc_sz);
+					    MLX5_CAP_GEN(dev->mdev,
+							 max_wqe_sz_rq));
 				return -EINVAL;
 			}
 			qp->rq.wqe_shift = ilog2(wqe_size);
@@ -330,11 +329,9 @@
 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 			struct mlx5_ib_qp *qp)
 {
-	struct mlx5_general_caps *gen;
 	int wqe_size;
 	int wq_size;
 
-	gen = &dev->mdev->caps.gen;
 	if (!attr->cap.max_send_wr)
 		return 0;
 
@@ -343,9 +340,9 @@
 	if (wqe_size < 0)
 		return wqe_size;
 
-	if (wqe_size > gen->max_sq_desc_sz) {
+	if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
 		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
-			    wqe_size, gen->max_sq_desc_sz);
+			    wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
 		return -EINVAL;
 	}
 
@@ -358,9 +355,10 @@
 
 	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
 	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
-	if (qp->sq.wqe_cnt > gen->max_wqes) {
+	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
 		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
-			    qp->sq.wqe_cnt, gen->max_wqes);
+			    qp->sq.wqe_cnt,
+			    1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
 		return -ENOMEM;
 	}
 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -375,13 +373,11 @@
 			    struct mlx5_ib_qp *qp,
 			    struct mlx5_ib_create_qp *ucmd)
 {
-	struct mlx5_general_caps *gen;
 	int desc_sz = 1 << qp->sq.wqe_shift;
 
-	gen = &dev->mdev->caps.gen;
-	if (desc_sz > gen->max_sq_desc_sz) {
+	if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
 		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
-			     desc_sz, gen->max_sq_desc_sz);
+			     desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
 		return -EINVAL;
 	}
 
@@ -393,9 +389,10 @@
 
 	qp->sq.wqe_cnt = ucmd->sq_wqe_count;
 
-	if (qp->sq.wqe_cnt > gen->max_wqes) {
+	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
 		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
-			     qp->sq.wqe_cnt, gen->max_wqes);
+			     qp->sq.wqe_cnt,
+			     1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
 		return -EINVAL;
 	}
 
@@ -768,7 +765,7 @@
 	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
 	qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
 
-	err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
+	err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf);
 	if (err) {
 		mlx5_ib_dbg(dev, "err %d\n", err);
 		goto err_uuar;
@@ -866,22 +863,21 @@
 			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
 {
 	struct mlx5_ib_resources *devr = &dev->devr;
+	struct mlx5_core_dev *mdev = dev->mdev;
 	struct mlx5_ib_create_qp_resp resp;
 	struct mlx5_create_qp_mbox_in *in;
-	struct mlx5_general_caps *gen;
 	struct mlx5_ib_create_qp ucmd;
 	int inlen = sizeof(*in);
 	int err;
 
 	mlx5_ib_odp_create_qp(qp);
 
-	gen = &dev->mdev->caps.gen;
 	mutex_init(&qp->mutex);
 	spin_lock_init(&qp->sq.lock);
 	spin_lock_init(&qp->rq.lock);
 
 	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
-		if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
+		if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
 			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
 			return -EINVAL;
 		} else {
@@ -914,15 +910,17 @@
 
 	if (pd) {
 		if (pd->uobject) {
+			__u32 max_wqes =
+				1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
 			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
 			if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
 			    ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
 				mlx5_ib_dbg(dev, "invalid rq params\n");
 				return -EINVAL;
 			}
-			if (ucmd.sq_wqe_count > gen->max_wqes) {
+			if (ucmd.sq_wqe_count > max_wqes) {
 				mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
-					    ucmd.sq_wqe_count, gen->max_wqes);
+					    ucmd.sq_wqe_count, max_wqes);
 				return -EINVAL;
 			}
 			err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
@@ -1014,7 +1012,8 @@
 			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
 		} else {
 			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
-			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
+			in->ctx.rq_type_srqn |=
+				cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
 		}
 	}
 
@@ -1226,7 +1225,6 @@
 				struct ib_qp_init_attr *init_attr,
 				struct ib_udata *udata)
 {
-	struct mlx5_general_caps *gen;
 	struct mlx5_ib_dev *dev;
 	struct mlx5_ib_qp *qp;
 	u16 xrcdn = 0;
@@ -1244,12 +1242,11 @@
 		}
 		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
 	}
-	gen = &dev->mdev->caps.gen;
 
 	switch (init_attr->qp_type) {
 	case IB_QPT_XRC_TGT:
 	case IB_QPT_XRC_INI:
-		if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) {
+		if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
 			mlx5_ib_dbg(dev, "XRC not supported\n");
 			return ERR_PTR(-ENOSYS);
 		}
@@ -1356,9 +1353,6 @@
 
 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
 {
-	struct mlx5_general_caps *gen;
-
-	gen = &dev->mdev->caps.gen;
 	if (rate == IB_RATE_PORT_CURRENT) {
 		return 0;
 	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
@@ -1366,7 +1360,7 @@
 	} else {
 		while (rate != IB_RATE_2_5_GBPS &&
 		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
-			 gen->stat_rate_support))
+			 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
 			--rate;
 	}
 
@@ -1377,10 +1371,8 @@
 			 struct mlx5_qp_path *path, u8 port, int attr_mask,
 			 u32 path_flags, const struct ib_qp_attr *attr)
 {
-	struct mlx5_general_caps *gen;
 	int err;
 
-	gen = &dev->mdev->caps.gen;
 	path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
 	path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
 
@@ -1391,9 +1383,11 @@
 	path->rlid	= cpu_to_be16(ah->dlid);
 
 	if (ah->ah_flags & IB_AH_GRH) {
-		if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
+		if (ah->grh.sgid_index >=
+		    dev->mdev->port_caps[port - 1].gid_table_len) {
 			pr_err("sgid_index (%u) too large. max is %d\n",
-			       ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
+			       ah->grh.sgid_index,
+			       dev->mdev->port_caps[port - 1].gid_table_len);
 			return -EINVAL;
 		}
 		path->grh_mlid |= 1 << 7;
@@ -1570,7 +1564,6 @@
 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
 	struct mlx5_ib_cq *send_cq, *recv_cq;
 	struct mlx5_qp_context *context;
-	struct mlx5_general_caps *gen;
 	struct mlx5_modify_qp_mbox_in *in;
 	struct mlx5_ib_pd *pd;
 	enum mlx5_qp_state mlx5_cur, mlx5_new;
@@ -1579,7 +1572,6 @@
 	int mlx5_st;
 	int err;
 
-	gen = &dev->mdev->caps.gen;
 	in = kzalloc(sizeof(*in), GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
@@ -1619,7 +1611,8 @@
 			err = -EINVAL;
 			goto out;
 		}
-		context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg;
+		context->mtu_msgmax = (attr->path_mtu << 5) |
+				      (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
 	}
 
 	if (attr_mask & IB_QP_DEST_QPN)
@@ -1777,11 +1770,9 @@
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
-	struct mlx5_general_caps *gen;
 	int err = -EINVAL;
 	int port;
 
-	gen = &dev->mdev->caps.gen;
 	mutex_lock(&qp->mutex);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
@@ -1793,21 +1784,25 @@
 		goto out;
 
 	if ((attr_mask & IB_QP_PORT) &&
-	    (attr->port_num == 0 || attr->port_num > gen->num_ports))
+	    (attr->port_num == 0 ||
+	     attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
 		goto out;
 
 	if (attr_mask & IB_QP_PKEY_INDEX) {
 		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
-		if (attr->pkey_index >= gen->port[port - 1].pkey_table_len)
+		if (attr->pkey_index >=
+		    dev->mdev->port_caps[port - 1].pkey_table_len)
 			goto out;
 	}
 
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-	    attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp))
+	    attr->max_rd_atomic >
+	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
 		goto out;
 
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-	    attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp))
+	    attr->max_dest_rd_atomic >
+	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
 		goto out;
 
 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
@@ -3009,7 +3004,7 @@
 	ib_ah_attr->port_num	  = path->port;
 
 	if (ib_ah_attr->port_num == 0 ||
-	    ib_ah_attr->port_num > dev->caps.gen.num_ports)
+	    ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
 		return;
 
 	ib_ah_attr->sl = path->sl & 0xf;
@@ -3135,12 +3130,10 @@
 					  struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-	struct mlx5_general_caps *gen;
 	struct mlx5_ib_xrcd *xrcd;
 	int err;
 
-	gen = &dev->mdev->caps.gen;
-	if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC))
+	if (!MLX5_CAP_GEN(dev->mdev, xrc))
 		return ERR_PTR(-ENOSYS);
 
 	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
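
The qp.c sizing checks above now derive every limit on demand from log2
capabilities: the requested depth is rounded up to a power of two and
rejected if it exceeds 1 << log_max.  A compact model of that check, with
log_max_qp_sz standing in for the MLX5_CAP_GEN() read:

	#include <errno.h>

	static unsigned int roundup_pow2(unsigned int x)
	{
		unsigned int r = 1;

		while (r < x)
			r <<= 1;
		return r;
	}

	/* returns the rounded depth, or -EINVAL past the device limit */
	static int check_wq_depth(unsigned int requested,
				  unsigned int log_max_qp_sz)
	{
		unsigned int cnt = roundup_pow2(requested);

		if (cnt > (1u << log_max_qp_sz))
			return -EINVAL;
		return (int)cnt;
	}
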
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 02d77a2..e008505 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -165,7 +165,7 @@
 		return err;
 	}
 
-	if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
+	if (mlx5_buf_alloc(dev->mdev, buf_size, &srq->buf)) {
 		mlx5_ib_dbg(dev, "buf alloc failed\n");
 		err = -ENOMEM;
 		goto err_db;
@@ -236,7 +236,6 @@
 				  struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-	struct mlx5_general_caps *gen;
 	struct mlx5_ib_srq *srq;
 	int desc_size;
 	int buf_size;
@@ -245,13 +244,13 @@
 	int uninitialized_var(inlen);
 	int is_xrc;
 	u32 flgs, xrcdn;
+	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
 
-	gen = &dev->mdev->caps.gen;
 	/* Sanity check SRQ size before proceeding */
-	if (init_attr->attr.max_wr >= gen->max_srq_wqes) {
+	if (init_attr->attr.max_wr >= max_srq_wqes) {
 		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
 			    init_attr->attr.max_wr,
-			    gen->max_srq_wqes);
+			    max_srq_wqes);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -303,7 +302,7 @@
 
 	in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
 	in->ctx.db_record = cpu_to_be64(srq->db.dma);
-	err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen);
+	err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc);
 	kvfree(in);
 	if (err) {
 		mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 1cc6ca8..85cfa4f 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -2264,7 +2264,7 @@
 		return -1;
 	}
 	card->owner = THIS_MODULE;
-	init_timer(&card->listentimer);
+	setup_timer(&card->listentimer, listentimerfunc, (unsigned long)card);
 	strcpy(card->name, id);
 	card->contrnr = contr;
 	card->nbchan = profp->nbchannel;
@@ -2331,8 +2331,6 @@
 	card->cipmask = 0x1FFF03FF;	/* any */
 	card->cipmask2 = 0;
 
-	card->listentimer.data = (unsigned long)card;
-	card->listentimer.function = listentimerfunc;
 	send_listen(card);
 	mod_timer(&card->listentimer, jiffies + 60 * HZ);
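
The capidrv change collapses init_timer() plus hand-assignment of
.function and .data into a single setup_timer() call, so the timer never
exists half-initialized.  A userspace model of what the helper does (the
struct and names are modeled, not the kernel API):

	#include <stdio.h>

	struct timer_model {
		void (*function)(unsigned long);
		unsigned long data;
	};

	/* bind handler and context in one step, like setup_timer() */
	static void setup_timer_model(struct timer_model *t,
				      void (*fn)(unsigned long),
				      unsigned long data)
	{
		t->function = fn;
		t->data = data;
	}

	static void tick(unsigned long data)
	{
		printf("tick %lu\n", data);
	}

	int main(void)
	{
		struct timer_model t;

		setup_timer_model(&t, tick, 42);
		t.function(t.data);	/* fire once, for demonstration */
		return 0;
	}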
 
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig
index 97465ac..eb83d94 100644
--- a/drivers/isdn/hisax/Kconfig
+++ b/drivers/isdn/hisax/Kconfig
@@ -237,7 +237,7 @@
 
 config HISAX_NETJET
 	bool "NETjet card"
-	depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN)))
+	depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN) || MICROBLAZE))
 	depends on VIRT_TO_BUS
 	help
 	  This enables HiSax support for the NetJet from Traverse
@@ -249,7 +249,7 @@
 
 config HISAX_NETJET_U
 	bool "NETspider U card"
-	depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN)))
+	depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN) || MICROBLAZE))
 	depends on VIRT_TO_BUS
 	help
 	  This enables HiSax support for the Netspider U interface ISDN card
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index ead0a4f..a0fdbc0 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -267,8 +267,8 @@
 	}
 
 	// The descriptor is wrong for some early samples of the ST5481 chip
-	altsetting->endpoint[3].desc.wMaxPacketSize = __constant_cpu_to_le16(32);
-	altsetting->endpoint[4].desc.wMaxPacketSize = __constant_cpu_to_le16(32);
+	altsetting->endpoint[3].desc.wMaxPacketSize = cpu_to_le16(32);
+	altsetting->endpoint[4].desc.wMaxPacketSize = cpu_to_le16(32);
 
 	// Use alternative setting 3 on interface 0 to have 2B+D
 	if ((status = usb_set_interface(dev, 0, 3)) < 0) {
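
__constant_cpu_to_le16() is the legacy spelling; plain cpu_to_le16()
already constant-folds when given a compile-time constant, so the explicit
form buys nothing.  A userspace model of the conversion, which byte-swaps
only on big-endian hosts:

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t to_le16(uint16_t v)	/* model of cpu_to_le16() */
	{
	#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		return (uint16_t)((v << 8) | (v >> 8));
	#else
		return v;	/* little-endian host: identity */
	#endif
	}

	int main(void)
	{
		printf("0x%04x\n", to_le16(32));	/* wMaxPacketSize = 32 */
		return 0;
	}
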
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 8dc7290..0d29b5a 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -601,14 +601,14 @@
 };
 
 static int
-data_sock_create(struct net *net, struct socket *sock, int protocol)
+data_sock_create(struct net *net, struct socket *sock, int protocol, int kern)
 {
 	struct sock *sk;
 
 	if (sock->type != SOCK_DGRAM)
 		return -ESOCKTNOSUPPORT;
 
-	sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto);
+	sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
 	if (!sk)
 		return -ENOMEM;
 
@@ -756,14 +756,14 @@
 
 
 static int
-base_sock_create(struct net *net, struct socket *sock, int protocol)
+base_sock_create(struct net *net, struct socket *sock, int protocol, int kern)
 {
 	struct sock *sk;
 
 	if (sock->type != SOCK_RAW)
 		return -ESOCKTNOSUPPORT;
 
-	sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto);
+	sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
 	if (!sk)
 		return -ENOMEM;
 
@@ -785,7 +785,7 @@
 
 	switch (proto) {
 	case ISDN_P_BASE:
-		err = base_sock_create(net, sock, proto);
+		err = base_sock_create(net, sock, proto, kern);
 		break;
 	case ISDN_P_TE_S0:
 	case ISDN_P_NT_S0:
@@ -799,7 +799,7 @@
 	case ISDN_P_B_L2DTMF:
 	case ISDN_P_B_L2DSP:
 	case ISDN_P_B_L2DSPHDLC:
-		err = data_sock_create(net, sock, proto);
+		err = data_sock_create(net, sock, proto, kern);
 		break;
 	default:
 		return err;
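
The mISDN socket updates track a tree-wide API change: a protocol family's
->create() hook now receives a 'kern' flag (kernel- vs. user-initiated
socket) and must forward it to sk_alloc(). A hedged sketch of the resulting
callback shape, with an illustrative proto in place of mISDN's own:

    #include <linux/module.h>
    #include <net/sock.h>

    static struct proto example_proto = {
            .name     = "EXAMPLE",
            .owner    = THIS_MODULE,
            .obj_size = sizeof(struct sock),
    };

    static int example_sock_create(struct net *net, struct socket *sock,
                                   int protocol, int kern)
    {
            struct sock *sk;

            if (sock->type != SOCK_DGRAM)
                    return -ESOCKTNOSUPPORT;

            /* 'kern' is simply passed through; the core uses it, e.g.
             * to decide whether the socket pins its network namespace
             */
            sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &example_proto, kern);
            if (!sk)
                    return -ENOMEM;

            sock_init_data(sock, sk);
            return 0;
    }
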
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
index 433f823..ec1f46a 100644
--- a/drivers/mfd/janz-cmodio.c
+++ b/drivers/mfd/janz-cmodio.c
@@ -267,6 +267,10 @@
 static const struct pci_device_id cmodio_pci_ids[] = {
 	{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_JANZ, 0x0101 },
 	{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_JANZ, 0x0100 },
+	{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_JANZ, 0x0201 },
+	{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_JANZ, 0x0202 },
+	{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_JANZ, 0x0201 },
+	{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_JANZ, 0x0202 },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, cmodio_pci_ids);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index df51d60..019fcef 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -179,6 +179,20 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called vxlan.
 
+config GENEVE
+	tristate "Generic Network Virtualization Encapsulation netdev"
+	depends on INET && GENEVE_CORE
+	select NET_IP_TUNNEL
+	---help---
+	  This allows one to create geneve virtual interfaces that provide
+	  Layer 2 Networks over Layer 3 Networks. GENEVE is often used
+	  to tunnel virtual network infrastructure in virtualized environments.
+	  For more information see:
+	    http://tools.ietf.org/html/draft-gross-geneve-02
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called geneve.
+
 config NETCONSOLE
 	tristate "Network console logging support"
 	---help---
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index e25fdd7..c12cb22 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -23,6 +23,7 @@
 obj-$(CONFIG_VETH) += veth.o
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
 obj-$(CONFIG_VXLAN) += vxlan.o
+obj-$(CONFIG_GENEVE) += geneve.o
 obj-$(CONFIG_NLMON) += nlmon.o
 
 #
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig
index 84fb634..2a9c3c3 100644
--- a/drivers/net/arcnet/Kconfig
+++ b/drivers/net/arcnet/Kconfig
@@ -15,10 +15,6 @@
 	  COM90xx type card, so say Y (or M) to "ARCnet COM90xx chipset
 	  support" below.
 
-	  You might also want to have a look at the Ethernet-HOWTO, available
-	  from <http://www.tldp.org/docs.html#howto>(even though ARCnet
-	  is not really Ethernet).
-
 	  To compile this driver as a module, choose M here. The module will
 	  be called arcnet.
 
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index fbd54f0..7fde4d5 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -75,10 +75,10 @@
 /* Port Key definitions
  * key is determined according to the link speed, duplex and
  * user key (which is not yet supported)
- * --------------------------------------------------------------
- * Port key :	| User key	| Speed		| Duplex	|
- * --------------------------------------------------------------
- * 16		  6		  1		  0
+ *           --------------------------------------------------------------
+ * Port key  | User key (10 bits)           | Speed (5 bits)      | Duplex|
+ *           --------------------------------------------------------------
+ *           |15                           6|5                   1|0
  */
 #define  AD_DUPLEX_KEY_MASKS    0x1
 #define  AD_SPEED_KEY_MASKS     0x3E
@@ -1908,8 +1908,14 @@
 
 		BOND_AD_INFO(bond).aggregator_identifier = 0;
 
-		BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
-		BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
+		BOND_AD_INFO(bond).system.sys_priority =
+			bond->params.ad_actor_sys_prio;
+		if (is_zero_ether_addr(bond->params.ad_actor_system))
+			BOND_AD_INFO(bond).system.sys_mac_addr =
+			    *((struct mac_addr *)bond->dev->dev_addr);
+		else
+			BOND_AD_INFO(bond).system.sys_mac_addr =
+			    *((struct mac_addr *)bond->params.ad_actor_system);
 
 		/* initialize how many times this module is called in one
 		 * second (should be about every 100ms)
@@ -1945,10 +1951,10 @@
 
 		port->slave = slave;
 		port->actor_port_number = SLAVE_AD_INFO(slave)->id;
-		/* key is determined according to the link speed, duplex and user key(which
-		 * is yet not supported)
+		/* key is determined according to the link speed, duplex and
+		 * user key
 		 */
-		port->actor_admin_port_key = 0;
+		port->actor_admin_port_key = bond->params.ad_user_port_key << 6;
 		port->actor_admin_port_key |= __get_duplex(port);
 		port->actor_admin_port_key |= (__get_link_speed(port) << 1);
 		port->actor_oper_port_key = port->actor_admin_port_key;
@@ -1959,6 +1965,8 @@
 			port->sm_vars &= ~AD_PORT_LACP_ENABLED;
 		/* actor system is the bond's system */
 		port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
+		port->actor_system_priority =
+		    BOND_AD_INFO(bond).system.sys_priority;
 		/* tx timer(to verify that no more than MAX_TX_IN_SECOND
 		 * lacpdu's are sent in one second)
 		 */
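
The admin port key set above is packed exactly as the new layout comment
describes: user key in bits 15..6, speed class in bits 5..1, duplex in
bit 0. A stand-alone sketch that packs and unpacks such a key; the sample
field values (and the AD_USER_KEY_MASKS name) are made up for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define AD_DUPLEX_KEY_MASKS 0x0001
    #define AD_SPEED_KEY_MASKS  0x003E
    #define AD_USER_KEY_MASKS   0xFFC0  /* illustrative, bits 15..6 */

    /* user key shifted to bits 15..6, speed to 5..1, duplex in bit 0 */
    static uint16_t make_port_key(uint16_t user_key, uint8_t speed,
                                  uint8_t duplex)
    {
            uint16_t key = (uint16_t)(user_key << 6);

            key |= (uint16_t)(speed << 1) & AD_SPEED_KEY_MASKS;
            key |= duplex & AD_DUPLEX_KEY_MASKS;
            return key;
    }

    int main(void)
    {
            uint16_t key = make_port_key(5, 4, 1);  /* sample values */

            printf("port key = 0x%04x\n", key);
            printf("  user key: %u\n", (key & AD_USER_KEY_MASKS) >> 6);
            printf("  speed:    %u\n", (key & AD_SPEED_KEY_MASKS) >> 1);
            printf("  duplex:   %u\n", key & AD_DUPLEX_KEY_MASKS);
            return 0;
    }
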
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d5fe5d5..19eb990 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -76,7 +76,7 @@
 #include <net/netns/generic.h>
 #include <net/pkt_sched.h>
 #include <linux/rculist.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
 #include <net/switchdev.h>
 #include <net/bonding.h>
 #include <net/bond_3ad.h>
@@ -1015,10 +1015,7 @@
 	netdev_features_t mask;
 	struct slave *slave;
 
-	/* If any slave has the offload feature flag set,
-	 * set the offload flag on the bond.
-	 */
-	mask = features | NETIF_F_HW_SWITCH_OFFLOAD;
+	mask = features;
 
 	features &= ~NETIF_F_ONE_FOR_ALL;
 	features |= NETIF_F_ALL_FOR_ALL;
@@ -3054,16 +3051,15 @@
 	int noff, proto = -1;
 
 	if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
-		return skb_flow_dissect(skb, fk);
+		return skb_flow_dissect_flow_keys(skb, fk);
 
-	fk->ports = 0;
+	fk->ports.ports = 0;
 	noff = skb_network_offset(skb);
 	if (skb->protocol == htons(ETH_P_IP)) {
 		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
 			return false;
 		iph = ip_hdr(skb);
-		fk->src = iph->saddr;
-		fk->dst = iph->daddr;
+		iph_to_flow_copy_v4addrs(fk, iph);
 		noff += iph->ihl << 2;
 		if (!ip_is_fragment(iph))
 			proto = iph->protocol;
@@ -3071,15 +3067,14 @@
 		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
 			return false;
 		iph6 = ipv6_hdr(skb);
-		fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
-		fk->dst = (__force __be32)ipv6_addr_hash(&iph6->daddr);
+		iph_to_flow_copy_v6addrs(fk, iph6);
 		noff += sizeof(*iph6);
 		proto = iph6->nexthdr;
 	} else {
 		return false;
 	}
 	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
-		fk->ports = skb_flow_get_ports(skb, noff, proto);
+		fk->ports.ports = skb_flow_get_ports(skb, noff, proto);
 
 	return true;
 }
@@ -3105,8 +3100,9 @@
 	    bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
 		hash = bond_eth_hash(skb);
 	else
-		hash = (__force u32)flow.ports;
-	hash ^= (__force u32)flow.dst ^ (__force u32)flow.src;
+		hash = (__force u32)flow.ports.ports;
+	hash ^= (__force u32)flow_get_u32_dst(&flow) ^
+		(__force u32)flow_get_u32_src(&flow);
 	hash ^= (hash >> 16);
 	hash ^= (hash >> 8);
 
@@ -4039,8 +4035,12 @@
 	.ndo_add_slave		= bond_enslave,
 	.ndo_del_slave		= bond_release,
 	.ndo_fix_features	= bond_fix_features,
-	.ndo_bridge_setlink	= ndo_dflt_netdev_switch_port_bridge_setlink,
-	.ndo_bridge_dellink	= ndo_dflt_netdev_switch_port_bridge_dellink,
+	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
+	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
+	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
+	.ndo_fdb_add		= switchdev_port_fdb_add,
+	.ndo_fdb_del		= switchdev_port_fdb_del,
+	.ndo_fdb_dump		= switchdev_port_fdb_dump,
 	.ndo_features_check	= passthru_features_check,
 };
 
@@ -4140,6 +4140,8 @@
 	struct bond_opt_value newval;
 	const struct bond_opt_value *valptr;
 	int arp_all_targets_value;
+	u16 ad_actor_sys_prio = 0;
+	u16 ad_user_port_key = 0;
 
 	/* Convert string parameters. */
 	if (mode) {
@@ -4434,6 +4436,24 @@
 		fail_over_mac_value = BOND_FOM_NONE;
 	}
 
+	bond_opt_initstr(&newval, "default");
+	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
+				&newval);
+	if (!valptr) {
+		pr_err("Error: No ad_actor_sys_prio default value");
+		return -EINVAL;
+	}
+	ad_actor_sys_prio = valptr->value;
+
+	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
+				&newval);
+	if (!valptr) {
+		pr_err("Error: No ad_user_port_key default value");
+		return -EINVAL;
+	}
+	ad_user_port_key = valptr->value;
+
 	if (lp_interval == 0) {
 		pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
 			INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
@@ -4462,6 +4482,9 @@
 	params->lp_interval = lp_interval;
 	params->packets_per_slave = packets_per_slave;
 	params->tlb_dynamic_lb = 1; /* Default value */
+	params->ad_actor_sys_prio = ad_actor_sys_prio;
+	eth_zero_addr(params->ad_actor_system);
+	params->ad_user_port_key = ad_user_port_key;
 	if (packets_per_slave > 0) {
 		params->reciprocal_packets_per_slave =
 			reciprocal_value(packets_per_slave);
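
After the flow_dissector conversion, bond_xmit_hash() computes the same
XOR-and-fold over L4 ports and L3 addresses as before, just via the new
flow_keys accessors. A stand-alone sketch of the folding step, with
made-up addresses and ports:

    #include <stdio.h>
    #include <stdint.h>

    /* mirror of the fold: hash = ports ^ dst ^ src, then two shifts */
    static uint32_t xmit_hash(uint32_t ports, uint32_t src, uint32_t dst)
    {
            uint32_t hash = ports;

            hash ^= dst ^ src;
            hash ^= hash >> 16;
            hash ^= hash >> 8;
            return hash;
    }

    int main(void)
    {
            uint32_t h = xmit_hash(0x1f90c350,      /* sport:dport */
                                   0x0a000001,      /* 10.0.0.1 */
                                   0x0a000002);     /* 10.0.0.2 */

            /* the bonding driver then takes hash % slave_cnt */
            printf("hash = 0x%08x -> slave %u of 4\n", h, h % 4);
            return 0;
    }
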
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 7b112436..1bda292 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -28,6 +28,8 @@
 		nla_total_size(MAX_ADDR_LEN) +	/* IFLA_BOND_SLAVE_PERM_HWADDR */
 		nla_total_size(sizeof(u16)) +	/* IFLA_BOND_SLAVE_QUEUE_ID */
 		nla_total_size(sizeof(u16)) +	/* IFLA_BOND_SLAVE_AD_AGGREGATOR_ID */
+		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE */
+		nla_total_size(sizeof(u16)) +	/* IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE */
 		0;
 }
 
@@ -56,12 +58,23 @@
 
 	if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
 		const struct aggregator *agg;
+		const struct port *ad_port;
 
+		ad_port = &SLAVE_AD_INFO(slave)->port;
 		agg = SLAVE_AD_INFO(slave)->port.aggregator;
-		if (agg)
+		if (agg) {
 			if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
 					agg->aggregator_identifier))
 				goto nla_put_failure;
+			if (nla_put_u8(skb,
+				       IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE,
+				       ad_port->actor_oper_port_state))
+				goto nla_put_failure;
+			if (nla_put_u16(skb,
+					IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE,
+					ad_port->partner_oper.port_state))
+				goto nla_put_failure;
+		}
 	}
 
 	return 0;
@@ -94,6 +107,10 @@
 	[IFLA_BOND_AD_LACP_RATE]	= { .type = NLA_U8 },
 	[IFLA_BOND_AD_SELECT]		= { .type = NLA_U8 },
 	[IFLA_BOND_AD_INFO]		= { .type = NLA_NESTED },
+	[IFLA_BOND_AD_ACTOR_SYS_PRIO]	= { .type = NLA_U16 },
+	[IFLA_BOND_AD_USER_PORT_KEY]	= { .type = NLA_U16 },
+	[IFLA_BOND_AD_ACTOR_SYSTEM]	= { .type = NLA_BINARY,
+					    .len  = ETH_ALEN },
 };
 
 static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -379,6 +396,36 @@
 		if (err)
 			return err;
 	}
+	if (data[IFLA_BOND_AD_ACTOR_SYS_PRIO]) {
+		int actor_sys_prio =
+			nla_get_u16(data[IFLA_BOND_AD_ACTOR_SYS_PRIO]);
+
+		bond_opt_initval(&newval, actor_sys_prio);
+		err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYS_PRIO, &newval);
+		if (err)
+			return err;
+	}
+
+	if (data[IFLA_BOND_AD_USER_PORT_KEY]) {
+		int port_key =
+			nla_get_u16(data[IFLA_BOND_AD_USER_PORT_KEY]);
+
+		bond_opt_initval(&newval, port_key);
+		err = __bond_opt_set(bond, BOND_OPT_AD_USER_PORT_KEY, &newval);
+		if (err)
+			return err;
+	}
+
+	if (data[IFLA_BOND_AD_ACTOR_SYSTEM]) {
+		if (nla_len(data[IFLA_BOND_AD_ACTOR_SYSTEM]) != ETH_ALEN)
+			return -EINVAL;
+
+		bond_opt_initval(&newval,
+				 nla_get_be64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
+		err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval);
+		if (err)
+			return err;
+	}
 	return 0;
 }
 
@@ -426,6 +473,9 @@
 		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
 		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
 		nla_total_size(ETH_ALEN) +    /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
+		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_ACTOR_SYS_PRIO */
+		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_USER_PORT_KEY */
+		nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */
 		0;
 }
 
@@ -551,6 +601,20 @@
 	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
 		struct ad_info info;
 
+		if (capable(CAP_NET_ADMIN)) {
+			if (nla_put_u16(skb, IFLA_BOND_AD_ACTOR_SYS_PRIO,
+					bond->params.ad_actor_sys_prio))
+				goto nla_put_failure;
+
+			if (nla_put_u16(skb, IFLA_BOND_AD_USER_PORT_KEY,
+					bond->params.ad_user_port_key))
+				goto nla_put_failure;
+
+			if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM,
+				    sizeof(bond->params.ad_actor_system),
+				    &bond->params.ad_actor_system))
+				goto nla_put_failure;
+		}
 		if (!bond_3ad_get_active_agg_info(bond, &info)) {
 			struct nlattr *nest;
 
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index e8d3c1d..e9c624d 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -70,6 +70,12 @@
 				  const struct bond_opt_value *newval);
 static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
 				  const struct bond_opt_value *newval);
+static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
+					     const struct bond_opt_value *newval);
+static int bond_option_ad_actor_system_set(struct bonding *bond,
+					   const struct bond_opt_value *newval);
+static int bond_option_ad_user_port_key_set(struct bonding *bond,
+					    const struct bond_opt_value *newval);
 
 
 static const struct bond_opt_value bond_mode_tbl[] = {
@@ -186,6 +192,18 @@
 	{ NULL,  -1, 0}
 };
 
+static const struct bond_opt_value bond_ad_actor_sys_prio_tbl[] = {
+	{ "minval",  1,     BOND_VALFLAG_MIN},
+	{ "maxval",  65535, BOND_VALFLAG_MAX | BOND_VALFLAG_DEFAULT},
+	{ NULL,      -1,    0},
+};
+
+static const struct bond_opt_value bond_ad_user_port_key_tbl[] = {
+	{ "minval",  0,     BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
+	{ "maxval",  1023,  BOND_VALFLAG_MAX},
+	{ NULL,      -1,    0},
+};
+
 static const struct bond_option bond_opts[BOND_OPT_LAST] = {
 	[BOND_OPT_MODE] = {
 		.id = BOND_OPT_MODE,
@@ -379,6 +397,29 @@
 		.values = bond_tlb_dynamic_lb_tbl,
 		.flags = BOND_OPTFLAG_IFDOWN,
 		.set = bond_option_tlb_dynamic_lb_set,
+	},
+	[BOND_OPT_AD_ACTOR_SYS_PRIO] = {
+		.id = BOND_OPT_AD_ACTOR_SYS_PRIO,
+		.name = "ad_actor_sys_prio",
+		.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+		.flags = BOND_OPTFLAG_IFDOWN,
+		.values = bond_ad_actor_sys_prio_tbl,
+		.set = bond_option_ad_actor_sys_prio_set,
+	},
+	[BOND_OPT_AD_ACTOR_SYSTEM] = {
+		.id = BOND_OPT_AD_ACTOR_SYSTEM,
+		.name = "ad_actor_system",
+		.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+		.flags = BOND_OPTFLAG_RAWVAL | BOND_OPTFLAG_IFDOWN,
+		.set = bond_option_ad_actor_system_set,
+	},
+	[BOND_OPT_AD_USER_PORT_KEY] = {
+		.id = BOND_OPT_AD_USER_PORT_KEY,
+		.name = "ad_user_port_key",
+		.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+		.flags = BOND_OPTFLAG_IFDOWN,
+		.values = bond_ad_user_port_key_tbl,
+		.set = bond_option_ad_user_port_key_set,
 	}
 };
 
@@ -1349,3 +1390,53 @@
 
 	return 0;
 }
+
+static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
+					     const struct bond_opt_value *newval)
+{
+	netdev_info(bond->dev, "Setting ad_actor_sys_prio to %llu\n",
+		    newval->value);
+
+	bond->params.ad_actor_sys_prio = newval->value;
+	return 0;
+}
+
+static int bond_option_ad_actor_system_set(struct bonding *bond,
+					   const struct bond_opt_value *newval)
+{
+	u8 macaddr[ETH_ALEN];
+	u8 *mac;
+	int i;
+
+	if (newval->string) {
+		i = sscanf(newval->string, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+			   &macaddr[0], &macaddr[1], &macaddr[2],
+			   &macaddr[3], &macaddr[4], &macaddr[5]);
+		if (i != ETH_ALEN)
+			goto err;
+		mac = macaddr;
+	} else {
+		mac = (u8 *)&newval->value;
+	}
+
+	if (!is_valid_ether_addr(mac))
+		goto err;
+
+	netdev_info(bond->dev, "Setting ad_actor_system to %pM\n", mac);
+	ether_addr_copy(bond->params.ad_actor_system, mac);
+	return 0;
+
+err:
+	netdev_err(bond->dev, "Invalid MAC address.\n");
+	return -EINVAL;
+}
+
+static int bond_option_ad_user_port_key_set(struct bonding *bond,
+					    const struct bond_opt_value *newval)
+{
+	netdev_info(bond->dev, "Setting ad_user_port_key to %llu\n",
+		    newval->value);
+
+	bond->params.ad_user_port_key = newval->value;
+	return 0;
+}
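
bond_option_ad_actor_system_set() accepts the new option either as a raw
value or as a textual MAC, parsing the latter with sscanf()'s %hhx
conversions; anything other than six parsed bytes is rejected. The same
parsing works stand-alone (the address below is an example only):

    #include <stdio.h>

    #define ETH_ALEN 6

    /* returns 0 on success, -1 on a malformed address */
    static int parse_mac(const char *s, unsigned char mac[ETH_ALEN])
    {
            int n = sscanf(s, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
                           &mac[0], &mac[1], &mac[2],
                           &mac[3], &mac[4], &mac[5]);

            return n == ETH_ALEN ? 0 : -1;
    }

    int main(void)
    {
            unsigned char mac[ETH_ALEN];

            if (parse_mac("02:00:00:aa:bb:cc", mac) == 0)
                    printf("parsed %02x:%02x:%02x:%02x:%02x:%02x\n",
                           mac[0], mac[1], mac[2],
                           mac[3], mac[4], mac[5]);
            else
                    printf("invalid MAC\n");
            return 0;
    }
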
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index b20b35ac..f514fe5 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -135,23 +135,30 @@
 					  bond->params.ad_select);
 		seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
 			   optval->string);
+		if (capable(CAP_NET_ADMIN)) {
+			seq_printf(seq, "System priority: %d\n",
+				   BOND_AD_INFO(bond).system.sys_priority);
+			seq_printf(seq, "System MAC address: %pM\n",
+				   &BOND_AD_INFO(bond).system.sys_mac_addr);
 
-		if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
-			seq_printf(seq, "bond %s has no active aggregator\n",
-				   bond->dev->name);
-		} else {
-			seq_printf(seq, "Active Aggregator Info:\n");
+			if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
+				seq_printf(seq,
+					   "bond %s has no active aggregator\n",
+					   bond->dev->name);
+			} else {
+				seq_printf(seq, "Active Aggregator Info:\n");
 
-			seq_printf(seq, "\tAggregator ID: %d\n",
-				   ad_info.aggregator_id);
-			seq_printf(seq, "\tNumber of ports: %d\n",
-				   ad_info.ports);
-			seq_printf(seq, "\tActor Key: %d\n",
-				   ad_info.actor_key);
-			seq_printf(seq, "\tPartner Key: %d\n",
-				   ad_info.partner_key);
-			seq_printf(seq, "\tPartner Mac Address: %pM\n",
-				   ad_info.partner_system);
+				seq_printf(seq, "\tAggregator ID: %d\n",
+					   ad_info.aggregator_id);
+				seq_printf(seq, "\tNumber of ports: %d\n",
+					   ad_info.ports);
+				seq_printf(seq, "\tActor Key: %d\n",
+					   ad_info.actor_key);
+				seq_printf(seq, "\tPartner Key: %d\n",
+					   ad_info.partner_key);
+				seq_printf(seq, "\tPartner Mac Address: %pM\n",
+					   ad_info.partner_system);
+			}
 		}
 	}
 }
@@ -195,29 +202,35 @@
 			seq_printf(seq, "Partner Churned Count: %d\n",
 				   port->churn_partner_count);
 
-			seq_puts(seq, "details actor lacp pdu:\n");
-			seq_printf(seq, "    system priority: %d\n",
-				   port->actor_system_priority);
-			seq_printf(seq, "    port key: %d\n",
-				   port->actor_oper_port_key);
-			seq_printf(seq, "    port priority: %d\n",
-				   port->actor_port_priority);
-			seq_printf(seq, "    port number: %d\n",
-				   port->actor_port_number);
-			seq_printf(seq, "    port state: %d\n",
-				   port->actor_oper_port_state);
+			if (capable(CAP_NET_ADMIN)) {
+				seq_puts(seq, "details actor lacp pdu:\n");
+				seq_printf(seq, "    system priority: %d\n",
+					   port->actor_system_priority);
+				seq_printf(seq, "    system mac address: %pM\n",
+					   &port->actor_system);
+				seq_printf(seq, "    port key: %d\n",
+					   port->actor_oper_port_key);
+				seq_printf(seq, "    port priority: %d\n",
+					   port->actor_port_priority);
+				seq_printf(seq, "    port number: %d\n",
+					   port->actor_port_number);
+				seq_printf(seq, "    port state: %d\n",
+					   port->actor_oper_port_state);
 
-			seq_puts(seq, "details partner lacp pdu:\n");
-			seq_printf(seq, "    system priority: %d\n",
-				   port->partner_oper.system_priority);
-			seq_printf(seq, "    oper key: %d\n",
-				   port->partner_oper.key);
-			seq_printf(seq, "    port priority: %d\n",
-				   port->partner_oper.port_priority);
-			seq_printf(seq, "    port number: %d\n",
-				   port->partner_oper.port_number);
-			seq_printf(seq, "    port state: %d\n",
-				   port->partner_oper.port_state);
+				seq_puts(seq, "details partner lacp pdu:\n");
+				seq_printf(seq, "    system priority: %d\n",
+					   port->partner_oper.system_priority);
+				seq_printf(seq, "    system mac address: %pM\n",
+					   &port->partner_oper.system);
+				seq_printf(seq, "    oper key: %d\n",
+					   port->partner_oper.key);
+				seq_printf(seq, "    port priority: %d\n",
+					   port->partner_oper.port_priority);
+				seq_printf(seq, "    port number: %d\n",
+					   port->partner_oper.port_number);
+				seq_printf(seq, "    port state: %d\n",
+					   port->partner_oper.port_state);
+			}
 		} else {
 			seq_puts(seq, "Aggregator ID: N/A\n");
 		}
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 7e9e151..31835a4 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -549,7 +549,7 @@
 	int count = 0;
 	struct bonding *bond = to_bond(d);
 
-	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+	if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
 		struct ad_info ad_info;
 		count = sprintf(buf, "%d\n",
 				bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -569,7 +569,7 @@
 	int count = 0;
 	struct bonding *bond = to_bond(d);
 
-	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+	if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
 		struct ad_info ad_info;
 		count = sprintf(buf, "%d\n",
 				bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -589,7 +589,7 @@
 	int count = 0;
 	struct bonding *bond = to_bond(d);
 
-	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+	if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
 		struct ad_info ad_info;
 		if (!bond_3ad_get_active_agg_info(bond, &ad_info))
 			count = sprintf(buf, "%pM\n", ad_info.partner_system);
@@ -692,6 +692,49 @@
 static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR,
 		   bonding_show_packets_per_slave, bonding_sysfs_store_option);
 
+static ssize_t bonding_show_ad_actor_sys_prio(struct device *d,
+					      struct device_attribute *attr,
+					      char *buf)
+{
+	struct bonding *bond = to_bond(d);
+
+	if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
+		return sprintf(buf, "%hu\n", bond->params.ad_actor_sys_prio);
+
+	return 0;
+}
+static DEVICE_ATTR(ad_actor_sys_prio, S_IRUGO | S_IWUSR,
+		   bonding_show_ad_actor_sys_prio, bonding_sysfs_store_option);
+
+static ssize_t bonding_show_ad_actor_system(struct device *d,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct bonding *bond = to_bond(d);
+
+	if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
+		return sprintf(buf, "%pM\n", bond->params.ad_actor_system);
+
+	return 0;
+}
+
+static DEVICE_ATTR(ad_actor_system, S_IRUGO | S_IWUSR,
+		   bonding_show_ad_actor_system, bonding_sysfs_store_option);
+
+static ssize_t bonding_show_ad_user_port_key(struct device *d,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	struct bonding *bond = to_bond(d);
+
+	if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
+		return sprintf(buf, "%hu\n", bond->params.ad_user_port_key);
+
+	return 0;
+}
+static DEVICE_ATTR(ad_user_port_key, S_IRUGO | S_IWUSR,
+		   bonding_show_ad_user_port_key, bonding_sysfs_store_option);
+
 static struct attribute *per_bond_attrs[] = {
 	&dev_attr_slaves.attr,
 	&dev_attr_mode.attr,
@@ -725,6 +768,9 @@
 	&dev_attr_lp_interval.attr,
 	&dev_attr_packets_per_slave.attr,
 	&dev_attr_tlb_dynamic_lb.attr,
+	&dev_attr_ad_actor_sys_prio.attr,
+	&dev_attr_ad_actor_system.attr,
+	&dev_attr_ad_user_port_key.attr,
 	NULL,
 };
 
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 23618a8..7d16c51 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -80,6 +80,36 @@
 }
 static SLAVE_ATTR_RO(ad_aggregator_id);
 
+static ssize_t ad_actor_oper_port_state_show(struct slave *slave, char *buf)
+{
+	const struct port *ad_port;
+
+	if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
+		ad_port = &SLAVE_AD_INFO(slave)->port;
+		if (ad_port->aggregator)
+			return sprintf(buf, "%u\n",
+				       ad_port->actor_oper_port_state);
+	}
+
+	return sprintf(buf, "N/A\n");
+}
+static SLAVE_ATTR_RO(ad_actor_oper_port_state);
+
+static ssize_t ad_partner_oper_port_state_show(struct slave *slave, char *buf)
+{
+	const struct port *ad_port;
+
+	if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
+		ad_port = &SLAVE_AD_INFO(slave)->port;
+		if (ad_port->aggregator)
+			return sprintf(buf, "%u\n",
+				       ad_port->partner_oper.port_state);
+	}
+
+	return sprintf(buf, "N/A\n");
+}
+static SLAVE_ATTR_RO(ad_partner_oper_port_state);
+
 static const struct slave_attribute *slave_attrs[] = {
 	&slave_attr_state,
 	&slave_attr_mii_status,
@@ -87,6 +117,8 @@
 	&slave_attr_perm_hwaddr,
 	&slave_attr_queue_id,
 	&slave_attr_ad_aggregator_id,
+	&slave_attr_ad_actor_oper_port_state,
+	&slave_attr_ad_partner_oper_port_state,
 	NULL
 };
 
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index b0f6924..e9b1810 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -440,6 +440,9 @@
 		struct can_frame *cf = (struct can_frame *)skb->data;
 		u8 dlc = cf->can_dlc;
 
+		if (!(skb->tstamp.tv64))
+			__net_timestamp(skb);
+
 		netif_rx(priv->echo_skb[idx]);
 		priv->echo_skb[idx] = NULL;
 
@@ -575,6 +578,7 @@
 	if (unlikely(!skb))
 		return NULL;
 
+	__net_timestamp(skb);
 	skb->protocol = htons(ETH_P_CAN);
 	skb->pkt_type = PACKET_BROADCAST;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -603,6 +607,7 @@
 	if (unlikely(!skb))
 		return NULL;
 
+	__net_timestamp(skb);
 	skb->protocol = htons(ETH_P_CANFD);
 	skb->pkt_type = PACKET_BROADCAST;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
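
The __net_timestamp() calls added here (and in slcan.c and vcan.c below)
apply one pattern: stamp the skb as soon as it is created or echoed,
instead of leaving tstamp zero for locally generated CAN frames. A hedged
kernel-style sketch of an allocation helper built around that pattern; the
helper name and exact layout are illustrative:

    #include <linux/can.h>
    #include <linux/can/skb.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static struct sk_buff *example_alloc_can_skb(struct net_device *dev,
                                                 struct can_frame **cf)
    {
            struct sk_buff *skb;

            skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
                                        sizeof(struct can_frame));
            if (unlikely(!skb))
                    return NULL;

            __net_timestamp(skb);        /* timestamp at creation */
            skb->protocol  = htons(ETH_P_CAN);
            skb->pkt_type  = PACKET_BROADCAST;
            skb->ip_summed = CHECKSUM_UNNECESSARY;

            can_skb_reserve(skb);        /* room for can_skb_priv */
            can_skb_prv(skb)->ifindex = dev->ifindex;

            *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
            memset(*cf, 0, sizeof(struct can_frame));
            return skb;
    }
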
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index ad0a7e8..6201c5a 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -93,13 +93,13 @@
 	(FLEXCAN_CTRL_ERR_BUS | FLEXCAN_CTRL_ERR_STATE)
 
 /* FLEXCAN control register 2 (CTRL2) bits */
-#define FLEXCAN_CRL2_ECRWRE		BIT(29)
-#define FLEXCAN_CRL2_WRMFRZ		BIT(28)
-#define FLEXCAN_CRL2_RFFN(x)		(((x) & 0x0f) << 24)
-#define FLEXCAN_CRL2_TASD(x)		(((x) & 0x1f) << 19)
-#define FLEXCAN_CRL2_MRP		BIT(18)
-#define FLEXCAN_CRL2_RRS		BIT(17)
-#define FLEXCAN_CRL2_EACEN		BIT(16)
+#define FLEXCAN_CTRL2_ECRWRE		BIT(29)
+#define FLEXCAN_CTRL2_WRMFRZ		BIT(28)
+#define FLEXCAN_CTRL2_RFFN(x)		(((x) & 0x0f) << 24)
+#define FLEXCAN_CTRL2_TASD(x)		(((x) & 0x1f) << 19)
+#define FLEXCAN_CTRL2_MRP		BIT(18)
+#define FLEXCAN_CTRL2_RRS		BIT(17)
+#define FLEXCAN_CTRL2_EACEN		BIT(16)
 
 /* FLEXCAN memory error control register (MECR) bits */
 #define FLEXCAN_MECR_ECRWRDIS		BIT(31)
@@ -158,7 +158,6 @@
 	 FLEXCAN_IFLAG_BUF(FLEXCAN_TX_BUF_ID))
 
 /* FLEXCAN message buffers */
-#define FLEXCAN_MB_CNT_CODE(x)		(((x) & 0xf) << 24)
 #define FLEXCAN_MB_CODE_RX_INACTIVE	(0x0 << 24)
 #define FLEXCAN_MB_CODE_RX_EMPTY	(0x4 << 24)
 #define FLEXCAN_MB_CODE_RX_FULL		(0x2 << 24)
@@ -184,14 +183,14 @@
  * FLEXCAN hardware feature flags
  *
  * Below is some version info we got:
- *    SOC   Version   IP-Version  Glitch-  [TR]WRN_INT  Memory err
- *                                Filter?   connected?  detection
- *   MX25  FlexCAN2  03.00.00.00     no         no         no
- *   MX28  FlexCAN2  03.00.04.00    yes        yes         no
- *   MX35  FlexCAN2  03.00.00.00     no         no         no
- *   MX53  FlexCAN2  03.00.00.00    yes         no         no
- *   MX6s  FlexCAN3  10.00.12.00    yes        yes         no
- *   VF610 FlexCAN3  ?               no        yes        yes
+ *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT Memory err RTR re-
+ *                                Filter? connected?  detection  ception in MB
+ *   MX25  FlexCAN2  03.00.00.00     no        no         no        no
+ *   MX28  FlexCAN2  03.00.04.00    yes       yes         no        no
+ *   MX35  FlexCAN2  03.00.00.00     no        no         no        no
+ *   MX53  FlexCAN2  03.00.00.00    yes        no         no        no
+ *   MX6s  FlexCAN3  10.00.12.00    yes       yes         no       yes
+ *   VF610 FlexCAN3  ?               no       yes        yes       yes?
  *
  * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
  */
@@ -221,7 +220,7 @@
 	u32 imask1;		/* 0x28 */
 	u32 iflag2;		/* 0x2c */
 	u32 iflag1;		/* 0x30 */
-	u32 crl2;		/* 0x34 */
+	u32 ctrl2;		/* 0x34 */
 	u32 esr2;		/* 0x38 */
 	u32 imeur;		/* 0x3c */
 	u32 lrfr;		/* 0x40 */
@@ -230,6 +229,16 @@
 	u32 rxfir;		/* 0x4c */
 	u32 _reserved3[12];	/* 0x50 */
 	struct flexcan_mb cantxfg[64];	/* 0x80 */
+	/* FIFO-mode:
+	 *			MB
+	 * 0x080...0x08f	0	RX message buffer
+	 * 0x090...0x0df	1-5	reserved
+	 * 0x0e0...0x0ff	6-7	8 entry ID table
+	 *				(mx25, mx28, mx35, mx53)
+	 * 0x0e0...0x2df	6-7..37	8..128 entry ID table
+	 *				size configured via ctrl2::RFFN
+	 *				(mx6, vf610)
+	 */
 	u32 _reserved4[408];
 	u32 mecr;		/* 0xae0 */
 	u32 erriar;		/* 0xae4 */
@@ -468,7 +477,7 @@
 	struct flexcan_regs __iomem *regs = priv->base;
 	struct can_frame *cf = (struct can_frame *)skb->data;
 	u32 can_id;
-	u32 ctrl = FLEXCAN_MB_CNT_CODE(0xc) | (cf->can_dlc << 16);
+	u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | (cf->can_dlc << 16);
 
 	if (can_dropped_invalid_skb(dev, skb))
 		return NETDEV_TX_OK;
@@ -815,7 +824,7 @@
 {
 	struct flexcan_priv *priv = netdev_priv(dev);
 	struct flexcan_regs __iomem *regs = priv->base;
-	u32 reg_mcr, reg_ctrl, reg_crl2, reg_mecr;
+	u32 reg_mcr, reg_ctrl, reg_ctrl2, reg_mecr;
 	int err, i;
 
 	/* enable module */
@@ -918,9 +927,9 @@
 		 * and Correction of Memory Errors" to write to
 		 * MECR register
 		 */
-		reg_crl2 = flexcan_read(&regs->crl2);
-		reg_crl2 |= FLEXCAN_CRL2_ECRWRE;
-		flexcan_write(reg_crl2, &regs->crl2);
+		reg_ctrl2 = flexcan_read(&regs->ctrl2);
+		reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE;
+		flexcan_write(reg_ctrl2, &regs->ctrl2);
 
 		reg_mecr = flexcan_read(&regs->mecr);
 		reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 4dd183a..c1e8536 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -40,6 +40,7 @@
 #define MSYNC_PEER		0x00		/* ICAN only */
 #define MSYNC_LOCL		0x01		/* host only */
 #define TARGET_RUNNING		0x02
+#define FIRMWARE_STAMP		0x60		/* big endian firmware stamp */
 
 #define MSYNC_RB0		0x01
 #define MSYNC_RB1		0x02
@@ -83,6 +84,7 @@
 #define MSG_COFFREQ		0x42
 #define MSG_CONREQ		0x43
 #define MSG_CCONFREQ		0x47
+#define MSG_LMTS		0xb4
 
 /*
  * Janz ICAN3 CAN Inquiry Message Types
@@ -165,6 +167,12 @@
 /* SJA1000 Clock Input */
 #define ICAN3_CAN_CLOCK		8000000
 
+/* Janz ICAN3 firmware types */
+enum ican3_fwtype {
+	ICAN3_FWTYPE_ICANOS,
+	ICAN3_FWTYPE_CAL_CANOPEN,
+};
+
 /* Driver Name */
 #define DRV_NAME "janz-ican3"
 
@@ -215,6 +223,10 @@
 	struct completion buserror_comp;
 	struct can_berr_counter bec;
 
+	/* firmware type */
+	enum ican3_fwtype fwtype;
+	char fwinfo[32];
+
 	/* old and new style host interface */
 	unsigned int iftype;
 
@@ -750,13 +762,61 @@
  */
 static int ican3_set_bus_state(struct ican3_dev *mod, bool on)
 {
+	struct can_bittiming *bt = &mod->can.bittiming;
 	struct ican3_msg msg;
+	u8 btr0, btr1;
+	int res;
 
-	memset(&msg, 0, sizeof(msg));
-	msg.spec = on ? MSG_CONREQ : MSG_COFFREQ;
-	msg.len = cpu_to_le16(0);
+	/* This algorithm was stolen from drivers/net/can/sja1000/sja1000.c.
+	 * The bittiming register command for the ICAN3 just sets the bit
+	 * timing registers on the SJA1000 chip directly.
+	 */
+	btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
+	btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
+		(((bt->phase_seg2 - 1) & 0x7) << 4);
+	if (mod->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+		btr1 |= 0x80;
 
-	return ican3_send_msg(mod, &msg);
+	if (mod->fwtype == ICAN3_FWTYPE_ICANOS) {
+		if (on) {
+			/* set bittiming */
+			memset(&msg, 0, sizeof(msg));
+			msg.spec = MSG_CBTRREQ;
+			msg.len = cpu_to_le16(4);
+			msg.data[0] = 0x00;
+			msg.data[1] = 0x00;
+			msg.data[2] = btr0;
+			msg.data[3] = btr1;
+
+			res = ican3_send_msg(mod, &msg);
+			if (res)
+				return res;
+		}
+
+		/* can-on/off request */
+		memset(&msg, 0, sizeof(msg));
+		msg.spec = on ? MSG_CONREQ : MSG_COFFREQ;
+		msg.len = cpu_to_le16(0);
+
+		return ican3_send_msg(mod, &msg);
+
+	} else if (mod->fwtype == ICAN3_FWTYPE_CAL_CANOPEN) {
+		memset(&msg, 0, sizeof(msg));
+		msg.spec = MSG_LMTS;
+		if (on) {
+			msg.len = cpu_to_le16(4);
+			msg.data[0] = 0;
+			msg.data[1] = 0;
+			msg.data[2] = btr0;
+			msg.data[3] = btr1;
+		} else {
+			msg.len = cpu_to_le16(2);
+			msg.data[0] = 1;
+			msg.data[1] = 0;
+		}
+
+		return ican3_send_msg(mod, &msg);
+	}
+	return -ENOTSUPP;
 }
 
 static int ican3_set_termination(struct ican3_dev *mod, bool on)
@@ -1402,7 +1462,7 @@
 			return 0;
 
 		msleep(10);
-	} while (time_before(jiffies, start + HZ / 4));
+	} while (time_before(jiffies, start + HZ / 2));
 
 	netdev_err(mod->ndev, "failed to reset CAN module\n");
 	return -ETIMEDOUT;
@@ -1427,6 +1487,17 @@
 		return ret;
 	}
 
+	/* detect firmware */
+	memcpy_fromio(mod->fwinfo, mod->dpm + FIRMWARE_STAMP,
+		      sizeof(mod->fwinfo) - 1);
+	if (strncmp(mod->fwinfo, "JANZ-ICAN3", 10)) {
+		netdev_err(mod->ndev, "ICAN3 not detected (found %s)\n",
+			   mod->fwinfo);
+		return -ENODEV;
+	}
+	if (strstr(mod->fwinfo, "CAL/CANopen"))
+		mod->fwtype = ICAN3_FWTYPE_CAL_CANOPEN;
+	else
+		mod->fwtype = ICAN3_FWTYPE_ICANOS;
+
 	/* re-enable interrupts so we can send messages */
 	iowrite8(1 << mod->num, &mod->ctrl->int_enable);
 
@@ -1615,36 +1686,6 @@
 	.brp_inc = 1,
 };
 
-/*
- * This routine was stolen from drivers/net/can/sja1000/sja1000.c
- *
- * The bittiming register command for the ICAN3 just sets the bit timing
- * registers on the SJA1000 chip directly
- */
-static int ican3_set_bittiming(struct net_device *ndev)
-{
-	struct ican3_dev *mod = netdev_priv(ndev);
-	struct can_bittiming *bt = &mod->can.bittiming;
-	struct ican3_msg msg;
-	u8 btr0, btr1;
-
-	btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
-	btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
-		(((bt->phase_seg2 - 1) & 0x7) << 4);
-	if (mod->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
-		btr1 |= 0x80;
-
-	memset(&msg, 0, sizeof(msg));
-	msg.spec = MSG_CBTRREQ;
-	msg.len = cpu_to_le16(4);
-	msg.data[0] = 0x00;
-	msg.data[1] = 0x00;
-	msg.data[2] = btr0;
-	msg.data[3] = btr1;
-
-	return ican3_send_msg(mod, &msg);
-}
-
 static int ican3_set_mode(struct net_device *ndev, enum can_mode mode)
 {
 	struct ican3_dev *mod = netdev_priv(ndev);
@@ -1730,11 +1771,22 @@
 	return count;
 }
 
+static ssize_t ican3_sysfs_show_fwinfo(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", mod->fwinfo);
+}
+
 static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
 						   ican3_sysfs_set_term);
+static DEVICE_ATTR(fwinfo, S_IRUSR | S_IRUGO, ican3_sysfs_show_fwinfo, NULL);
 
 static struct attribute *ican3_sysfs_attrs[] = {
 	&dev_attr_termination.attr,
+	&dev_attr_fwinfo.attr,
 	NULL,
 };
 
@@ -1794,7 +1846,6 @@
 
 	mod->can.clock.freq = ICAN3_CAN_CLOCK;
 	mod->can.bittiming_const = &ican3_bittiming_const;
-	mod->can.do_set_bittiming = ican3_set_bittiming;
 	mod->can.do_set_mode = ican3_set_mode;
 	mod->can.do_get_berr_counter = ican3_get_berr_counter;
 	mod->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES
@@ -1866,7 +1917,7 @@
 		goto out_free_irq;
 	}
 
-	dev_info(dev, "module %d: registered CAN device\n", pdata->modno);
+	netdev_info(mod->ndev, "module %d: registered CAN device\n", pdata->modno);
 	return 0;
 
 out_free_irq:
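
ican3_set_bus_state() now derives the SJA1000 BTR0/BTR1 values inline
before issuing the bus-on (or LMTS) request. The packing is plain bit
arithmetic and can be checked stand-alone; the timing fields below are
arbitrary illustrations, not a validated bit rate:

    #include <stdio.h>
    #include <stdint.h>

    /* BTR0: brp-1 in bits 5..0, sjw-1 in bits 7..6
     * BTR1: tseg1-1 in bits 3..0, tseg2-1 in bits 6..4,
     *       bit 7 = triple sampling
     */
    static void pack_btr(uint32_t brp, uint32_t sjw, uint32_t prop_seg,
                         uint32_t phase_seg1, uint32_t phase_seg2,
                         int triple_sample, uint8_t *btr0, uint8_t *btr1)
    {
            *btr0 = ((brp - 1) & 0x3f) | (((sjw - 1) & 0x3) << 6);
            *btr1 = ((prop_seg + phase_seg1 - 1) & 0xf) |
                    (((phase_seg2 - 1) & 0x7) << 4);
            if (triple_sample)
                    *btr1 |= 0x80;
    }

    int main(void)
    {
            uint8_t btr0, btr1;

            pack_btr(4, 1, 5, 4, 4, 0, &btr0, &btr1);
            printf("BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
            return 0;
    }
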
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index c837eb9..f64f529 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -207,6 +207,7 @@
 	if (!skb)
 		return;
 
+	__net_timestamp(skb);
 	skb->dev = sl->dev;
 	skb->protocol = htons(ETH_P_CAN);
 	skb->pkt_type = PACKET_BROADCAST;
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index bf63fee..c1a95a3 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -190,10 +190,11 @@
 #define RXBEID0_OFF 4
 #define RXBDLC_OFF  5
 #define RXBDAT_OFF  6
-#define RXFSIDH(n) ((n) * 4)
-#define RXFSIDL(n) ((n) * 4 + 1)
-#define RXFEID8(n) ((n) * 4 + 2)
-#define RXFEID0(n) ((n) * 4 + 3)
+#define RXFSID(n) (((n) < 3) ? 0 : 4)
+#define RXFSIDH(n) ((n) * 4 + RXFSID(n))
+#define RXFSIDL(n) ((n) * 4 + 1 + RXFSID(n))
+#define RXFEID8(n) ((n) * 4 + 2 + RXFSID(n))
+#define RXFEID0(n) ((n) * 4 + 3 + RXFSID(n))
 #define RXMSIDH(n) ((n) * 4 + 0x20)
 #define RXMSIDL(n) ((n) * 4 + 0x21)
 #define RXMEID8(n) ((n) * 4 + 0x22)
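
The corrected RXF macros account for the MCP2515 register map being
non-contiguous: acceptance filters 0..2 start at address 0x00 but filters
3..5 start at 0x10, hence the extra offset of 4 for n >= 3. A stand-alone
check of the resulting RXFnSIDH addresses:

    #include <stdio.h>

    #define RXFSID(n)  (((n) < 3) ? 0 : 4)
    #define RXFSIDH(n) ((n) * 4 + RXFSID(n))

    int main(void)
    {
            /* expected: 0x00 0x04 0x08 0x10 0x14 0x18 */
            for (int n = 0; n < 6; n++)
                    printf("RXFSIDH(%d) = 0x%02x\n", n, RXFSIDH(n));
            return 0;
    }
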
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 674f367..0ce868d 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -78,6 +78,9 @@
 	skb->dev       = dev;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 
+	if (!(skb->tstamp.tv64))
+		__net_timestamp(skb);
+
 	netif_rx_ni(skb);
 }
 
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 18550c7..7ad0a4d 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -37,22 +37,22 @@
 	  ethernet switch chips.
 
 config NET_DSA_MV88E6171
-	tristate "Marvell 88E6171/6172 ethernet switch chip support"
+	tristate "Marvell 88E6171/6175/6350/6351 ethernet switch chip support"
 	depends on NET_DSA
 	select NET_DSA_MV88E6XXX
 	select NET_DSA_TAG_EDSA
 	---help---
-	  This enables support for the Marvell 88E6171/6172 ethernet switch
-	  chips.
+	  This enables support for the Marvell 88E6171/6175/6350/6351
+	  ethernet switch chips.
 
 config NET_DSA_MV88E6352
-	tristate "Marvell 88E6176/88E6352 ethernet switch chip support"
+	tristate "Marvell 88E6172/88E6176/88E6352 ethernet switch chip support"
 	depends on NET_DSA
 	select NET_DSA_MV88E6XXX
 	select NET_DSA_TAG_EDSA
 	---help---
-	  This enables support for the Marvell 88E6176 and 88E6352 ethernet
-	  switch chips.
+	  This enables support for the Marvell 88E6172, 88E6176 and 88E6352
+	  ethernet switch chips.
 
 config NET_DSA_BCM_SF2
 	tristate "Broadcom Starfighter 2 Ethernet switch support"
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index cedb572..972982f 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -24,6 +24,7 @@
 #include <net/dsa.h>
 #include <linux/ethtool.h>
 #include <linux/if_bridge.h>
+#include <linux/brcmphy.h>
 
 #include "bcm_sf2.h"
 #include "bcm_sf2_regs.h"
@@ -697,7 +698,7 @@
 	/* Include the pseudo-PHY address and the broadcast PHY address to
 	 * divert reads towards our workaround
 	 */
-	ds->phys_mii_mask |= ((1 << 30) | (1 << 0));
+	ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
 
 	rev = reg_readl(priv, REG_SWITCH_REVISION);
 	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
@@ -782,7 +783,7 @@
 	 */
 	switch (addr) {
 	case 0:
-	case 30:
+	case BRCM_PSEUDO_PHY_ADDR:
 		return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0);
 	default:
 		return 0xffff;
@@ -797,7 +798,7 @@
 	 */
 	switch (addr) {
 	case 0:
-	case 30:
+	case BRCM_PSEUDO_PHY_ADDR:
 		bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val);
 		break;
 	}
@@ -911,6 +912,13 @@
 	 */
 	if (port == 7) {
 		status->link = priv->port_sts[port].link;
+		/* For MoCA interfaces, also force a link down notification
+		 * since some versions of the user-space daemon (mocad) use
+		 * cmd->autoneg to force the link, which messes up the PHY
+		 * state machine and makes it go into the PHY_FORCING state
+		 * instead.
+		 */
+		if (!status->link)
+			netif_carrier_off(ds->ports[port]);
 		status->duplex = 1;
 	} else {
 		status->link = 1;
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index b4af6d5..71a29a7 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -54,192 +54,40 @@
 
 static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
 {
+	u32 upstream_port = dsa_upstream_port(ds);
 	int ret;
-	int i;
+	u32 reg;
+
+	ret = mv88e6xxx_setup_global(ds);
+	if (ret)
+		return ret;
 
 	/* Disable the PHY polling unit (since there won't be any
 	 * external PHYs to poll), don't discard packets with
 	 * excessive collisions, and mask all interrupt sources.
 	 */
-	REG_WRITE(REG_GLOBAL, 0x04, 0x0000);
-
-	/* Set the default address aging time to 5 minutes, and
-	 * enable address learn messages to be sent to all message
-	 * ports.
-	 */
-	REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
-
-	/* Configure the priority mapping registers. */
-	ret = mv88e6xxx_config_prio(ds);
-	if (ret < 0)
-		return ret;
+	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, 0x0000);
 
 	/* Configure the upstream port, and configure the upstream
 	 * port as the port to which ingress and egress monitor frames
 	 * are to be sent.
 	 */
-	REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
+	reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+		upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+		upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
+	REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
 
 	/* Disable remote management for now, and set the switch's
 	 * DSA device number.
 	 */
-	REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
-
-	/* Send all frames with destination addresses matching
-	 * 01:80:c2:00:00:2x to the CPU port.
-	 */
-	REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
-
-	/* Send all frames with destination addresses matching
-	 * 01:80:c2:00:00:0x to the CPU port.
-	 */
-	REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
-
-	/* Disable the loopback filter, disable flow control
-	 * messages, disable flood broadcast override, disable
-	 * removing of provider tags, disable ATU age violation
-	 * interrupts, disable tag flow control, force flow
-	 * control priority to the highest, and send all special
-	 * multicast frames to the CPU at the highest priority.
-	 */
-	REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
-
-	/* Program the DSA routing table. */
-	for (i = 0; i < 32; i++) {
-		int nexthop;
-
-		nexthop = 0x1f;
-		if (i != ds->index && i < ds->dst->pd->nr_chips)
-			nexthop = ds->pd->rtable[i] & 0x1f;
-
-		REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
-	}
-
-	/* Clear all trunk masks. */
-	for (i = 0; i < 8; i++)
-		REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff);
-
-	/* Clear all trunk mappings. */
-	for (i = 0; i < 16; i++)
-		REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
-
-	/* Disable ingress rate limiting by resetting all ingress
-	 * rate limit registers to their initial state.
-	 */
-	for (i = 0; i < 6; i++)
-		REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
-
-	/* Initialise cross-chip port VLAN table to reset defaults. */
-	REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
-
-	/* Clear the priority override table. */
-	for (i = 0; i < 16; i++)
-		REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
-
-	/* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */
+	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f);
 
 	return 0;
 }
 
-static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
-{
-	int addr = REG_PORT(p);
-	u16 val;
-
-	/* MAC Forcing register: don't force link, speed, duplex
-	 * or flow control state to any particular values on physical
-	 * ports, but force the CPU port and all DSA ports to 1000 Mb/s
-	 * full duplex.
-	 */
-	if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
-		REG_WRITE(addr, 0x01, 0x003e);
-	else
-		REG_WRITE(addr, 0x01, 0x0003);
-
-	/* Do not limit the period of time that this port can be
-	 * paused for by the remote end or the period of time that
-	 * this port can pause the remote end.
-	 */
-	REG_WRITE(addr, 0x02, 0x0000);
-
-	/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
-	 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
-	 * tunneling, determine priority by looking at 802.1p and IP
-	 * priority fields (IP prio has precedence), and set STP state
-	 * to Forwarding.
-	 *
-	 * If this is the CPU link, use DSA or EDSA tagging depending
-	 * on which tagging mode was configured.
-	 *
-	 * If this is a link to another switch, use DSA tagging mode.
-	 *
-	 * If this is the upstream port for this switch, enable
-	 * forwarding of unknown unicasts and multicasts.
-	 */
-	val = 0x0433;
-	if (dsa_is_cpu_port(ds, p)) {
-		if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
-			val |= 0x3300;
-		else
-			val |= 0x0100;
-	}
-	if (ds->dsa_port_mask & (1 << p))
-		val |= 0x0100;
-	if (p == dsa_upstream_port(ds))
-		val |= 0x000c;
-	REG_WRITE(addr, 0x04, val);
-
-	/* Port Control 2: don't force a good FCS, set the maximum
-	 * frame size to 10240 bytes, don't let the switch add or
-	 * strip 802.1q tags, don't discard tagged or untagged frames
-	 * on this port, do a destination address lookup on all
-	 * received packets as usual, disable ARP mirroring and don't
-	 * send a copy of all transmitted/received frames on this port
-	 * to the CPU.
-	 */
-	REG_WRITE(addr, 0x08, 0x2080);
-
-	/* Egress rate control: disable egress rate control. */
-	REG_WRITE(addr, 0x09, 0x0001);
-
-	/* Egress rate control 2: disable egress rate control. */
-	REG_WRITE(addr, 0x0a, 0x0000);
-
-	/* Port Association Vector: when learning source addresses
-	 * of packets, add the address to the address database using
-	 * a port bitmap that has only the bit for this port set and
-	 * the other bits clear.
-	 */
-	REG_WRITE(addr, 0x0b, 1 << p);
-
-	/* Port ATU control: disable limiting the number of address
-	 * database entries that this port is allowed to use.
-	 */
-	REG_WRITE(addr, 0x0c, 0x0000);
-
-	/* Priority Override: disable DA, SA and VTU priority override. */
-	REG_WRITE(addr, 0x0d, 0x0000);
-
-	/* Port Ethertype: use the Ethertype DSA Ethertype value. */
-	REG_WRITE(addr, 0x0f, ETH_P_EDSA);
-
-	/* Tag Remap: use an identity 802.1p prio -> switch prio
-	 * mapping.
-	 */
-	REG_WRITE(addr, 0x18, 0x3210);
-
-	/* Tag Remap 2: use an identity 802.1p prio -> switch prio
-	 * mapping.
-	 */
-	REG_WRITE(addr, 0x19, 0x7654);
-
-	return mv88e6xxx_setup_port_common(ds, p);
-}
-
 static int mv88e6123_61_65_setup(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int i;
 	int ret;
 
 	ret = mv88e6xxx_setup_common(ds);
@@ -262,19 +110,11 @@
 	if (ret < 0)
 		return ret;
 
-	/* @@@ initialise vtu and atu */
-
 	ret = mv88e6123_61_65_setup_global(ds);
 	if (ret < 0)
 		return ret;
 
-	for (i = 0; i < ps->num_ports; i++) {
-		ret = mv88e6123_61_65_setup_port(ds, i);
-		if (ret < 0)
-			return ret;
-	}
-
-	return 0;
+	return mv88e6xxx_setup_ports(ds);
 }
 
 struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index e54824f..32f4a08 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -37,6 +37,8 @@
 			return "Marvell 88E6131 (B2)";
 		if (ret_masked == PORT_SWITCH_ID_6131)
 			return "Marvell 88E6131";
+		if (ret_masked == PORT_SWITCH_ID_6185)
+			return "Marvell 88E6185";
 	}
 
 	return NULL;
@@ -44,186 +46,62 @@
 
 static int mv88e6131_setup_global(struct dsa_switch *ds)
 {
+	u32 upstream_port = dsa_upstream_port(ds);
 	int ret;
-	int i;
+	u32 reg;
+
+	ret = mv88e6xxx_setup_global(ds);
+	if (ret)
+		return ret;
 
 	/* Enable the PHY polling unit, don't discard packets with
 	 * excessive collisions, use a weighted fair queueing scheme
 	 * to arbitrate between packet queues, set the maximum frame
 	 * size to 1632, and mask all interrupt sources.
 	 */
-	REG_WRITE(REG_GLOBAL, 0x04, 0x4400);
-
-	/* Set the default address aging time to 5 minutes, and
-	 * enable address learn messages to be sent to all message
-	 * ports.
-	 */
-	REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
-
-	/* Configure the priority mapping registers. */
-	ret = mv88e6xxx_config_prio(ds);
-	if (ret < 0)
-		return ret;
+	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
+		  GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_MAX_FRAME_1632);
 
 	/* Set the VLAN ethertype to 0x8100. */
-	REG_WRITE(REG_GLOBAL, 0x19, 0x8100);
+	REG_WRITE(REG_GLOBAL, GLOBAL_CORE_TAG_TYPE, 0x8100);
 
 	/* Disable ARP mirroring, and configure the upstream port as
 	 * the port to which ingress and egress monitor frames are to
 	 * be sent.
 	 */
-	REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1100) | 0x00f0);
+	reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+		upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+		GLOBAL_MONITOR_CONTROL_ARP_DISABLED;
+	REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
 
 	/* Disable cascade port functionality unless this device
 	 * is used in a cascade configuration, and set the switch's
 	 * DSA device number.
 	 */
 	if (ds->dst->pd->nr_chips > 1)
-		REG_WRITE(REG_GLOBAL, 0x1c, 0xf000 | (ds->index & 0x1f));
+		REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2,
+			  GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
+			  (ds->index & 0x1f));
 	else
-		REG_WRITE(REG_GLOBAL, 0x1c, 0xe000 | (ds->index & 0x1f));
-
-	/* Send all frames with destination addresses matching
-	 * 01:80:c2:00:00:0x to the CPU port.
-	 */
-	REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
-
-	/* Ignore removed tag data on doubly tagged packets, disable
-	 * flow control messages, force flow control priority to the
-	 * highest, and send all special multicast frames to the CPU
-	 * port at the highest priority.
-	 */
-	REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
-
-	/* Program the DSA routing table. */
-	for (i = 0; i < 32; i++) {
-		int nexthop;
-
-		nexthop = 0x1f;
-		if (ds->pd->rtable &&
-		    i != ds->index && i < ds->dst->pd->nr_chips)
-			nexthop = ds->pd->rtable[i] & 0x1f;
-
-		REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
-	}
-
-	/* Clear all trunk masks. */
-	for (i = 0; i < 8; i++)
-		REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7ff);
-
-	/* Clear all trunk mappings. */
-	for (i = 0; i < 16; i++)
-		REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
+		REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2,
+			  GLOBAL_CONTROL_2_NO_CASCADE |
+			  (ds->index & 0x1f));
 
 	/* Force the priority of IGMP/MLD snoop frames and ARP frames
 	 * to the highest setting.
 	 */
-	REG_WRITE(REG_GLOBAL2, 0x0f, 0x00ff);
+	REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
+		  GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP |
+		  7 << GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT |
+		  GLOBAL2_PRIO_OVERRIDE_FORCE_ARP |
+		  7 << GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT);
 
 	return 0;
 }
 
-static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int addr = REG_PORT(p);
-	u16 val;
-
-	/* MAC Forcing register: don't force link, speed, duplex
-	 * or flow control state to any particular values on physical
-	 * ports, but force the CPU port and all DSA ports to 1000 Mb/s
-	 * (100 Mb/s on 6085) full duplex.
-	 */
-	if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
-		if (ps->id == PORT_SWITCH_ID_6085)
-			REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
-		else
-			REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
-	else
-		REG_WRITE(addr, 0x01, 0x0003);
-
-	/* Port Control: disable Core Tag, disable Drop-on-Lock,
-	 * transmit frames unmodified, disable Header mode,
-	 * enable IGMP/MLD snoop, disable DoubleTag, disable VLAN
-	 * tunneling, determine priority by looking at 802.1p and
-	 * IP priority fields (IP prio has precedence), and set STP
-	 * state to Forwarding.
-	 *
-	 * If this is the upstream port for this switch, enable
-	 * forwarding of unknown unicasts, and enable DSA tagging
-	 * mode.
-	 *
-	 * If this is the link to another switch, use DSA tagging
-	 * mode, but do not enable forwarding of unknown unicasts.
-	 */
-	val = 0x0433;
-	if (p == dsa_upstream_port(ds)) {
-		val |= 0x0104;
-		/* On 6085, unknown multicast forward is controlled
-		 * here rather than in Port Control 2 register.
-		 */
-		if (ps->id == PORT_SWITCH_ID_6085)
-			val |= 0x0008;
-	}
-	if (ds->dsa_port_mask & (1 << p))
-		val |= 0x0100;
-	REG_WRITE(addr, 0x04, val);
-
-	/* Port Control 2: don't force a good FCS, don't use
-	 * VLAN-based, source address-based or destination
-	 * address-based priority overrides, don't let the switch
-	 * add or strip 802.1q tags, don't discard tagged or
-	 * untagged frames on this port, do a destination address
-	 * lookup on received packets as usual, don't send a copy
-	 * of all transmitted/received frames on this port to the
-	 * CPU, and configure the upstream port number.
-	 *
-	 * If this is the upstream port for this switch, enable
-	 * forwarding of unknown multicast addresses.
-	 */
-	if (ps->id == PORT_SWITCH_ID_6085)
-		/* on 6085, bits 3:0 are reserved, bit 6 control ARP
-		 * mirroring, and multicast forward is handled in
-		 * Port Control register.
-		 */
-		REG_WRITE(addr, 0x08, 0x0080);
-	else {
-		val = 0x0080 | dsa_upstream_port(ds);
-		if (p == dsa_upstream_port(ds))
-			val |= 0x0040;
-		REG_WRITE(addr, 0x08, val);
-	}
-
-	/* Rate Control: disable ingress rate limiting. */
-	REG_WRITE(addr, 0x09, 0x0000);
-
-	/* Rate Control 2: disable egress rate limiting. */
-	REG_WRITE(addr, 0x0a, 0x0000);
-
-	/* Port Association Vector: when learning source addresses
-	 * of packets, add the address to the address database using
-	 * a port bitmap that has only the bit for this port set and
-	 * the other bits clear.
-	 */
-	REG_WRITE(addr, 0x0b, 1 << p);
-
-	/* Tag Remap: use an identity 802.1p prio -> switch prio
-	 * mapping.
-	 */
-	REG_WRITE(addr, 0x18, 0x3210);
-
-	/* Tag Remap 2: use an identity 802.1p prio -> switch prio
-	 * mapping.
-	 */
-	REG_WRITE(addr, 0x19, 0x7654);
-
-	return mv88e6xxx_setup_port_common(ds, p);
-}
-
 static int mv88e6131_setup(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int i;
 	int ret;
 
 	ret = mv88e6xxx_setup_common(ds);
@@ -234,6 +112,7 @@
 
 	switch (ps->id) {
 	case PORT_SWITCH_ID_6085:
+	case PORT_SWITCH_ID_6185:
 		ps->num_ports = 10;
 		break;
 	case PORT_SWITCH_ID_6095:
@@ -251,19 +130,11 @@
 	if (ret < 0)
 		return ret;
 
-	/* @@@ initialise vtu and atu */
-
 	ret = mv88e6131_setup_global(ds);
 	if (ret < 0)
 		return ret;
 
-	for (i = 0; i < ps->num_ports; i++) {
-		ret = mv88e6131_setup_port(ds, i);
-		if (ret < 0)
-			return ret;
-	}
-
-	return 0;
+	return mv88e6xxx_setup_ports(ds);
 }
 
 static int mv88e6131_port_to_phy_addr(struct dsa_switch *ds, int port)
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index 9104efe..1c78084 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -1,4 +1,4 @@
-/* net/dsa/mv88e6171.c - Marvell 88e6171/8826172 switch chip support
+/* net/dsa/mv88e6171.c - Marvell 88e6171 switch chip support
  * Copyright (c) 2008-2009 Marvell Semiconductor
  * Copyright (c) 2014 Claudio Leite <leitec@staticky.com>
  *
@@ -29,8 +29,12 @@
 	if (ret >= 0) {
 		if ((ret & 0xfff0) == PORT_SWITCH_ID_6171)
 			return "Marvell 88E6171";
-		if ((ret & 0xfff0) == PORT_SWITCH_ID_6172)
-			return "Marvell 88E6172";
+		if ((ret & 0xfff0) == PORT_SWITCH_ID_6175)
+			return "Marvell 88E6175";
+		if ((ret & 0xfff0) == PORT_SWITCH_ID_6350)
+			return "Marvell 88E6350";
+		if ((ret & 0xfff0) == PORT_SWITCH_ID_6351)
+			return "Marvell 88E6351";
 	}
 
 	return NULL;
@@ -38,196 +42,41 @@
 
 static int mv88e6171_setup_global(struct dsa_switch *ds)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	u32 upstream_port = dsa_upstream_port(ds);
 	int ret;
-	int i;
+	u32 reg;
+
+	ret = mv88e6xxx_setup_global(ds);
+	if (ret)
+		return ret;
 
 	/* Discard packets with excessive collisions, mask all
 	 * interrupt sources, enable PPU.
 	 */
-	REG_WRITE(REG_GLOBAL, 0x04, 0x6000);
-
-	/* Set the default address aging time to 5 minutes, and
-	 * enable address learn messages to be sent to all message
-	 * ports.
-	 */
-	REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
-
-	/* Configure the priority mapping registers. */
-	ret = mv88e6xxx_config_prio(ds);
-	if (ret < 0)
-		return ret;
+	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
+		  GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS);
 
 	/* Configure the upstream port, and configure the upstream
 	 * port as the port to which ingress and egress monitor frames
 	 * are to be sent.
 	 */
-	if (REG_READ(REG_PORT(0), 0x03) == 0x1710)
-		REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1111));
-	else
-		REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
+	reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+		upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+		upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT |
+		upstream_port << GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT;
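+	/* For example, upstream_port = 5 yields reg = 0x5555, the same
+	 * value the old 88E6171 code computed as
+	 * dsa_upstream_port(ds) * 0x1111.
+	 */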
+	REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
 
 	/* Disable remote management for now, and set the switch's
 	 * DSA device number.
 	 */
-	REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
-
-	/* Send all frames with destination addresses matching
-	 * 01:80:c2:00:00:2x to the CPU port.
-	 */
-	REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
-
-	/* Send all frames with destination addresses matching
-	 * 01:80:c2:00:00:0x to the CPU port.
-	 */
-	REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
-
-	/* Disable the loopback filter, disable flow control
-	 * messages, disable flood broadcast override, disable
-	 * removing of provider tags, disable ATU age violation
-	 * interrupts, disable tag flow control, force flow
-	 * control priority to the highest, and send all special
-	 * multicast frames to the CPU at the highest priority.
-	 */
-	REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
-
-	/* Program the DSA routing table. */
-	for (i = 0; i < 32; i++) {
-		int nexthop;
-
-		nexthop = 0x1f;
-		if (i != ds->index && i < ds->dst->pd->nr_chips)
-			nexthop = ds->pd->rtable[i] & 0x1f;
-
-		REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
-	}
-
-	/* Clear all trunk masks. */
-	for (i = 0; i < ps->num_ports; i++)
-		REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff);
-
-	/* Clear all trunk mappings. */
-	for (i = 0; i < 16; i++)
-		REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
-
-	/* Disable ingress rate limiting by resetting all ingress
-	 * rate limit registers to their initial state.
-	 */
-	for (i = 0; i < 6; i++)
-		REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
-
-	/* Initialise cross-chip port VLAN table to reset defaults. */
-	REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
-
-	/* Clear the priority override table. */
-	for (i = 0; i < 16; i++)
-		REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
-
-	/* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */
+	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f);
 
 	return 0;
 }
 
-static int mv88e6171_setup_port(struct dsa_switch *ds, int p)
-{
-	int addr = REG_PORT(p);
-	u16 val;
-
-	/* MAC Forcing register: don't force link, speed, duplex
-	 * or flow control state to any particular values on physical
-	 * ports, but force the CPU port and all DSA ports to 1000 Mb/s
-	 * full duplex.
-	 */
-	val = REG_READ(addr, 0x01);
-	if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
-		REG_WRITE(addr, 0x01, val | 0x003e);
-	else
-		REG_WRITE(addr, 0x01, val | 0x0003);
-
-	/* Do not limit the period of time that this port can be
-	 * paused for by the remote end or the period of time that
-	 * this port can pause the remote end.
-	 */
-	REG_WRITE(addr, 0x02, 0x0000);
-
-	/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
-	 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
-	 * tunneling, determine priority by looking at 802.1p and IP
-	 * priority fields (IP prio has precedence), and set STP state
-	 * to Forwarding.
-	 *
-	 * If this is the CPU link, use DSA or EDSA tagging depending
-	 * on which tagging mode was configured.
-	 *
-	 * If this is a link to another switch, use DSA tagging mode.
-	 *
-	 * If this is the upstream port for this switch, enable
-	 * forwarding of unknown unicasts and multicasts.
-	 */
-	val = 0x0433;
-	if (dsa_is_cpu_port(ds, p)) {
-		if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
-			val |= 0x3300;
-		else
-			val |= 0x0100;
-	}
-	if (ds->dsa_port_mask & (1 << p))
-		val |= 0x0100;
-	if (p == dsa_upstream_port(ds))
-		val |= 0x000c;
-	REG_WRITE(addr, 0x04, val);
-
-	/* Port Control 2: don't force a good FCS, set the maximum
-	 * frame size to 10240 bytes, don't let the switch add or
-	 * strip 802.1q tags, don't discard tagged or untagged frames
-	 * on this port, do a destination address lookup on all
-	 * received packets as usual, disable ARP mirroring and don't
-	 * send a copy of all transmitted/received frames on this port
-	 * to the CPU.
-	 */
-	REG_WRITE(addr, 0x08, 0x2080);
-
-	/* Egress rate control: disable egress rate control. */
-	REG_WRITE(addr, 0x09, 0x0001);
-
-	/* Egress rate control 2: disable egress rate control. */
-	REG_WRITE(addr, 0x0a, 0x0000);
-
-	/* Port Association Vector: when learning source addresses
-	 * of packets, add the address to the address database using
-	 * a port bitmap that has only the bit for this port set and
-	 * the other bits clear.
-	 */
-	REG_WRITE(addr, 0x0b, 1 << p);
-
-	/* Port ATU control: disable limiting the number of address
-	 * database entries that this port is allowed to use.
-	 */
-	REG_WRITE(addr, 0x0c, 0x0000);
-
-	/* Priority Override: disable DA, SA and VTU priority override. */
-	REG_WRITE(addr, 0x0d, 0x0000);
-
-	/* Port Ethertype: use the Ethertype DSA Ethertype value. */
-	REG_WRITE(addr, 0x0f, ETH_P_EDSA);
-
-	/* Tag Remap: use an identity 802.1p prio -> switch prio
-	 * mapping.
-	 */
-	REG_WRITE(addr, 0x18, 0x3210);
-
-	/* Tag Remap 2: use an identity 802.1p prio -> switch prio
-	 * mapping.
-	 */
-	REG_WRITE(addr, 0x19, 0x7654);
-
-	return mv88e6xxx_setup_port_common(ds, p);
-}
-
 static int mv88e6171_setup(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int i;
 	int ret;
 
 	ret = mv88e6xxx_setup_common(ds);
@@ -240,44 +89,11 @@
 	if (ret < 0)
 		return ret;
 
-	/* @@@ initialise vtu and atu */
-
 	ret = mv88e6171_setup_global(ds);
 	if (ret < 0)
 		return ret;
 
-	for (i = 0; i < ps->num_ports; i++) {
-		if (!(dsa_is_cpu_port(ds, i) || ds->phys_port_mask & (1 << i)))
-			continue;
-
-		ret = mv88e6171_setup_port(ds, i);
-		if (ret < 0)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int mv88e6171_get_eee(struct dsa_switch *ds, int port,
-			     struct ethtool_eee *e)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-	if (ps->id == PORT_SWITCH_ID_6172)
-		return mv88e6xxx_get_eee(ds, port, e);
-
-	return -EOPNOTSUPP;
-}
-
-static int mv88e6171_set_eee(struct dsa_switch *ds, int port,
-			     struct phy_device *phydev, struct ethtool_eee *e)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-	if (ps->id == PORT_SWITCH_ID_6172)
-		return mv88e6xxx_set_eee(ds, port, phydev, e);
-
-	return -EOPNOTSUPP;
+	return mv88e6xxx_setup_ports(ds);
 }
 
 struct dsa_switch_driver mv88e6171_switch_driver = {
@@ -292,8 +108,6 @@
 	.get_strings		= mv88e6xxx_get_strings,
 	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
 	.get_sset_count		= mv88e6xxx_get_sset_count,
-	.set_eee		= mv88e6171_set_eee,
-	.get_eee		= mv88e6171_get_eee,
 #ifdef CONFIG_NET_DSA_HWMON
 	.get_temp               = mv88e6xxx_get_temp,
 #endif
@@ -308,4 +122,6 @@
 };
 
 MODULE_ALIAS("platform:mv88e6171");
-MODULE_ALIAS("platform:mv88e6172");
+MODULE_ALIAS("platform:mv88e6175");
+MODULE_ALIAS("platform:mv88e6350");
+MODULE_ALIAS("platform:mv88e6351");
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index 126c11b..632815c 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -32,6 +32,8 @@
 
 	ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
 	if (ret >= 0) {
+		if ((ret & 0xfff0) == PORT_SWITCH_ID_6172)
+			return "Marvell 88E6172";
 		if ((ret & 0xfff0) == PORT_SWITCH_ID_6176)
 			return "Marvell 88E6176";
 		if (ret == PORT_SWITCH_ID_6352_A0)
@@ -47,187 +49,37 @@
 
 static int mv88e6352_setup_global(struct dsa_switch *ds)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	u32 upstream_port = dsa_upstream_port(ds);
 	int ret;
-	int i;
+	u32 reg;
+
+	ret = mv88e6xxx_setup_global(ds);
+	if (ret)
+		return ret;
 
 	/* Discard packets with excessive collisions,
 	 * mask all interrupt sources, enable PPU (bit 14, undocumented).
 	 */
-	REG_WRITE(REG_GLOBAL, 0x04, 0x6000);
-
-	/* Set the default address aging time to 5 minutes, and
-	 * enable address learn messages to be sent to all message
-	 * ports.
-	 */
-	REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
-
-	/* Configure the priority mapping registers. */
-	ret = mv88e6xxx_config_prio(ds);
-	if (ret < 0)
-		return ret;
+	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
+		  GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS);
 
 	/* Configure the upstream port, and configure the upstream
 	 * port as the port to which ingress and egress monitor frames
 	 * are to be sent.
 	 */
-	REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
+	reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+		upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+		upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
+	REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
 
 	/* Disable remote management for now, and set the switch's
 	 * DSA device number.
 	 */
 	REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
 
-	/* Send all frames with destination addresses matching
-	 * 01:80:c2:00:00:2x to the CPU port.
-	 */
-	REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
-
-	/* Send all frames with destination addresses matching
-	 * 01:80:c2:00:00:0x to the CPU port.
-	 */
-	REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
-
-	/* Disable the loopback filter, disable flow control
-	 * messages, disable flood broadcast override, disable
-	 * removing of provider tags, disable ATU age violation
-	 * interrupts, disable tag flow control, force flow
-	 * control priority to the highest, and send all special
-	 * multicast frames to the CPU at the highest priority.
-	 */
-	REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
-
-	/* Program the DSA routing table. */
-	for (i = 0; i < 32; i++) {
-		int nexthop = 0x1f;
-
-		if (i != ds->index && i < ds->dst->pd->nr_chips)
-			nexthop = ds->pd->rtable[i] & 0x1f;
-
-		REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
-	}
-
-	/* Clear all trunk masks. */
-	for (i = 0; i < 8; i++)
-		REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7f);
-
-	/* Clear all trunk mappings. */
-	for (i = 0; i < 16; i++)
-		REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
-
-	/* Disable ingress rate limiting by resetting all ingress
-	 * rate limit registers to their initial state.
-	 */
-	for (i = 0; i < ps->num_ports; i++)
-		REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
-
-	/* Initialise cross-chip port VLAN table to reset defaults. */
-	REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
-
-	/* Clear the priority override table. */
-	for (i = 0; i < 16; i++)
-		REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
-
-	/* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */
-
 	return 0;
 }
 
-static int mv88e6352_setup_port(struct dsa_switch *ds, int p)
-{
-	int addr = REG_PORT(p);
-	u16 val;
-
-	/* MAC Forcing register: don't force link, speed, duplex
-	 * or flow control state to any particular values on physical
-	 * ports, but force the CPU port and all DSA ports to 1000 Mb/s
-	 * full duplex.
-	 */
-	if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
-		REG_WRITE(addr, 0x01, 0x003e);
-	else
-		REG_WRITE(addr, 0x01, 0x0003);
-
-	/* Do not limit the period of time that this port can be
-	 * paused for by the remote end or the period of time that
-	 * this port can pause the remote end.
-	 */
-	REG_WRITE(addr, 0x02, 0x0000);
-
-	/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
-	 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
-	 * tunneling, determine priority by looking at 802.1p and IP
-	 * priority fields (IP prio has precedence), and set STP state
-	 * to Forwarding.
-	 *
-	 * If this is the CPU link, use DSA or EDSA tagging depending
-	 * on which tagging mode was configured.
-	 *
-	 * If this is a link to another switch, use DSA tagging mode.
-	 *
-	 * If this is the upstream port for this switch, enable
-	 * forwarding of unknown unicasts and multicasts.
-	 */
-	val = 0x0433;
-	if (dsa_is_cpu_port(ds, p)) {
-		if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
-			val |= 0x3300;
-		else
-			val |= 0x0100;
-	}
-	if (ds->dsa_port_mask & (1 << p))
-		val |= 0x0100;
-	if (p == dsa_upstream_port(ds))
-		val |= 0x000c;
-	REG_WRITE(addr, 0x04, val);
-
-	/* Port Control 2: don't force a good FCS, set the maximum
-	 * frame size to 10240 bytes, don't let the switch add or
-	 * strip 802.1q tags, don't discard tagged or untagged frames
-	 * on this port, do a destination address lookup on all
-	 * received packets as usual, disable ARP mirroring and don't
-	 * send a copy of all transmitted/received frames on this port
-	 * to the CPU.
-	 */
-	REG_WRITE(addr, 0x08, 0x2080);
-
-	/* Egress rate control: disable egress rate control. */
-	REG_WRITE(addr, 0x09, 0x0001);
-
-	/* Egress rate control 2: disable egress rate control. */
-	REG_WRITE(addr, 0x0a, 0x0000);
-
-	/* Port Association Vector: when learning source addresses
-	 * of packets, add the address to the address database using
-	 * a port bitmap that has only the bit for this port set and
-	 * the other bits clear.
-	 */
-	REG_WRITE(addr, 0x0b, 1 << p);
-
-	/* Port ATU control: disable limiting the number of address
-	 * database entries that this port is allowed to use.
-	 */
-	REG_WRITE(addr, 0x0c, 0x0000);
-
-	/* Priority Override: disable DA, SA and VTU priority override. */
-	REG_WRITE(addr, 0x0d, 0x0000);
-
-	/* Port Ethertype: use the Ethertype DSA Ethertype value. */
-	REG_WRITE(addr, 0x0f, ETH_P_EDSA);
-
-	/* Tag Remap: use an identity 802.1p prio -> switch prio
-	 * mapping.
-	 */
-	REG_WRITE(addr, 0x18, 0x3210);
-
-	/* Tag Remap 2: use an identity 802.1p prio -> switch prio
-	 * mapping.
-	 */
-	REG_WRITE(addr, 0x19, 0x7654);
-
-	return mv88e6xxx_setup_port_common(ds, p);
-}
-
 #ifdef CONFIG_NET_DSA_HWMON
 
 static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
@@ -292,7 +144,6 @@
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
-	int i;
 
 	ret = mv88e6xxx_setup_common(ds);
 	if (ret < 0)
@@ -306,19 +157,11 @@
 	if (ret < 0)
 		return ret;
 
-	/* @@@ initialise vtu and atu */
-
 	ret = mv88e6352_setup_global(ds);
 	if (ret < 0)
 		return ret;
 
-	for (i = 0; i < ps->num_ports; i++) {
-		ret = mv88e6352_setup_port(ds, i);
-		if (ret < 0)
-			return ret;
-	}
-
-	return 0;
+	return mv88e6xxx_setup_ports(ds);
 }
 
 static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
@@ -552,3 +395,4 @@
 };
 
 MODULE_ALIAS("platform:mv88e6352");
+MODULE_ALIAS("platform:mv88e6172");
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index cf309aa9..fd8547c 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -8,6 +8,7 @@
  * (at your option) any later version.
  */
 
+#include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
 #include <linux/if_bridge.h>
@@ -16,9 +17,38 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
+#include <linux/seq_file.h>
 #include <net/dsa.h>
 #include "mv88e6xxx.h"
 
+/* MDIO bus access can be nested in the case of PHYs connected to the
+ * internal MDIO bus of the switch, which is accessed via the MDIO bus
+ * of the Ethernet interface. Avoid lockdep false positives by using
+ * mutex_lock_nested().
+ */
+static int mv88e6xxx_mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
+{
+	int ret;
+
+	mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+	ret = bus->read(bus, addr, regnum);
+	mutex_unlock(&bus->mdio_lock);
+
+	return ret;
+}
+
+static int mv88e6xxx_mdiobus_write(struct mii_bus *bus, int addr, u32 regnum,
+				   u16 val)
+{
+	int ret;
+
+	mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+	ret = bus->write(bus, addr, regnum, val);
+	mutex_unlock(&bus->mdio_lock);
+
+	return ret;
+}
+
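+/* Illustrative nesting (outer-path names assumed, not from this patch):
+ *
+ *   mdiobus_read(switch internal MDIO bus)   - takes that bus's mdio_lock
+ *     -> mv88e6xxx_phy_read()
+ *       -> mv88e6xxx_mdiobus_read(eth bus)   - takes the eth mdio_lock
+ *
+ * Both mutexes share one lock class, so a plain mutex_lock() would make
+ * lockdep report "possible recursive locking" even though the two locks
+ * are distinct; SINGLE_DEPTH_NESTING marks the inner one as nested.
+ */
+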
 /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
  * use all 32 SMI bus addresses on its SMI bus, and all switch registers
  * will be directly accessible on some {device address,register address}
@@ -33,7 +63,7 @@
 	int i;
 
 	for (i = 0; i < 16; i++) {
-		ret = mdiobus_read(bus, sw_addr, SMI_CMD);
+		ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_CMD);
 		if (ret < 0)
 			return ret;
 
@@ -49,7 +79,7 @@
 	int ret;
 
 	if (sw_addr == 0)
-		return mdiobus_read(bus, addr, reg);
+		return mv88e6xxx_mdiobus_read(bus, addr, reg);
 
 	/* Wait for the bus to become free. */
 	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
@@ -57,8 +87,8 @@
 		return ret;
 
 	/* Transmit the read command. */
-	ret = mdiobus_write(bus, sw_addr, SMI_CMD,
-			    SMI_CMD_OP_22_READ | (addr << 5) | reg);
+	ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
+				      SMI_CMD_OP_22_READ | (addr << 5) | reg);
 	if (ret < 0)
 		return ret;
 
@@ -68,7 +98,7 @@
 		return ret;
 
 	/* Read the data. */
-	ret = mdiobus_read(bus, sw_addr, SMI_DATA);
+	ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_DATA);
 	if (ret < 0)
 		return ret;
 
@@ -112,7 +142,7 @@
 	int ret;
 
 	if (sw_addr == 0)
-		return mdiobus_write(bus, addr, reg, val);
+		return mv88e6xxx_mdiobus_write(bus, addr, reg, val);
 
 	/* Wait for the bus to become free. */
 	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
@@ -120,13 +150,13 @@
 		return ret;
 
 	/* Transmit the data to write. */
-	ret = mdiobus_write(bus, sw_addr, SMI_DATA, val);
+	ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_DATA, val);
 	if (ret < 0)
 		return ret;
 
 	/* Transmit the write command. */
-	ret = mdiobus_write(bus, sw_addr, SMI_CMD,
-			    SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
+	ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
+				      SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
 	if (ret < 0)
 		return ret;
 
@@ -165,24 +195,6 @@
 	return ret;
 }
 
-int mv88e6xxx_config_prio(struct dsa_switch *ds)
-{
-	/* Configure the IP ToS mapping registers. */
-	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
-	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
-	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
-	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
-	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
-	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
-	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
-	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
-
-	/* Configure the IEEE 802.1p priority mapping register. */
-	REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
-
-	return 0;
-}
-
 int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
 {
 	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
@@ -217,20 +229,20 @@
 	return 0;
 }
 
-/* Must be called with phy mutex held */
+/* Must be called with SMI mutex held */
 static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
 {
 	if (addr >= 0)
-		return mv88e6xxx_reg_read(ds, addr, regnum);
+		return _mv88e6xxx_reg_read(ds, addr, regnum);
 	return 0xffff;
 }
 
-/* Must be called with phy mutex held */
+/* Must be called with SMI mutex held */
 static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
 				u16 val)
 {
 	if (addr >= 0)
-		return mv88e6xxx_reg_write(ds, addr, regnum, val);
+		return _mv88e6xxx_reg_write(ds, addr, regnum, val);
 	return 0;
 }
 
@@ -434,26 +446,113 @@
 	}
 }
 
-static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 
 	switch (ps->id) {
-	case PORT_SWITCH_ID_6352:
-	case PORT_SWITCH_ID_6172:
-	case PORT_SWITCH_ID_6176:
+	case PORT_SWITCH_ID_6031:
+	case PORT_SWITCH_ID_6061:
+	case PORT_SWITCH_ID_6035:
+	case PORT_SWITCH_ID_6065:
 		return true;
 	}
 	return false;
 }
 
-static int mv88e6xxx_stats_wait(struct dsa_switch *ds)
+static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+	switch (ps->id) {
+	case PORT_SWITCH_ID_6092:
+	case PORT_SWITCH_ID_6095:
+		return true;
+	}
+	return false;
+}
+
+static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+	switch (ps->id) {
+	case PORT_SWITCH_ID_6046:
+	case PORT_SWITCH_ID_6085:
+	case PORT_SWITCH_ID_6096:
+	case PORT_SWITCH_ID_6097:
+		return true;
+	}
+	return false;
+}
+
+static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+	switch (ps->id) {
+	case PORT_SWITCH_ID_6123:
+	case PORT_SWITCH_ID_6161:
+	case PORT_SWITCH_ID_6165:
+		return true;
+	}
+	return false;
+}
+
+static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+	switch (ps->id) {
+	case PORT_SWITCH_ID_6121:
+	case PORT_SWITCH_ID_6122:
+	case PORT_SWITCH_ID_6152:
+	case PORT_SWITCH_ID_6155:
+	case PORT_SWITCH_ID_6182:
+	case PORT_SWITCH_ID_6185:
+	case PORT_SWITCH_ID_6108:
+	case PORT_SWITCH_ID_6131:
+		return true;
+	}
+	return false;
+}
+
+static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+	switch (ps->id) {
+	case PORT_SWITCH_ID_6171:
+	case PORT_SWITCH_ID_6175:
+	case PORT_SWITCH_ID_6350:
+	case PORT_SWITCH_ID_6351:
+		return true;
+	}
+	return false;
+}
+
+static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+	switch (ps->id) {
+	case PORT_SWITCH_ID_6172:
+	case PORT_SWITCH_ID_6176:
+	case PORT_SWITCH_ID_6240:
+	case PORT_SWITCH_ID_6352:
+		return true;
+	}
+	return false;
+}
+
+/* Must be called with SMI mutex held */
+static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
 {
 	int ret;
 	int i;
 
 	for (i = 0; i < 10; i++) {
-		ret = REG_READ(REG_GLOBAL, GLOBAL_STATS_OP);
+		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
 		if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
 			return 0;
 	}
@@ -461,7 +560,8 @@
 	return -ETIMEDOUT;
 }
 
-static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
+/* Must be called with SMI mutex held */
+static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
 {
 	int ret;
 
@@ -469,42 +569,45 @@
 		port = (port + 1) << 5;
 
 	/* Snapshot the hardware statistics counters for this port. */
-	REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP,
-		  GLOBAL_STATS_OP_CAPTURE_PORT |
-		  GLOBAL_STATS_OP_HIST_RX_TX | port);
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+				   GLOBAL_STATS_OP_CAPTURE_PORT |
+				   GLOBAL_STATS_OP_HIST_RX_TX | port);
+	if (ret < 0)
+		return ret;
 
 	/* Wait for the snapshotting to complete. */
-	ret = mv88e6xxx_stats_wait(ds);
+	ret = _mv88e6xxx_stats_wait(ds);
 	if (ret < 0)
 		return ret;
 
 	return 0;
 }
 
-static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
+/* Must be called with SMI mutex held */
+static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
 {
 	u32 _val;
 	int ret;
 
 	*val = 0;
 
-	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
-				  GLOBAL_STATS_OP_READ_CAPTURED |
-				  GLOBAL_STATS_OP_HIST_RX_TX | stat);
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+				   GLOBAL_STATS_OP_READ_CAPTURED |
+				   GLOBAL_STATS_OP_HIST_RX_TX | stat);
 	if (ret < 0)
 		return;
 
-	ret = mv88e6xxx_stats_wait(ds);
+	ret = _mv88e6xxx_stats_wait(ds);
 	if (ret < 0)
 		return;
 
-	ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
+	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
 	if (ret < 0)
 		return;
 
 	_val = ret << 16;
 
-	ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
+	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
 	if (ret < 0)
 		return;
 
@@ -578,6 +681,40 @@
 	}
 }
 
+static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
+					    int stat,
+					    struct mv88e6xxx_hw_stat *stats,
+					    int port)
+{
+	struct mv88e6xxx_hw_stat *s = stats + stat;
+	u32 low;
+	u32 high = 0;
+	int ret;
+	u64 value;
+
+	if (s->reg >= 0x100) {
+		ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
+					  s->reg - 0x100);
+		if (ret < 0)
+			return UINT64_MAX;
+
+		low = ret;
+		if (s->sizeof_stat == 4) {
+			ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
+						  s->reg - 0x100 + 1);
+			if (ret < 0)
+				return UINT64_MAX;
+			high = ret;
+		}
+	} else {
+		_mv88e6xxx_stats_read(ds, s->reg, &low);
+		if (s->sizeof_stat == 8)
+			_mv88e6xxx_stats_read(ds, s->reg + 1, &high);
+	}
+	/* Port registers are pairs of 16-bit halves, stats unit
+	 * counters are pairs of 32-bit halves, so shift the high part
+	 * accordingly (as the old per-driver loops did with 16 and 32).
+	 */
+	if (s->reg >= 0x100)
+		value = (((u64)high) << 16) | low;
+	else
+		value = (((u64)high) << 32) | low;
+	return value;
+}
+
 static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
 					 int nr_stats,
 					 struct mv88e6xxx_hw_stat *stats,
@@ -587,44 +724,19 @@
 	int ret;
 	int i;
 
-	mutex_lock(&ps->stats_mutex);
+	mutex_lock(&ps->smi_mutex);
 
-	ret = mv88e6xxx_stats_snapshot(ds, port);
+	ret = _mv88e6xxx_stats_snapshot(ds, port);
 	if (ret < 0) {
-		mutex_unlock(&ps->stats_mutex);
+		mutex_unlock(&ps->smi_mutex);
 		return;
 	}
 
 	/* Read each of the counters. */
-	for (i = 0; i < nr_stats; i++) {
-		struct mv88e6xxx_hw_stat *s = stats + i;
-		u32 low;
-		u32 high = 0;
+	for (i = 0; i < nr_stats; i++)
+		data[i] = _mv88e6xxx_get_ethtool_stat(ds, i, stats, port);
 
-		if (s->reg >= 0x100) {
-			ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
-						 s->reg - 0x100);
-			if (ret < 0)
-				goto error;
-			low = ret;
-			if (s->sizeof_stat == 4) {
-				ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
-							 s->reg - 0x100 + 1);
-				if (ret < 0)
-					goto error;
-				high = ret;
-			}
-			data[i] = (((u64)high) << 16) | low;
-			continue;
-		}
-		mv88e6xxx_stats_read(ds, s->reg, &low);
-		if (s->sizeof_stat == 8)
-			mv88e6xxx_stats_read(ds, s->reg + 1, &high);
-
-		data[i] = (((u64)high) << 32) | low;
-	}
-error:
-	mutex_unlock(&ps->stats_mutex);
+	mutex_unlock(&ps->smi_mutex);
 }
 
 /* All the statistics in the table */
@@ -694,7 +806,7 @@
 
 	*temp = 0;
 
-	mutex_lock(&ps->phy_mutex);
+	mutex_lock(&ps->smi_mutex);
 
 	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
 	if (ret < 0)
@@ -727,47 +839,14 @@
 
 error:
 	_mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
-	mutex_unlock(&ps->phy_mutex);
+	mutex_unlock(&ps->smi_mutex);
 	return ret;
 }
 #endif /* CONFIG_NET_DSA_HWMON */
 
-static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
-{
-	unsigned long timeout = jiffies + HZ / 10;
-
-	while (time_before(jiffies, timeout)) {
-		int ret;
-
-		ret = REG_READ(reg, offset);
-		if (!(ret & mask))
-			return 0;
-
-		usleep_range(1000, 2000);
-	}
-	return -ETIMEDOUT;
-}
-
-int mv88e6xxx_phy_wait(struct dsa_switch *ds)
-{
-	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
-			      GLOBAL2_SMI_OP_BUSY);
-}
-
-int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
-{
-	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
-			      GLOBAL2_EEPROM_OP_LOAD);
-}
-
-int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
-{
-	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
-			      GLOBAL2_EEPROM_OP_BUSY);
-}
-
 /* Must be called with SMI lock held */
-static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
+static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
+			   u16 mask)
 {
 	unsigned long timeout = jiffies + HZ / 10;
 
@@ -785,6 +864,36 @@
 	return -ETIMEDOUT;
 }
 
+static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int ret;
+
+	mutex_lock(&ps->smi_mutex);
+	ret = _mv88e6xxx_wait(ds, reg, offset, mask);
+	mutex_unlock(&ps->smi_mutex);
+
+	return ret;
+}
+
+static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
+{
+	return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+			       GLOBAL2_SMI_OP_BUSY);
+}
+
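+/* The EEPROM wait helpers keep their public interface, but they now
+ * serialise internally through mv88e6xxx_wait(), which takes the SMI
+ * mutex; do not call them with that mutex already held.
+ */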
+int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
+{
+	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+			      GLOBAL2_EEPROM_OP_LOAD);
+}
+
+int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
+{
+	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+			      GLOBAL2_EEPROM_OP_BUSY);
+}
+
 /* Must be called with SMI lock held */
 static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
 {
@@ -792,31 +901,47 @@
 			       GLOBAL_ATU_OP_BUSY);
 }
 
-/* Must be called with phy mutex held */
+/* Must be called with SMI lock held */
+static int _mv88e6xxx_scratch_wait(struct dsa_switch *ds)
+{
+	return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
+			       GLOBAL2_SCRATCH_BUSY);
+}
+
+/* Must be called with SMI mutex held */
 static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
 					int regnum)
 {
 	int ret;
 
-	REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_OP,
-		  GLOBAL2_SMI_OP_22_READ | (addr << 5) | regnum);
-
-	ret = mv88e6xxx_phy_wait(ds);
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+				   GLOBAL2_SMI_OP_22_READ | (addr << 5) |
+				   regnum);
 	if (ret < 0)
 		return ret;
 
-	return REG_READ(REG_GLOBAL2, GLOBAL2_SMI_DATA);
+	ret = _mv88e6xxx_phy_wait(ds);
+	if (ret < 0)
+		return ret;
+
+	return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
 }
 
-/* Must be called with phy mutex held */
+/* Must be called with SMI mutex held */
 static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
 					 int regnum, u16 val)
 {
-	REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
-	REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_OP,
-		  GLOBAL2_SMI_OP_22_WRITE | (addr << 5) | regnum);
+	int ret;
 
-	return mv88e6xxx_phy_wait(ds);
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
+	if (ret < 0)
+		return ret;
+
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+				   GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
+				   regnum);
+	if (ret < 0)
+		return ret;
+
+	return _mv88e6xxx_phy_wait(ds);
 }
 
 int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
@@ -824,7 +949,7 @@
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int reg;
 
-	mutex_lock(&ps->phy_mutex);
+	mutex_lock(&ps->smi_mutex);
 
 	reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
 	if (reg < 0)
@@ -833,7 +958,7 @@
 	e->eee_enabled = !!(reg & 0x0200);
 	e->tx_lpi_enabled = !!(reg & 0x0100);
 
-	reg = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
+	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
 	if (reg < 0)
 		goto out;
 
@@ -841,7 +966,7 @@
 	reg = 0;
 
 out:
-	mutex_unlock(&ps->phy_mutex);
+	mutex_unlock(&ps->smi_mutex);
 	return reg;
 }
 
@@ -852,7 +977,7 @@
 	int reg;
 	int ret;
 
-	mutex_lock(&ps->phy_mutex);
+	mutex_lock(&ps->smi_mutex);
 
 	ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
 	if (ret < 0)
@@ -866,7 +991,7 @@
 
 	ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
 out:
-	mutex_unlock(&ps->phy_mutex);
+	mutex_unlock(&ps->smi_mutex);
 
 	return ret;
 }
@@ -1241,13 +1366,212 @@
 	}
 }
 
-int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port)
+static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret, fid;
+	u16 reg;
 
 	mutex_lock(&ps->smi_mutex);
 
+	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
+	    mv88e6xxx_6065_family(ds)) {
+		/* MAC Forcing register: don't force link, speed,
+		 * duplex or flow control state to any particular
+		 * values on physical ports, but force the CPU port
+		 * and all DSA ports to their maximum bandwidth and
+		 * full duplex.
+		 */
+		ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
+		if (ret < 0)
+			goto abort;
+		reg = ret;
+		if (dsa_is_cpu_port(ds, port) ||
+		    ds->dsa_port_mask & (1 << port)) {
+			reg |= PORT_PCS_CTRL_FORCE_LINK |
+				PORT_PCS_CTRL_LINK_UP |
+				PORT_PCS_CTRL_DUPLEX_FULL |
+				PORT_PCS_CTRL_FORCE_DUPLEX;
+			if (mv88e6xxx_6065_family(ds))
+				reg |= PORT_PCS_CTRL_100;
+			else
+				reg |= PORT_PCS_CTRL_1000;
+		} else {
+			reg |= PORT_PCS_CTRL_UNFORCED;
+		}
+
+		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+					   PORT_PCS_CTRL, reg);
+		if (ret)
+			goto abort;
+	}
+
+	/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
+	 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
+	 * tunneling, determine priority by looking at 802.1p and IP
+	 * priority fields (IP prio has precedence), and set STP state
+	 * to Forwarding.
+	 *
+	 * If this is the CPU link, use DSA or EDSA tagging depending
+	 * on which tagging mode was configured.
+	 *
+	 * If this is a link to another switch, use DSA tagging mode.
+	 *
+	 * If this is the upstream port for this switch, enable
+	 * forwarding of unknown unicasts and multicasts.
+	 */
+	reg = 0;
+	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
+	    mv88e6xxx_6185_family(ds))
+		reg = PORT_CONTROL_IGMP_MLD_SNOOP |
+			PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
+			PORT_CONTROL_STATE_FORWARDING;
+	if (dsa_is_cpu_port(ds, port)) {
+		if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
+			reg |= PORT_CONTROL_DSA_TAG;
+		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
+				reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
+			else
+				reg |= PORT_CONTROL_FRAME_MODE_DSA;
+		}
+
+		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+		    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
+		    mv88e6xxx_6185_family(ds)) {
+			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
+				reg |= PORT_CONTROL_EGRESS_ADD_TAG;
+		}
+	}
+	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds)) {
+		if (ds->dsa_port_mask & (1 << port))
+			reg |= PORT_CONTROL_FRAME_MODE_DSA;
+		if (port == dsa_upstream_port(ds))
+			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
+				PORT_CONTROL_FORWARD_UNKNOWN_MC;
+	}
+	if (reg) {
+		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+					   PORT_CONTROL, reg);
+		if (ret)
+			goto abort;
+	}
+
+	/* Port Control 2: don't force a good FCS, set the maximum
+	 * frame size to 10240 bytes, don't let the switch add or
+	 * strip 802.1q tags, don't discard tagged or untagged frames
+	 * on this port, do a destination address lookup on all
+	 * received packets as usual, disable ARP mirroring and don't
+	 * send a copy of all transmitted/received frames on this port
+	 * to the CPU.
+	 */
+	reg = 0;
+	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+	    mv88e6xxx_6095_family(ds))
+		reg = PORT_CONTROL_2_MAP_DA;
+
+	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+	    mv88e6xxx_6165_family(ds))
+		reg |= PORT_CONTROL_2_JUMBO_10240;
+
+	if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
+		/* Set the upstream port this port should use */
+		reg |= dsa_upstream_port(ds);
+		/* Enable forwarding of unknown multicast addresses to
+		 * the upstream port.
+		 */
+		if (port == dsa_upstream_port(ds))
+			reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
+	}
+
+	if (reg) {
+		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+					   PORT_CONTROL_2, reg);
+		if (ret)
+			goto abort;
+	}
+
+	/* Port Association Vector: when learning source addresses
+	 * of packets, add the address to the address database using
+	 * a port bitmap that has only the bit for this port set and
+	 * the other bits clear.
+	 */
+	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR,
+				   1 << port);
+	if (ret)
+		goto abort;
+
+	/* Egress rate control 2: disable egress rate control. */
+	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
+				   0x0000);
+	if (ret)
+		goto abort;
+
+	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+		/* Do not limit the period of time that this port can
+		 * be paused for by the remote end or the period of
+		 * time that this port can pause the remote end.
+		 */
+		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+					   PORT_PAUSE_CTRL, 0x0000);
+		if (ret)
+			goto abort;
+
+		/* Port ATU control: disable limiting the number of
+		 * address database entries that this port is allowed
+		 * to use.
+		 */
+		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+					   PORT_ATU_CONTROL, 0x0000);
+		if (ret)
+			goto abort;
+
+		/* Priority Override: disable DA, SA and VTU priority
+		 * override.
+		 */
+		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+					   PORT_PRI_OVERRIDE, 0x0000);
+		if (ret)
+			goto abort;
+
+		/* Port Ethertype: use the EDSA (Ethertype DSA)
+		 * Ethertype value.
+		 */
+		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+					   PORT_ETH_TYPE, ETH_P_EDSA);
+		if (ret)
+			goto abort;
+
+		/* Tag Remap: use an identity 802.1p prio -> switch
+		 * prio mapping.
+		 */
+		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+					   PORT_TAG_REGMAP_0123, 0x3210);
+		if (ret)
+			goto abort;
+
+		/* Tag Remap 2: use an identity 802.1p prio -> switch
+		 * prio mapping.
+		 */
+		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+					   PORT_TAG_REGMAP_4567, 0x7654);
+		if (ret)
+			goto abort;
+	}
+
+	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+		/* Rate Control: disable ingress rate limiting. */
+		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+					   PORT_RATE_CONTROL, 0x0001);
+		if (ret)
+			goto abort;
+	}
+
 	/* Port Control 1: disable trunking, disable sending
 	 * learning messages to this port.
 	 */
@@ -1281,13 +1605,283 @@
 	return ret;
 }
 
+int mv88e6xxx_setup_ports(struct dsa_switch *ds)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int ret;
+	int i;
+
+	for (i = 0; i < ps->num_ports; i++) {
+		ret = mv88e6xxx_setup_port(ds, i);
+		if (ret < 0)
+			return ret;
+	}
+	return 0;
+}
+
+static int mv88e6xxx_regs_show(struct seq_file *s, void *p)
+{
+	struct dsa_switch *ds = s->private;
+
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int reg, port;
+
+	seq_puts(s, "    GLOBAL GLOBAL2 ");
+	for (port = 0; port < ps->num_ports; port++)
+		seq_printf(s, " %2d  ", port);
+	seq_puts(s, "\n");
+
+	for (reg = 0; reg < 32; reg++) {
+		seq_printf(s, "%2x: ", reg);
+		seq_printf(s, " %4x    %4x  ",
+			   mv88e6xxx_reg_read(ds, REG_GLOBAL, reg),
+			   mv88e6xxx_reg_read(ds, REG_GLOBAL2, reg));
+
+		for (port = 0; port < ps->num_ports; port++)
+			seq_printf(s, "%4x ",
+				   mv88e6xxx_reg_read(ds, REG_PORT(port), reg));
+		seq_puts(s, "\n");
+	}
+
+	return 0;
+}
+
+static int mv88e6xxx_regs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mv88e6xxx_regs_show, inode->i_private);
+}
+
+static const struct file_operations mv88e6xxx_regs_fops = {
+	.open   = mv88e6xxx_regs_open,
+	.read   = seq_read,
+	.llseek = no_llseek,
+	.release = single_release,
+	.owner  = THIS_MODULE,
+};
+
+static void mv88e6xxx_atu_show_header(struct seq_file *s)
+{
+	seq_puts(s, "DB   T/P  Vec State Addr\n");
+}
+
+static void mv88e6xxx_atu_show_entry(struct seq_file *s, int dbnum,
+				     unsigned char *addr, int data)
+{
+	bool trunk = !!(data & GLOBAL_ATU_DATA_TRUNK);
+	/* %pb expects a bitmap of unsigned longs */
+	unsigned long portvec = ((data & GLOBAL_ATU_DATA_PORT_VECTOR_MASK) >>
+				 GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT);
+	int state = data & GLOBAL_ATU_DATA_STATE_MASK;
+
+	seq_printf(s, "%03x %5s %10pb   %x   %pM\n",
+		   dbnum, (trunk ? "Trunk" : "Port"), &portvec, state, addr);
+}
+
+static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds,
+				 int dbnum)
+{
+	unsigned char bcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+	unsigned char addr[6];
+	int ret, data, state;
+
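+	/* GetNext returns the next valid entry above the address seeded
+	 * in the ATU MAC registers and wraps around, so starting from
+	 * the broadcast address walks the database from its lowest
+	 * entry (the standard GetNext idiom on these switches).
+	 */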
+	ret = __mv88e6xxx_write_addr(ds, bcast);
+	if (ret < 0)
+		return ret;
+
+	do {
+		ret = _mv88e6xxx_atu_cmd(ds, dbnum, GLOBAL_ATU_OP_GET_NEXT_DB);
+		if (ret < 0)
+			return ret;
+		data = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
+		if (data < 0)
+			return data;
+
+		state = data & GLOBAL_ATU_DATA_STATE_MASK;
+		if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
+			break;
+		ret = __mv88e6xxx_read_addr(ds, addr);
+		if (ret < 0)
+			return ret;
+		mv88e6xxx_atu_show_entry(s, dbnum, addr, data);
+	} while (state != GLOBAL_ATU_DATA_STATE_UNUSED);
+
+	return 0;
+}
+
+static int mv88e6xxx_atu_show(struct seq_file *s, void *p)
+{
+	struct dsa_switch *ds = s->private;
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int dbnum;
+
+	mv88e6xxx_atu_show_header(s);
+
+	for (dbnum = 0; dbnum < 256; dbnum++) {
+		mutex_lock(&ps->smi_mutex);
+		mv88e6xxx_atu_show_db(s, ds, dbnum);
+		mutex_unlock(&ps->smi_mutex);
+	}
+
+	return 0;
+}
+
+static int mv88e6xxx_atu_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mv88e6xxx_atu_show, inode->i_private);
+}
+
+static const struct file_operations mv88e6xxx_atu_fops = {
+	.open   = mv88e6xxx_atu_open,
+	.read   = seq_read,
+	.llseek = no_llseek,
+	.release = single_release,
+	.owner  = THIS_MODULE,
+};
+
+static void mv88e6xxx_stats_show_header(struct seq_file *s,
+					struct mv88e6xxx_priv_state *ps)
+{
+	int port;
+
+	seq_puts(s, "      Statistic       ");
+	for (port = 0; port < ps->num_ports; port++)
+		seq_printf(s, "Port %2d  ", port);
+	seq_puts(s, "\n");
+}
+
+static int mv88e6xxx_stats_show(struct seq_file *s, void *p)
+{
+	struct dsa_switch *ds = s->private;
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	struct mv88e6xxx_hw_stat *stats = mv88e6xxx_hw_stats;
+	int port, stat, max_stats;
+	uint64_t value;
+
+	if (have_sw_in_discards(ds))
+		max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats);
+	else
+		max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
+
+	mv88e6xxx_stats_show_header(s, ps);
+
+	mutex_lock(&ps->smi_mutex);
+
+	for (stat = 0; stat < max_stats; stat++) {
+		seq_printf(s, "%19s: ", stats[stat].string);
+		for (port = 0; port < ps->num_ports; port++)
+			_mv88e6xxx_stats_snapshot(ds, port);
+			value = _mv88e6xxx_get_ethtool_stat(ds, stat, stats,
+							    port);
+			seq_printf(s, "%8llu ", value);
+		}
+		seq_puts(s, "\n");
+	}
+	mutex_unlock(&ps->smi_mutex);
+
+	return 0;
+}
+
+static int mv88e6xxx_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mv88e6xxx_stats_show, inode->i_private);
+}
+
+static const struct file_operations mv88e6xxx_stats_fops = {
+	.open   = mv88e6xxx_stats_open,
+	.read   = seq_read,
+	.llseek = no_llseek,
+	.release = single_release,
+	.owner  = THIS_MODULE,
+};
+
+static int mv88e6xxx_device_map_show(struct seq_file *s, void *p)
+{
+	struct dsa_switch *ds = s->private;
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int target, ret;
+
+	seq_puts(s, "Target Port\n");
+
+	mutex_lock(&ps->smi_mutex);
+	for (target = 0; target < 32; target++) {
+		ret = _mv88e6xxx_reg_write(
+			ds, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
+			target << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT);
+		if (ret < 0)
+			goto out;
+		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
+					  GLOBAL2_DEVICE_MAPPING);
+		seq_printf(s, "  %2d   %2d\n", target,
+			   ret & GLOBAL2_DEVICE_MAPPING_PORT_MASK);
+	}
+out:
+	mutex_unlock(&ps->smi_mutex);
+
+	return 0;
+}
+
+static int mv88e6xxx_device_map_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mv88e6xxx_device_map_show, inode->i_private);
+}
+
+static const struct file_operations mv88e6xxx_device_map_fops = {
+	.open   = mv88e6xxx_device_map_open,
+	.read   = seq_read,
+	.llseek = no_llseek,
+	.release = single_release,
+	.owner  = THIS_MODULE,
+};
+
+static int mv88e6xxx_scratch_show(struct seq_file *s, void *p)
+{
+	struct dsa_switch *ds = s->private;
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int reg, ret;
+
+	seq_puts(s, "Register Value\n");
+
+	mutex_lock(&ps->smi_mutex);
+	for (reg = 0; reg < 0x80; reg++) {
+		ret = _mv88e6xxx_reg_write(
+			ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
+			reg << GLOBAL2_SCRATCH_REGISTER_SHIFT);
+		if (ret < 0)
+			goto out;
+
+		ret = _mv88e6xxx_scratch_wait(ds);
+		if (ret < 0)
+			goto out;
+
+		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
+					  GLOBAL2_SCRATCH_MISC);
+		seq_printf(s, "  %2x   %2x\n", reg,
+			   ret & GLOBAL2_SCRATCH_VALUE_MASK);
+	}
+out:
+	mutex_unlock(&ps->smi_mutex);
+
+	return 0;
+}
+
+static int mv88e6xxx_scratch_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mv88e6xxx_scratch_show, inode->i_private);
+}
+
+static const struct file_operations mv88e6xxx_scratch_fops = {
+	.open   = mv88e6xxx_scratch_open,
+	.read   = seq_read,
+	.llseek = no_llseek,
+	.release = single_release,
+	.owner  = THIS_MODULE,
+};
+
 int mv88e6xxx_setup_common(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	char *name;
 
 	mutex_init(&ps->smi_mutex);
-	mutex_init(&ps->stats_mutex);
-	mutex_init(&ps->phy_mutex);
 
 	ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
 
@@ -1295,6 +1889,128 @@
 
 	INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
 
+	name = kasprintf(GFP_KERNEL, "dsa%d", ds->index);
+	ps->dbgfs = debugfs_create_dir(name, NULL);
+	kfree(name);
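+	/* These land under <debugfs>/dsa<N>/, e.g. with the default
+	 * mount point: /sys/kernel/debug/dsa0/{regs,atu,stats,
+	 * device_map,scratch}.
+	 */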
+
+	debugfs_create_file("regs", S_IRUGO, ps->dbgfs, ds,
+			    &mv88e6xxx_regs_fops);
+
+	debugfs_create_file("atu", S_IRUGO, ps->dbgfs, ds,
+			    &mv88e6xxx_atu_fops);
+
+	debugfs_create_file("stats", S_IRUGO, ps->dbgfs, ds,
+			    &mv88e6xxx_stats_fops);
+
+	debugfs_create_file("device_map", S_IRUGO, ps->dbgfs, ds,
+			    &mv88e6xxx_device_map_fops);
+
+	debugfs_create_file("scratch", S_IRUGO, ps->dbgfs, ds,
+			    &mv88e6xxx_scratch_fops);
+	return 0;
+}
+
+int mv88e6xxx_setup_global(struct dsa_switch *ds)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int i;
+
+	/* Set the default address aging time to 5 minutes, and
+	 * enable address learn messages to be sent to all message
+	 * ports.
+	 */
+	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
+		  0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
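+	/* 0x0140: age-time field (bits 11:4) = 0x14 = 20; assuming the
+	 * usual 15 s units this is 20 * 15 s = 300 s = 5 minutes.
+	 */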
+
+	/* Configure the IP ToS mapping registers. */
+	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
+	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
+	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
+	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
+	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
+	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
+	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
+	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
+
+	/* Configure the IEEE 802.1p priority mapping register. */
+	REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
+
+	/* Send all frames with destination addresses matching
+	 * 01:80:c2:00:00:0x to the CPU port.
+	 */
+	REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
+
+	/* Ignore removed tag data on doubly tagged packets, disable
+	 * flow control messages, force flow control priority to the
+	 * highest, and send all special multicast frames to the CPU
+	 * port at the highest priority.
+	 */
+	REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
+		  0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
+		  GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
+
+	/* Program the DSA routing table. */
+	for (i = 0; i < 32; i++) {
+		int nexthop = 0x1f;
+
+		if (ds->pd->rtable &&
+		    i != ds->index && i < ds->dst->pd->nr_chips)
+			nexthop = ds->pd->rtable[i] & 0x1f;
+
+		REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
+			  GLOBAL2_DEVICE_MAPPING_UPDATE |
+			  (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
+			  nexthop);
+	}
+
+	/* Clear all trunk masks. */
+	for (i = 0; i < 8; i++)
+		REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
+			  0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
+			  ((1 << ps->num_ports) - 1));
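+	/* e.g. with ps->num_ports = 7 the member mask is 0x7f, keeping
+	 * every port a member of each (unused) trunk, as the old
+	 * per-chip code did.
+	 */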
+
+	/* Clear all trunk mappings. */
+	for (i = 0; i < 16; i++)
+		REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
+			  GLOBAL2_TRUNK_MAPPING_UPDATE |
+			  (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
+
+	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+		/* Send all frames with destination addresses matching
+		 * 01:80:c2:00:00:2x to the CPU port.
+		 */
+		REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
+
+		/* Initialise cross-chip port VLAN table to reset
+		 * defaults.
+		 */
+		REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
+
+		/* Clear the priority override table. */
+		for (i = 0; i < 16; i++)
+			REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
+				  0x8000 | (i << 8));
+	}
+
+	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+		/* Disable ingress rate limiting by resetting all
+		 * ingress rate limit registers to their initial
+		 * state.
+		 */
+		for (i = 0; i < ps->num_ports; i++)
+			REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
+				  0x9000 | (i << 8));
+	}
+
+	/* Clear the statistics counters for all ports */
+	REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
+
+	/* Wait for the flush to complete; the unlocked helper is fine
+	 * here since setup runs before any concurrent SMI access.
+	 */
+	_mv88e6xxx_stats_wait(ds);
+
 	return 0;
 }
 
@@ -1343,14 +2059,14 @@
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
 
-	mutex_lock(&ps->phy_mutex);
+	mutex_lock(&ps->smi_mutex);
 	ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
 	if (ret < 0)
 		goto error;
 	ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
 error:
 	_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
-	mutex_unlock(&ps->phy_mutex);
+	mutex_unlock(&ps->smi_mutex);
 	return ret;
 }
 
@@ -1360,7 +2076,7 @@
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
 
-	mutex_lock(&ps->phy_mutex);
+	mutex_lock(&ps->smi_mutex);
 	ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
 	if (ret < 0)
 		goto error;
@@ -1368,7 +2084,7 @@
 	ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
 error:
 	_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
-	mutex_unlock(&ps->phy_mutex);
+	mutex_unlock(&ps->smi_mutex);
 	return ret;
 }
 
@@ -1391,9 +2107,9 @@
 	if (addr < 0)
 		return addr;
 
-	mutex_lock(&ps->phy_mutex);
+	mutex_lock(&ps->smi_mutex);
 	ret = _mv88e6xxx_phy_read(ds, addr, regnum);
-	mutex_unlock(&ps->phy_mutex);
+	mutex_unlock(&ps->smi_mutex);
 	return ret;
 }
 
@@ -1407,9 +2123,9 @@
 	if (addr < 0)
 		return addr;
 
-	mutex_lock(&ps->phy_mutex);
+	mutex_lock(&ps->smi_mutex);
 	ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
-	mutex_unlock(&ps->phy_mutex);
+	mutex_unlock(&ps->smi_mutex);
 	return ret;
 }
 
@@ -1423,9 +2139,9 @@
 	if (addr < 0)
 		return addr;
 
-	mutex_lock(&ps->phy_mutex);
+	mutex_lock(&ps->smi_mutex);
 	ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
-	mutex_unlock(&ps->phy_mutex);
+	mutex_unlock(&ps->smi_mutex);
 	return ret;
 }
 
@@ -1440,9 +2156,9 @@
 	if (addr < 0)
 		return addr;
 
-	mutex_lock(&ps->phy_mutex);
+	mutex_lock(&ps->smi_mutex);
 	ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
-	mutex_unlock(&ps->phy_mutex);
+	mutex_unlock(&ps->smi_mutex);
 	return ret;
 }
 
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index e045154..a650b26 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -11,6 +11,10 @@
 #ifndef __MV88E6XXX_H
 #define __MV88E6XXX_H
 
+#ifndef UINT64_MAX
+#define UINT64_MAX		(u64)(~((u64)0))
+#endif
+
 #define SMI_CMD			0x00
 #define SMI_CMD_BUSY		BIT(15)
 #define SMI_CMD_CLAUSE_22	BIT(12)
@@ -40,9 +44,31 @@
 #define PORT_STATUS_TX_PAUSED	BIT(5)
 #define PORT_STATUS_FLOW_CTRL	BIT(4)
 #define PORT_PCS_CTRL		0x01
+#define PORT_PCS_CTRL_FC		BIT(7)
+#define PORT_PCS_CTRL_FORCE_FC		BIT(6)
+#define PORT_PCS_CTRL_LINK_UP		BIT(5)
+#define PORT_PCS_CTRL_FORCE_LINK	BIT(4)
+#define PORT_PCS_CTRL_DUPLEX_FULL	BIT(3)
+#define PORT_PCS_CTRL_FORCE_DUPLEX	BIT(2)
+#define PORT_PCS_CTRL_10		0x00
+#define PORT_PCS_CTRL_100		0x01
+#define PORT_PCS_CTRL_1000		0x02
+#define PORT_PCS_CTRL_UNFORCED		0x03
+#define PORT_PAUSE_CTRL		0x02
 #define PORT_SWITCH_ID		0x03
+#define PORT_SWITCH_ID_6031	0x0310
+#define PORT_SWITCH_ID_6035	0x0350
+#define PORT_SWITCH_ID_6046	0x0480
+#define PORT_SWITCH_ID_6061	0x0610
+#define PORT_SWITCH_ID_6065	0x0650
 #define PORT_SWITCH_ID_6085	0x04a0
+#define PORT_SWITCH_ID_6092	0x0970
 #define PORT_SWITCH_ID_6095	0x0950
+#define PORT_SWITCH_ID_6096	0x0980
+#define PORT_SWITCH_ID_6097	0x0990
+#define PORT_SWITCH_ID_6108	0x1070
+#define PORT_SWITCH_ID_6121	0x1040
+#define PORT_SWITCH_ID_6122	0x1050
 #define PORT_SWITCH_ID_6123	0x1210
 #define PORT_SWITCH_ID_6123_A1	0x1212
 #define PORT_SWITCH_ID_6123_A2	0x1213
@@ -58,13 +84,38 @@
 #define PORT_SWITCH_ID_6165_A2	0x1653
 #define PORT_SWITCH_ID_6171	0x1710
 #define PORT_SWITCH_ID_6172	0x1720
+#define PORT_SWITCH_ID_6175	0x1750
 #define PORT_SWITCH_ID_6176	0x1760
 #define PORT_SWITCH_ID_6182	0x1a60
 #define PORT_SWITCH_ID_6185	0x1a70
+#define PORT_SWITCH_ID_6240	0x2400
+#define PORT_SWITCH_ID_6320	0x1250
+#define PORT_SWITCH_ID_6350	0x3710
+#define PORT_SWITCH_ID_6351	0x3750
 #define PORT_SWITCH_ID_6352	0x3520
 #define PORT_SWITCH_ID_6352_A0	0x3521
 #define PORT_SWITCH_ID_6352_A1	0x3522
 #define PORT_CONTROL		0x04
+#define PORT_CONTROL_USE_CORE_TAG	BIT(15)
+#define PORT_CONTROL_DROP_ON_LOCK	BIT(14)
+#define PORT_CONTROL_EGRESS_UNMODIFIED	(0x0 << 12)
+#define PORT_CONTROL_EGRESS_UNTAGGED	(0x1 << 12)
+#define PORT_CONTROL_EGRESS_TAGGED	(0x2 << 12)
+#define PORT_CONTROL_EGRESS_ADD_TAG	(0x3 << 12)
+#define PORT_CONTROL_HEADER		BIT(11)
+#define PORT_CONTROL_IGMP_MLD_SNOOP	BIT(10)
+#define PORT_CONTROL_DOUBLE_TAG		BIT(9)
+#define PORT_CONTROL_FRAME_MODE_NORMAL		(0x0 << 8)
+#define PORT_CONTROL_FRAME_MODE_DSA		(0x1 << 8)
+#define PORT_CONTROL_FRAME_MODE_PROVIDER	(0x2 << 8)
+#define PORT_CONTROL_FRAME_ETHER_TYPE_DSA	(0x3 << 8)
+#define PORT_CONTROL_DSA_TAG		BIT(8)
+#define PORT_CONTROL_VLAN_TUNNEL	BIT(7)
+#define PORT_CONTROL_TAG_IF_BOTH	BIT(6)
+#define PORT_CONTROL_USE_IP		BIT(5)
+#define PORT_CONTROL_USE_TAG		BIT(4)
+#define PORT_CONTROL_FORWARD_UNKNOWN_MC	BIT(3)
+#define PORT_CONTROL_FORWARD_UNKNOWN	BIT(2)
 #define PORT_CONTROL_STATE_MASK		0x03
 #define PORT_CONTROL_STATE_DISABLED	0x00
 #define PORT_CONTROL_STATE_BLOCKING	0x01
@@ -74,15 +125,32 @@
 #define PORT_BASE_VLAN		0x06
 #define PORT_DEFAULT_VLAN	0x07
 #define PORT_CONTROL_2		0x08
+#define PORT_CONTROL_2_IGNORE_FCS	BIT(15)
+#define PORT_CONTROL_2_VTU_PRI_OVERRIDE	BIT(14)
+#define PORT_CONTROL_2_SA_PRIO_OVERRIDE	BIT(13)
+#define PORT_CONTROL_2_DA_PRIO_OVERRIDE	BIT(12)
+#define PORT_CONTROL_2_JUMBO_1522	(0x00 << 12)
+#define PORT_CONTROL_2_JUMBO_2048	(0x01 << 12)
+#define PORT_CONTROL_2_JUMBO_10240	(0x02 << 12)
+#define PORT_CONTROL_2_DISCARD_TAGGED	BIT(9)
+#define PORT_CONTROL_2_DISCARD_UNTAGGED	BIT(8)
+#define PORT_CONTROL_2_MAP_DA		BIT(7)
+#define PORT_CONTROL_2_DEFAULT_FORWARD	BIT(6)
+#define PORT_CONTROL_2_FORWARD_UNKNOWN	BIT(6)
+#define PORT_CONTROL_2_EGRESS_MONITOR	BIT(5)
+#define PORT_CONTROL_2_INGRESS_MONITOR	BIT(4)
 #define PORT_RATE_CONTROL	0x09
 #define PORT_RATE_CONTROL_2	0x0a
 #define PORT_ASSOC_VECTOR	0x0b
+#define PORT_ATU_CONTROL	0x0c
+#define PORT_PRI_OVERRIDE	0x0d
+#define PORT_ETH_TYPE		0x0f
 #define PORT_IN_DISCARD_LO	0x10
 #define PORT_IN_DISCARD_HI	0x11
 #define PORT_IN_FILTERED	0x12
 #define PORT_OUT_FILTERED	0x13
-#define PORT_TAG_REGMAP_0123	0x19
-#define PORT_TAG_REGMAP_4567	0x1a
+#define PORT_TAG_REGMAP_0123	0x18
+#define PORT_TAG_REGMAP_4567	0x19
 
 #define REG_GLOBAL		0x1b
 #define GLOBAL_STATUS		0x00
@@ -102,7 +170,7 @@
 #define GLOBAL_CONTROL_DISCARD_EXCESS	BIT(13) /* 6352 */
 #define GLOBAL_CONTROL_SCHED_PRIO	BIT(11) /* 6152 */
 #define GLOBAL_CONTROL_MAX_FRAME_1632	BIT(10) /* 6152 */
-#define GLOBAL_CONTROL_RELOAD_EEPROM	BIT(9)  /* 6152 */
+#define GLOBAL_CONTROL_RELOAD_EEPROM	BIT(9)	/* 6152 */
 #define GLOBAL_CONTROL_DEVICE_EN	BIT(7)
 #define GLOBAL_CONTROL_STATS_DONE_EN	BIT(6)
 #define GLOBAL_CONTROL_VTU_PROBLEM_EN	BIT(5)
@@ -117,6 +185,7 @@
 #define GLOBAL_VTU_DATA_4_7	0x08
 #define GLOBAL_VTU_DATA_8_11	0x09
 #define GLOBAL_ATU_CONTROL	0x0a
+#define GLOBAL_ATU_CONTROL_LEARN2ALL	BIT(3)
 #define GLOBAL_ATU_OP		0x0b
 #define GLOBAL_ATU_OP_BUSY	BIT(15)
 #define GLOBAL_ATU_OP_NOP		(0 << 12)
@@ -128,6 +197,9 @@
 #define GLOBAL_ATU_OP_FLUSH_NON_STATIC_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY)
 #define GLOBAL_ATU_OP_GET_CLR_VIOLATION	  ((7 << 12) | GLOBAL_ATU_OP_BUSY)
 #define GLOBAL_ATU_DATA		0x0c
+#define GLOBAL_ATU_DATA_TRUNK			BIT(15)
+#define GLOBAL_ATU_DATA_PORT_VECTOR_MASK	0x3ff0
+#define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT	4
 #define GLOBAL_ATU_DATA_STATE_MASK		0x0f
 #define GLOBAL_ATU_DATA_STATE_UNUSED		0x00
 #define GLOBAL_ATU_DATA_STATE_UC_MGMT		0x0d
@@ -151,7 +223,15 @@
 #define GLOBAL_IEEE_PRI		0x18
 #define GLOBAL_CORE_TAG_TYPE	0x19
 #define GLOBAL_MONITOR_CONTROL	0x1a
+#define GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT	12
+#define GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT	8
+#define GLOBAL_MONITOR_CONTROL_ARP_SHIFT	4
+#define GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT	0
+#define GLOBAL_MONITOR_CONTROL_ARP_DISABLED	(0xf0)
 #define GLOBAL_CONTROL_2	0x1c
+#define GLOBAL_CONTROL_2_NO_CASCADE		0xe000
+#define GLOBAL_CONTROL_2_MULTIPLE_CASCADE	0xf000
+
 #define GLOBAL_STATS_OP		0x1d
 #define GLOBAL_STATS_OP_BUSY	BIT(15)
 #define GLOBAL_STATS_OP_NOP		(0 << 12)
@@ -172,9 +252,21 @@
 #define GLOBAL2_MGMT_EN_0X	0x03
 #define GLOBAL2_FLOW_CONTROL	0x04
 #define GLOBAL2_SWITCH_MGMT	0x05
+#define GLOBAL2_SWITCH_MGMT_USE_DOUBLE_TAG_DATA	BIT(15)
+#define GLOBAL2_SWITCH_MGMT_PREVENT_LOOPS	BIT(14)
+#define GLOBAL2_SWITCH_MGMT_FLOW_CONTROL_MSG	BIT(13)
+#define GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI	BIT(7)
+#define GLOBAL2_SWITCH_MGMT_RSVD2CPU		BIT(3)
 #define GLOBAL2_DEVICE_MAPPING	0x06
+#define GLOBAL2_DEVICE_MAPPING_UPDATE		BIT(15)
+#define GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT	8
+#define GLOBAL2_DEVICE_MAPPING_PORT_MASK	0x0f
 #define GLOBAL2_TRUNK_MASK	0x07
+#define GLOBAL2_TRUNK_MASK_UPDATE		BIT(15)
+#define GLOBAL2_TRUNK_MASK_NUM_SHIFT		12
 #define GLOBAL2_TRUNK_MAPPING	0x08
+#define GLOBAL2_TRUNK_MAPPING_UPDATE		BIT(15)
+#define GLOBAL2_TRUNK_MAPPING_ID_SHIFT		11
 #define GLOBAL2_INGRESS_OP	0x09
 #define GLOBAL2_INGRESS_DATA	0x0a
 #define GLOBAL2_PVT_ADDR	0x0b
@@ -183,6 +275,10 @@
 #define GLOBAL2_SWITCH_MAC_BUSY BIT(15)
 #define GLOBAL2_ATU_STATS	0x0e
 #define GLOBAL2_PRIO_OVERRIDE	0x0f
+#define GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP	BIT(7)
+#define GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT	4
+#define GLOBAL2_PRIO_OVERRIDE_FORCE_ARP		BIT(3)
+#define GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT		0
 #define GLOBAL2_EEPROM_OP	0x14
 #define GLOBAL2_EEPROM_OP_BUSY	BIT(15)
 #define GLOBAL2_EEPROM_OP_LOAD	BIT(11)
@@ -201,6 +297,9 @@
 #define GLOBAL2_SMI_OP_45_READ_DATA	((2 << 10) | GLOBAL2_SMI_OP_BUSY)
 #define GLOBAL2_SMI_DATA	0x19
 #define GLOBAL2_SCRATCH_MISC	0x1a
+#define GLOBAL2_SCRATCH_BUSY		BIT(15)
+#define GLOBAL2_SCRATCH_REGISTER_SHIFT	8
+#define GLOBAL2_SCRATCH_VALUE_MASK	0xff
 #define GLOBAL2_WDOG_CONTROL	0x1b
 #define GLOBAL2_QOS_WEIGHT	0x1c
 #define GLOBAL2_MISC		0x1d
@@ -251,6 +350,8 @@
 	u8 port_state[DSA_MAX_PORTS];
 
 	struct work_struct bridge_work;
+
+	struct dentry *dbgfs;
 };
 
 struct mv88e6xxx_hw_stat {
@@ -260,14 +361,14 @@
 };
 
 int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active);
-int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port);
+int mv88e6xxx_setup_ports(struct dsa_switch *ds);
 int mv88e6xxx_setup_common(struct dsa_switch *ds);
+int mv88e6xxx_setup_global(struct dsa_switch *ds);
 int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg);
 int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg);
 int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
 			  int reg, u16 val);
 int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val);
-int mv88e6xxx_config_prio(struct dsa_switch *ds);
 int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr);
 int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr);
 int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum);
@@ -289,7 +390,6 @@
 void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
 			struct ethtool_regs *regs, void *_p);
 int  mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
-int mv88e6xxx_phy_wait(struct dsa_switch *ds);
 int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds);
 int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds);
 int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum);
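The new GLOBAL_ATU_DATA_* field definitions above describe the packing of the ATU data register: a trunk flag in bit 15, a 10-bit port vector in bits 4-13, and the entry state in the low nibble. As a hedged sketch of how a mask/shift pair like this is typically consumed (the helper names are illustrative, not part of this patch):

    static u16 atu_data_get_portvec(u16 atu_data)
    {
        return (atu_data & GLOBAL_ATU_DATA_PORT_VECTOR_MASK) >>
                GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
    }

    static u16 atu_data_set_portvec(u16 atu_data, u16 portvec)
    {
        /* Clear the old vector, then OR in the new one. */
        atu_data &= ~GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
        atu_data |= (portvec << GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT) &
                    GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
        return atu_data;
    }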
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index afaab4b..5b7658b 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on ISA || EISA || PCI || PCMCIA
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
@@ -23,8 +21,7 @@
 	depends on (ISA || EISA)
 	---help---
 	  If you have a network (Ethernet) card belonging to the 3Com
-	  EtherLinkIII series, say Y and read the Ethernet-HOWTO, available
-	  from <http://www.tldp.org/docs.html#howto>.
+	  EtherLinkIII series, say Y here.
 
 	  If your card is not working you may need to use the DOS
 	  setup disk to disable Plug & Play mode, and to select the default
@@ -38,8 +35,7 @@
 	depends on ISA && ISA_DMA_API
 	---help---
 	  If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
-	  network card, say Y and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  network card, say Y here.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called 3c515.
@@ -78,9 +74,7 @@
 	  "Tornado"   (3c905)                                  PCI
 	  "Hurricane" (3c555/3cSOHO)                           PCI
 
-	  If you have such a card, say Y and read the Ethernet-HOWTO,
-	  available from <http://www.tldp.org/docs.html#howto>. More
-	  specific information is in
+	  If you have such a card, say Y here.  More specific information is in
 	  <file:Documentation/networking/vortex.txt> and in the comments at
 	  the beginning of <file:drivers/net/ethernet/3com/3c59x.c>.
 
@@ -97,9 +91,7 @@
 	  3CR990SVR, 3CR990SVR95, 3CR990SVR97, 3CR990-FX-95 Server,
 	  3CR990-FX-97 Server, 3C990B-TX-M, 3C990BSVR
 
-	  If you have a network (Ethernet) card of this type, say Y and read
-	  the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card of this type, say Y here.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called typhoon.
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index 2d89bd0..edf7225 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on NET_VENDOR_NATSEMI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
@@ -71,9 +69,7 @@
 	select CRC32
 	---help---
 	  If you want to include a driver to support Nubus or LC-PDS
-	  Ethernet cards using an NS8390 chipset or its equivalent, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  Ethernet cards using an NS8390 chipset or its equivalent, say Y.
 
 config MCF8390
 	tristate "ColdFire NS8390 based Ethernet support"
@@ -95,10 +91,9 @@
 		    ATARI_ETHERNEC)
 	select CRC32
 	---help---
-	  If you have a network (Ethernet) card of this type, say Y and read
-	  the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.  Many Ethernet cards
-	  without a specific driver are compatible with NE2000.
+	  If you have a network (Ethernet) card of this type, say Y here.
+	  Many Ethernet cards without a specific driver are compatible with
+	  the NE2000.
 
 	  If you have a PCI NE2000 card however, say N here and Y to "PCI
 	  NE2000 and clone support" below.
@@ -114,8 +109,7 @@
 	  This driver is for NE2000 compatible PCI cards. It will not work
 	  with ISA NE2000 cards (they have their own driver, "NE2000/NE1000
 	  support" below). If you have a PCI NE2000 network (Ethernet) card,
-	  say Y and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  say Y here.
 
 	  This driver also works for the following NE2000 clone cards:
 	  RealTek RTL-8029  Winbond 89C940  Compex RL2000  KTI ET32P2
@@ -164,9 +158,7 @@
 	depends on ISA
 	select CRC32
 	---help---
-	  If you have a network (Ethernet) card of this type, say Y and read
-	  the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card of this type, say Y here.
 
 	  Important: There have been many reports that, with some motherboards
 	  mixing an SMC Ultra and an Adaptec AHA154x SCSI card (or compatible,
@@ -183,9 +175,7 @@
 	depends on ISA
 	select CRC32
 	---help---
-	  If you have a network (Ethernet) card of this type, say Y and read
-	  the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card of this type, say Y here.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called wd.
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index eadcb05..f3bb178 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -34,6 +34,7 @@
 source "drivers/net/ethernet/broadcom/Kconfig"
 source "drivers/net/ethernet/brocade/Kconfig"
 source "drivers/net/ethernet/calxeda/Kconfig"
+source "drivers/net/ethernet/cavium/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
@@ -66,6 +67,7 @@
 source "drivers/net/ethernet/dec/Kconfig"
 source "drivers/net/ethernet/dlink/Kconfig"
 source "drivers/net/ethernet/emulex/Kconfig"
+source "drivers/net/ethernet/ezchip/Kconfig"
 source "drivers/net/ethernet/neterion/Kconfig"
 source "drivers/net/ethernet/faraday/Kconfig"
 source "drivers/net/ethernet/freescale/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 1367afc..c51014b 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -20,6 +20,7 @@
 obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
 obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
 obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
@@ -29,6 +30,7 @@
 obj-$(CONFIG_NET_VENDOR_DEC) += dec/
 obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/
 obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
+obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/
 obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/
 obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
 obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
diff --git a/drivers/net/ethernet/adaptec/Kconfig b/drivers/net/ethernet/adaptec/Kconfig
index 5c804bb..822cffb 100644
--- a/drivers/net/ethernet/adaptec/Kconfig
+++ b/drivers/net/ethernet/adaptec/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index c9cd359..6b94ba6 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -7,8 +7,6 @@
 	depends on BF516 || BF518 || BF526 || BF527 || BF536 || BF537
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y.
-	  Make sure you know the name of your card. Read the Ethernet-HOWTO,
-	  available from <http://www.tldp.org/docs.html#howto>.
 
 	  If unsure, say Y.
 
diff --git a/drivers/net/ethernet/agere/Kconfig b/drivers/net/ethernet/agere/Kconfig
index 63e805d..b6fe920 100644
--- a/drivers/net/ethernet/agere/Kconfig
+++ b/drivers/net/ethernet/agere/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/allwinner/Kconfig b/drivers/net/ethernet/allwinner/Kconfig
index d8d95d4..47da7e7 100644
--- a/drivers/net/ethernet/allwinner/Kconfig
+++ b/drivers/net/ethernet/allwinner/Kconfig
@@ -9,8 +9,7 @@
 	depends on ARCH_SUNXI
 	---help---
 	  If you have a network (Ethernet) card belonging to this
-	  class, say Y and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  class, say Y here.
 
 	  Note that the answer to this question doesn't directly
 	  affect the kernel: saying N will just cause the configurator
diff --git a/drivers/net/ethernet/alteon/Kconfig b/drivers/net/ethernet/alteon/Kconfig
index 799a852..e06ccab 100644
--- a/drivers/net/ethernet/alteon/Kconfig
+++ b/drivers/net/ethernet/alteon/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 4269160..afc62ea 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -37,8 +37,7 @@
 	select MII
 	---help---
 	  If you have an AMD 8111-based PCI LANCE ethernet card,
-	  answer Y here and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  answer Y here.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called amd8111e.
@@ -47,10 +46,8 @@
 	tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
 	depends on ISA && ISA_DMA_API && !ARM
 	---help---
-	  If you have a network (Ethernet) card of this type, say Y and read
-	  the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>. Some LinkSys cards are
-	  of this type.
+	  If you have a network (Ethernet) card of this type, say Y here.
+	  Some LinkSys cards are of this type.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called lance.  This is recommended.
@@ -62,8 +59,7 @@
 	select MII
 	---help---
 	  If you have a PCnet32 or PCnetPCI based network (Ethernet) card,
-	  answer Y here and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  answer Y here.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called pcnet32.
@@ -144,9 +140,7 @@
 	tristate "NI6510 support"
 	depends on ISA && ISA_DMA_API && !ARM
 	---help---
-	  If you have a network (Ethernet) card of this type, say Y and read
-	  the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card of this type, say Y here.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called ni65.
@@ -179,10 +173,8 @@
 
 config AMD_XGBE
 	tristate "AMD 10GbE Ethernet driver"
-	depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA
+	depends on ((OF_NET && OF_ADDRESS) || ACPI) && HAS_IOMEM && HAS_DMA
 	depends on ARM64 || COMPILE_TEST
-	select PHYLIB
-	select AMD_XGBE_PHY
 	select BITREVERSE
 	select CRC32
 	select PTP_1588_CLOCK
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 34c28aa..b6fa891 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -857,6 +857,48 @@
  */
 #define PCS_MMD_SELECT			0xff
 
+/* SerDes integration register offsets */
+#define SIR0_KR_RT_1			0x002c
+#define SIR0_STATUS			0x0040
+#define SIR1_SPEED			0x0000
+
+/* SerDes integration register entry bit positions and sizes */
+#define SIR0_KR_RT_1_RESET_INDEX	11
+#define SIR0_KR_RT_1_RESET_WIDTH	1
+#define SIR0_STATUS_RX_READY_INDEX	0
+#define SIR0_STATUS_RX_READY_WIDTH	1
+#define SIR0_STATUS_TX_READY_INDEX	8
+#define SIR0_STATUS_TX_READY_WIDTH	1
+#define SIR1_SPEED_CDR_RATE_INDEX	12
+#define SIR1_SPEED_CDR_RATE_WIDTH	4
+#define SIR1_SPEED_DATARATE_INDEX	4
+#define SIR1_SPEED_DATARATE_WIDTH	2
+#define SIR1_SPEED_PLLSEL_INDEX		3
+#define SIR1_SPEED_PLLSEL_WIDTH		1
+#define SIR1_SPEED_RATECHANGE_INDEX	6
+#define SIR1_SPEED_RATECHANGE_WIDTH	1
+#define SIR1_SPEED_TXAMP_INDEX		8
+#define SIR1_SPEED_TXAMP_WIDTH		4
+#define SIR1_SPEED_WORDMODE_INDEX	0
+#define SIR1_SPEED_WORDMODE_WIDTH	3
+
+/* SerDes RxTx register offsets */
+#define RXTX_REG6			0x0018
+#define RXTX_REG20			0x0050
+#define RXTX_REG22			0x0058
+#define RXTX_REG114			0x01c8
+#define RXTX_REG129			0x0204
+
+/* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX	8
+#define RXTX_REG6_RESETB_RXD_WIDTH	1
+#define RXTX_REG20_BLWC_ENA_INDEX	2
+#define RXTX_REG20_BLWC_ENA_WIDTH	1
+#define RXTX_REG114_PQ_REG_INDEX	9
+#define RXTX_REG114_PQ_REG_WIDTH	7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX	14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH	2
+
 /* Descriptor/Packet entry bit positions and sizes */
 #define RX_PACKET_ERRORS_CRC_INDEX		2
 #define RX_PACKET_ERRORS_CRC_WIDTH		1
@@ -973,10 +1015,47 @@
 #define TX_NORMAL_DESC2_VLAN_INSERT		0x2
 
 /* MDIO undefined or vendor specific registers */
+#ifndef MDIO_PMA_10GBR_PMD_CTRL
+#define MDIO_PMA_10GBR_PMD_CTRL		0x0096
+#endif
+
+#ifndef MDIO_PMA_10GBR_FECCTRL
+#define MDIO_PMA_10GBR_FECCTRL		0x00ab
+#endif
+
+#ifndef MDIO_AN_XNP
+#define MDIO_AN_XNP			0x0016
+#endif
+
+#ifndef MDIO_AN_LPX
+#define MDIO_AN_LPX			0x0019
+#endif
+
 #ifndef MDIO_AN_COMP_STAT
 #define MDIO_AN_COMP_STAT		0x0030
 #endif
 
+#ifndef MDIO_AN_INTMASK
+#define MDIO_AN_INTMASK			0x8001
+#endif
+
+#ifndef MDIO_AN_INT
+#define MDIO_AN_INT			0x8002
+#endif
+
+#ifndef MDIO_CTRL1_SPEED1G
+#define MDIO_CTRL1_SPEED1G		(MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
+#endif
+
+/* MDIO mask values */
+#define XGBE_XNP_MCF_NULL_MESSAGE	0x001
+#define XGBE_XNP_ACK_PROCESSED		BIT(12)
+#define XGBE_XNP_MP_FORMATTED		BIT(13)
+#define XGBE_XNP_NP_EXCHANGE		BIT(15)
+
+#define XGBE_KR_TRAINING_START		BIT(0)
+#define XGBE_KR_TRAINING_ENABLE		BIT(1)
+
 /* Bit setting and getting macros
  *  The get macro will extract the current bit field value from within
  *  the variable
@@ -1119,6 +1198,82 @@
 	ioread32((_pdata)->xpcs_regs + (_off))
 
 /* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes integration registers.
+ */
+#define XSIR_GET_BITS(_var, _prefix, _field)                            \
+	GET_BITS((_var),                                                \
+		 _prefix##_##_field##_INDEX,                            \
+		 _prefix##_##_field##_WIDTH)
+
+#define XSIR_SET_BITS(_var, _prefix, _field, _val)                      \
+	SET_BITS((_var),                                                \
+		 _prefix##_##_field##_INDEX,                            \
+		 _prefix##_##_field##_WIDTH, (_val))
+
+#define XSIR0_IOREAD(_pdata, _reg)					\
+	ioread16((_pdata)->sir0_regs + _reg)
+
+#define XSIR0_IOREAD_BITS(_pdata, _reg, _field)				\
+	GET_BITS(XSIR0_IOREAD((_pdata), _reg),				\
+		 _reg##_##_field##_INDEX,				\
+		 _reg##_##_field##_WIDTH)
+
+#define XSIR0_IOWRITE(_pdata, _reg, _val)				\
+	iowrite16((_val), (_pdata)->sir0_regs + _reg)
+
+#define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val)			\
+do {									\
+	u16 reg_val = XSIR0_IOREAD((_pdata), _reg);			\
+	SET_BITS(reg_val,						\
+		 _reg##_##_field##_INDEX,				\
+		 _reg##_##_field##_WIDTH, (_val));			\
+	XSIR0_IOWRITE((_pdata), _reg, reg_val);				\
+} while (0)
+
+#define XSIR1_IOREAD(_pdata, _reg)					\
+	ioread16((_pdata)->sir1_regs + _reg)
+
+#define XSIR1_IOREAD_BITS(_pdata, _reg, _field)				\
+	GET_BITS(XSIR1_IOREAD((_pdata), _reg),				\
+		 _reg##_##_field##_INDEX,				\
+		 _reg##_##_field##_WIDTH)
+
+#define XSIR1_IOWRITE(_pdata, _reg, _val)				\
+	iowrite16((_val), (_pdata)->sir1_regs + _reg)
+
+#define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val)			\
+do {									\
+	u16 reg_val = XSIR1_IOREAD((_pdata), _reg);			\
+	SET_BITS(reg_val,						\
+		 _reg##_##_field##_INDEX,				\
+		 _reg##_##_field##_WIDTH, (_val));			\
+	XSIR1_IOWRITE((_pdata), _reg, reg_val);				\
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes RxTx registers.
+ */
+#define XRXTX_IOREAD(_pdata, _reg)					\
+	ioread16((_pdata)->rxtx_regs + _reg)
+
+#define XRXTX_IOREAD_BITS(_pdata, _reg, _field)				\
+	GET_BITS(XRXTX_IOREAD((_pdata), _reg),				\
+		 _reg##_##_field##_INDEX,				\
+		 _reg##_##_field##_WIDTH)
+
+#define XRXTX_IOWRITE(_pdata, _reg, _val)				\
+	iowrite16((_val), (_pdata)->rxtx_regs + _reg)
+
+#define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val)			\
+do {									\
+	u16 reg_val = XRXTX_IOREAD((_pdata), _reg);			\
+	SET_BITS(reg_val,						\
+		 _reg##_##_field##_INDEX,				\
+		 _reg##_##_field##_WIDTH, (_val));			\
+	XRXTX_IOWRITE((_pdata), _reg, reg_val);				\
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
  * using MDIO.  Different from above because of the use of standardized
  * Linux include values.  No shifting is performed with the bit
  * operations, everything works on mask values.
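All of the XSIR0/XSIR1/XRXTX *_IOWRITE_BITS macros added above share one read-modify-write idiom: read the 16-bit SerDes register, rewrite a single field with SET_BITS(), and write the whole value back. Hand-expanded for one field, assuming SET_BITS() clears the field and ORs in the masked value as elsewhere in this header (a sketch, not driver code):

    /* Equivalent of XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, rate) */
    u16 mask = ((1 << SIR1_SPEED_DATARATE_WIDTH) - 1) <<
               SIR1_SPEED_DATARATE_INDEX;
    u16 reg_val = ioread16(pdata->sir1_regs + SIR1_SPEED);

    reg_val &= ~mask;                               /* clear old field */
    reg_val |= (rate << SIR1_SPEED_DATARATE_INDEX) & mask;
    iowrite16(reg_val, pdata->sir1_regs + SIR1_SPEED);

Note the read-modify-write is not atomic; callers are expected to serialize access to a given SerDes block themselves.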
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
index 8a50b01..a6b9899 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -150,9 +150,12 @@
 	tc_ets = 0;
 	tc_ets_weight = 0;
 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-		DBGPR("  TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
-		      ets->tc_tx_bw[i], ets->tc_rx_bw[i], ets->tc_tsa[i]);
-		DBGPR("  PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]);
+		netif_dbg(pdata, drv, netdev,
+			  "TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
+			  ets->tc_tx_bw[i], ets->tc_rx_bw[i],
+			  ets->tc_tsa[i]);
+		netif_dbg(pdata, drv, netdev, "PRIO%u: TC=%hhu\n", i,
+			  ets->prio_tc[i]);
 
 		if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) &&
 		    (i >= pdata->hw_feat.tc_cnt))
@@ -214,8 +217,9 @@
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 
-	DBGPR("  cap=%hhu, en=%hhx, mbc=%hhu, delay=%hhu\n",
-	      pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
+	netif_dbg(pdata, drv, netdev,
+		  "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n",
+		  pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
 
 	if (!pdata->pfc) {
 		pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc),
@@ -238,9 +242,10 @@
 
 static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx)
 {
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	u8 support = xgbe_dcb_getdcbx(netdev);
 
-	DBGPR("  DCBX=%#hhx\n", dcbx);
+	netif_dbg(pdata, drv, netdev, "DCBX=%#hhx\n", dcbx);
 
 	if (dcbx & ~support)
 		return 1;
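The DBGPR()-to-netif_dbg() conversions in this file (and in the files that follow) move debug output onto the standard per-device message-level bitmap: netif_dbg(pdata, drv, netdev, ...) only emits when NETIF_MSG_DRV is set in pdata->msg_enable, roughly:

    if (netif_msg_drv(pdata))   /* pdata->msg_enable & NETIF_MSG_DRV */
        netdev_dbg(netdev, "DCBX=%#hhx\n", dcbx);

The practical win is that the bitmap is runtime-tunable with ethtool -s <dev> msglvl ..., so enabling or disabling driver debug output no longer requires a rebuild, unlike the old compile-time DBGPR switch.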
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index d81fc6b..dd03ad8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -208,8 +208,9 @@
 	if (!ring->rdata)
 		return -ENOMEM;
 
-	DBGPR("    rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
-	      ring->rdesc, ring->rdesc_dma, ring->rdata);
+	netif_dbg(pdata, drv, pdata->netdev,
+		  "rdesc=%p, rdesc_dma=%pad, rdata=%p\n",
+		  ring->rdesc, &ring->rdesc_dma, ring->rdata);
 
 	DBGPR("<--xgbe_init_ring\n");
 
@@ -226,7 +227,9 @@
 
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
-		DBGPR("  %s - tx_ring:\n", channel->name);
+		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
+			  channel->name);
+
 		ret = xgbe_init_ring(pdata, channel->tx_ring,
 				     pdata->tx_desc_count);
 		if (ret) {
@@ -235,12 +238,14 @@
 			goto err_ring;
 		}
 
-		DBGPR("  %s - rx_ring:\n", channel->name);
+		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
+			  channel->name);
+
 		ret = xgbe_init_ring(pdata, channel->rx_ring,
 				     pdata->rx_desc_count);
 		if (ret) {
 			netdev_alert(pdata->netdev,
-				     "error initializing Tx ring\n");
+				     "error initializing Rx ring\n");
 			goto err_ring;
 		}
 	}
@@ -476,8 +481,6 @@
 
 	if (rdata->state_saved) {
 		rdata->state_saved = 0;
-		rdata->state.incomplete = 0;
-		rdata->state.context_next = 0;
 		rdata->state.skb = NULL;
 		rdata->state.len = 0;
 		rdata->state.error = 0;
@@ -518,8 +521,6 @@
 	rdata = XGBE_GET_DESC_DATA(ring, cur_index);
 
 	if (tso) {
-		DBGPR("  TSO packet\n");
-
 		/* Map the TSO header */
 		skb_dma = dma_map_single(pdata->dev, skb->data,
 					 packet->header_len, DMA_TO_DEVICE);
@@ -529,6 +530,9 @@
 		}
 		rdata->skb_dma = skb_dma;
 		rdata->skb_dma_len = packet->header_len;
+		netif_dbg(pdata, tx_queued, pdata->netdev,
+			  "skb header: index=%u, dma=%pad, len=%u\n",
+			  cur_index, &skb_dma, packet->header_len);
 
 		offset = packet->header_len;
 
@@ -550,8 +554,9 @@
 		}
 		rdata->skb_dma = skb_dma;
 		rdata->skb_dma_len = len;
-		DBGPR("  skb data: index=%u, dma=0x%llx, len=%u\n",
-		      cur_index, skb_dma, len);
+		netif_dbg(pdata, tx_queued, pdata->netdev,
+			  "skb data: index=%u, dma=%pad, len=%u\n",
+			  cur_index, &skb_dma, len);
 
 		datalen -= len;
 		offset += len;
@@ -563,7 +568,8 @@
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		DBGPR("  mapping frag %u\n", i);
+		netif_dbg(pdata, tx_queued, pdata->netdev,
+			  "mapping frag %u\n", i);
 
 		frag = &skb_shinfo(skb)->frags[i];
 		offset = 0;
@@ -582,8 +588,9 @@
 			rdata->skb_dma = skb_dma;
 			rdata->skb_dma_len = len;
 			rdata->mapped_as_page = 1;
-			DBGPR("  skb data: index=%u, dma=0x%llx, len=%u\n",
-			      cur_index, skb_dma, len);
+			netif_dbg(pdata, tx_queued, pdata->netdev,
+				  "skb frag: index=%u, dma=%pad, len=%u\n",
+				  cur_index, &skb_dma, len);
 
 			datalen -= len;
 			offset += len;
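The DMA address prints here also switch from 0x%llx to %pad, which takes a pointer to a dma_addr_t and renders it at the correct width whether dma_addr_t is 32 or 64 bits on the build in question; the old format string could warn or truncate on 32-bit configurations. In short:

    dma_addr_t dma = rdata->skb_dma;

    /* %pad dereferences its argument, hence the '&'. */
    netif_dbg(pdata, tx_queued, pdata->netdev, "dma=%pad\n", &dma);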
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 21d9497..506e832 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -710,7 +710,8 @@
 	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
 		return 0;
 
-	DBGPR("  %s promiscuous mode\n", enable ? "entering" : "leaving");
+	netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
+		  enable ? "entering" : "leaving");
 	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
 
 	return 0;
@@ -724,7 +725,8 @@
 	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
 		return 0;
 
-	DBGPR("  %s allmulti mode\n", enable ? "entering" : "leaving");
+	netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
+		  enable ? "entering" : "leaving");
 	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
 
 	return 0;
@@ -749,8 +751,9 @@
 		mac_addr[0] = ha->addr[4];
 		mac_addr[1] = ha->addr[5];
 
-		DBGPR("  adding mac address %pM at 0x%04x\n", ha->addr,
-		      *mac_reg);
+		netif_dbg(pdata, drv, pdata->netdev,
+			  "adding mac address %pM at %#x\n",
+			  ha->addr, *mac_reg);
 
 		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
 	}
@@ -907,23 +910,6 @@
 	else
 		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
 
-	/* If the PCS is changing modes, match the MAC speed to it */
-	if (((mmd_address >> 16) == MDIO_MMD_PCS) &&
-	    ((mmd_address & 0xffff) == MDIO_CTRL2)) {
-		struct phy_device *phydev = pdata->phydev;
-
-		if (mmd_data & MDIO_PCS_CTRL2_TYPE) {
-			/* KX mode */
-			if (phydev->supported & SUPPORTED_1000baseKX_Full)
-				xgbe_set_gmii_speed(pdata);
-			else
-				xgbe_set_gmii_2500_speed(pdata);
-		} else {
-			/* KR mode */
-			xgbe_set_xgmii_speed(pdata);
-		}
-	}
-
 	/* The PCS registers are accessed using mmio. The underlying APB3
 	 * management interface uses indirect addressing to access the MMD
 	 * register sets. This requires accessing of the PCS register in two
@@ -1322,7 +1308,8 @@
 	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
 		switch (ets->tc_tsa[i]) {
 		case IEEE_8021QAZ_TSA_STRICT:
-			DBGPR("  TC%u using SP\n", i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "TC%u using SP\n", i);
 			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
 					       MTL_TSA_SP);
 			break;
@@ -1330,7 +1317,8 @@
 			weight = total_weight * ets->tc_tx_bw[i] / 100;
 			weight = clamp(weight, min_weight, total_weight);
 
-			DBGPR("  TC%u using DWRR (weight %u)\n", i, weight);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "TC%u using DWRR (weight %u)\n", i, weight);
 			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
 					       MTL_TSA_ETS);
 			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
@@ -1359,7 +1347,8 @@
 		}
 		mask &= 0xff;
 
-		DBGPR("  TC%u PFC mask=%#x\n", tc, mask);
+		netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n",
+			  tc, mask);
 		reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
 		reg_val = XGMAC_IOREAD(pdata, reg);
 
@@ -1457,8 +1446,9 @@
 	/* Create a context descriptor if this is a TSO packet */
 	if (tso_context || vlan_context) {
 		if (tso_context) {
-			DBGPR("  TSO context descriptor, mss=%u\n",
-			      packet->mss);
+			netif_dbg(pdata, tx_queued, pdata->netdev,
+				  "TSO context descriptor, mss=%u\n",
+				  packet->mss);
 
 			/* Set the MSS size */
 			XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
@@ -1476,8 +1466,9 @@
 		}
 
 		if (vlan_context) {
-			DBGPR("  VLAN context descriptor, ctag=%u\n",
-			      packet->vlan_ctag);
+			netif_dbg(pdata, tx_queued, pdata->netdev,
+				  "VLAN context descriptor, ctag=%u\n",
+				  packet->vlan_ctag);
 
 			/* Mark it as a CONTEXT descriptor */
 			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
@@ -1533,6 +1524,8 @@
 				  packet->tcp_payload_len);
 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
 				  packet->tcp_header_len / 4);
+
+		pdata->ext_stats.tx_tso_packets++;
 	} else {
 		/* Enable CRC and Pad Insertion */
 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
@@ -1594,9 +1587,9 @@
 	rdesc = rdata->rdesc;
 	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
 
-#ifdef XGMAC_ENABLE_TX_DESC_DUMP
-	xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
-#endif
+	if (netif_msg_tx_queued(pdata))
+		xgbe_dump_tx_desc(pdata, ring, start_index,
+				  packet->rdesc_count, 1);
 
 	/* Make sure ownership is written to the descriptor */
 	dma_wmb();
@@ -1618,11 +1611,12 @@
 
 static int xgbe_dev_read(struct xgbe_channel *channel)
 {
+	struct xgbe_prv_data *pdata = channel->pdata;
 	struct xgbe_ring *ring = channel->rx_ring;
 	struct xgbe_ring_data *rdata;
 	struct xgbe_ring_desc *rdesc;
 	struct xgbe_packet_data *packet = &ring->packet_data;
-	struct net_device *netdev = channel->pdata->netdev;
+	struct net_device *netdev = pdata->netdev;
 	unsigned int err, etlt, l34t;
 
 	DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
@@ -1637,9 +1631,8 @@
 	/* Make sure descriptor fields are read after reading the OWN bit */
 	dma_rmb();
 
-#ifdef XGMAC_ENABLE_RX_DESC_DUMP
-	xgbe_dump_rx_desc(ring, rdesc, ring->cur);
-#endif
+	if (netif_msg_rx_status(pdata))
+		xgbe_dump_rx_desc(pdata, ring, ring->cur);
 
 	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
 		/* Timestamp Context Descriptor */
@@ -1661,9 +1654,12 @@
 			       CONTEXT_NEXT, 1);
 
 	/* Get the header length */
-	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
 		rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
 						      RX_NORMAL_DESC2, HL);
+		if (rdata->rx.hdr_len)
+			pdata->ext_stats.rx_split_header_packets++;
+	}
 
 	/* Get the RSS hash */
 	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
@@ -1700,14 +1696,14 @@
 		       INCOMPLETE, 0);
 
 	/* Set checksum done indicator as appropriate */
-	if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
+	if (netdev->features & NETIF_F_RXCSUM)
 		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
 			       CSUM_DONE, 1);
 
 	/* Check for errors (only valid in last descriptor) */
 	err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
 	etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
-	DBGPR("  err=%u, etlt=%#x\n", err, etlt);
+	netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
 
 	if (!err || !etlt) {
 		/* No error if err is 0 or etlt is 0 */
@@ -1718,7 +1714,8 @@
 			packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
 							      RX_NORMAL_DESC0,
 							      OVT);
-			DBGPR("  vlan-ctag=0x%04x\n", packet->vlan_ctag);
+			netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
+				  packet->vlan_ctag);
 		}
 	} else {
 		if ((etlt == 0x05) || (etlt == 0x06))
@@ -2026,9 +2023,9 @@
 	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
 
-	netdev_notice(pdata->netdev,
-		      "%d Tx hardware queues, %d byte fifo per queue\n",
-		      pdata->tx_q_count, ((fifo_size + 1) * 256));
+	netif_info(pdata, drv, pdata->netdev,
+		   "%d Tx hardware queues, %d byte fifo per queue\n",
+		   pdata->tx_q_count, ((fifo_size + 1) * 256));
 }
 
 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
@@ -2042,9 +2039,9 @@
 	for (i = 0; i < pdata->rx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
 
-	netdev_notice(pdata->netdev,
-		      "%d Rx hardware queues, %d byte fifo per queue\n",
-		      pdata->rx_q_count, ((fifo_size + 1) * 256));
+	netif_info(pdata, drv, pdata->netdev,
+		   "%d Rx hardware queues, %d byte fifo per queue\n",
+		   pdata->rx_q_count, ((fifo_size + 1) * 256));
 }
 
 static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
@@ -2063,14 +2060,16 @@
 
 	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
 		for (j = 0; j < qptc; j++) {
-			DBGPR("  TXq%u mapped to TC%u\n", queue, i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "TXq%u mapped to TC%u\n", queue, i);
 			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
 					       Q2TCMAP, i);
 			pdata->q2tc_map[queue++] = i;
 		}
 
 		if (i < qptc_extra) {
-			DBGPR("  TXq%u mapped to TC%u\n", queue, i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "TXq%u mapped to TC%u\n", queue, i);
 			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
 					       Q2TCMAP, i);
 			pdata->q2tc_map[queue++] = i;
@@ -2088,13 +2087,15 @@
 	for (i = 0, prio = 0; i < prio_queues;) {
 		mask = 0;
 		for (j = 0; j < ppq; j++) {
-			DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "PRIO%u mapped to RXq%u\n", prio, i);
 			mask |= (1 << prio);
 			pdata->prio2q_map[prio++] = i;
 		}
 
 		if (i < ppq_extra) {
-			DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "PRIO%u mapped to RXq%u\n", prio, i);
 			mask |= (1 << prio);
 			pdata->prio2q_map[prio++] = i;
 		}
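The mapping loops above distribute queues over traffic classes as evenly as possible: with Q Tx queues and T classes, qptc = Q / T queues go to every class, and the first qptc_extra = Q % T classes take one extra. A worked example (a sketch; the variables mirror the ones computed earlier in this function):

    /* 8 Tx queues over 3 traffic classes:
     *   qptc = 8 / 3 = 2, qptc_extra = 8 % 3 = 2
     *   TC0 -> TXq0..2, TC1 -> TXq3..5, TC2 -> TXq6..7
     */
    unsigned int qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
    unsigned int qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

The PRIO-to-RXq loop right after it uses the same shape, with ppq/ppq_extra spreading the eight VLAN priorities over the priority queues.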
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 9fd6c69..1e9c28d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -183,9 +183,10 @@
 			channel->rx_ring = rx_ring++;
 		}
 
-		DBGPR("  %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
-		      channel->name, channel->queue_index, channel->dma_regs,
-		      channel->dma_irq, channel->tx_ring, channel->rx_ring);
+		netif_dbg(pdata, drv, pdata->netdev,
+			  "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
+			  channel->name, channel->dma_regs, channel->dma_irq,
+			  channel->tx_ring, channel->rx_ring);
 	}
 
 	pdata->channel = channel_mem;
@@ -235,7 +236,8 @@
 	struct xgbe_prv_data *pdata = channel->pdata;
 
 	if (count > xgbe_tx_avail_desc(ring)) {
-		DBGPR("  Tx queue stopped, not enough descriptors available\n");
+		netif_info(pdata, drv, pdata->netdev,
+			   "Tx queue stopped, not enough descriptors available\n");
 		netif_stop_subqueue(pdata->netdev, channel->queue_index);
 		ring->tx.queue_stopped = 1;
 
@@ -330,7 +332,7 @@
 	if (!dma_isr)
 		goto isr_done;
 
-	DBGPR("  DMA_ISR = %08x\n", dma_isr);
+	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
 
 	for (i = 0; i < pdata->channel_count; i++) {
 		if (!(dma_isr & (1 << i)))
@@ -339,7 +341,8 @@
 		channel = pdata->channel + i;
 
 		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
-		DBGPR("  DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
+		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
+			  i, dma_ch_isr);
 
 		/* The TI or RI interrupt bits may still be set even if using
 		 * per channel DMA interrupts. Check to be sure those are not
@@ -386,8 +389,6 @@
 		}
 	}
 
-	DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
-
 isr_done:
 	return IRQ_HANDLED;
 }
@@ -436,43 +437,61 @@
 	DBGPR("<--xgbe_tx_timer\n");
 }
 
-static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
+static void xgbe_service(struct work_struct *work)
+{
+	struct xgbe_prv_data *pdata = container_of(work,
+						   struct xgbe_prv_data,
+						   service_work);
+
+	pdata->phy_if.phy_status(pdata);
+}
+
+static void xgbe_service_timer(unsigned long data)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+
+	schedule_work(&pdata->service_work);
+
+	mod_timer(&pdata->service_timer, jiffies + HZ);
+}
+
+static void xgbe_init_timers(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_channel *channel;
 	unsigned int i;
 
-	DBGPR("-->xgbe_init_tx_timers\n");
+	setup_timer(&pdata->service_timer, xgbe_service_timer,
+		    (unsigned long)pdata);
 
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
 		if (!channel->tx_ring)
 			break;
 
-		DBGPR("  %s adding tx timer\n", channel->name);
 		setup_timer(&channel->tx_timer, xgbe_tx_timer,
 			    (unsigned long)channel);
 	}
-
-	DBGPR("<--xgbe_init_tx_timers\n");
 }
 
-static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
+static void xgbe_start_timers(struct xgbe_prv_data *pdata)
+{
+	mod_timer(&pdata->service_timer, jiffies + HZ);
+}
+
+static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_channel *channel;
 	unsigned int i;
 
-	DBGPR("-->xgbe_stop_tx_timers\n");
+	del_timer_sync(&pdata->service_timer);
 
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
 		if (!channel->tx_ring)
 			break;
 
-		DBGPR("  %s deleting tx timer\n", channel->name);
 		del_timer_sync(&channel->tx_timer);
 	}
-
-	DBGPR("<--xgbe_stop_tx_timers\n");
 }
 
 void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
@@ -512,6 +531,7 @@
 						RXFIFOSIZE);
 	hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
 						TXFIFOSIZE);
+	hw_feat->adv_ts_hi     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
 	hw_feat->dma_width     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
 	hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
 	hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
@@ -759,112 +779,12 @@
 	DBGPR("<--xgbe_free_rx_data\n");
 }
 
-static void xgbe_adjust_link(struct net_device *netdev)
-{
-	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	struct phy_device *phydev = pdata->phydev;
-	int new_state = 0;
-
-	if (!phydev)
-		return;
-
-	if (phydev->link) {
-		/* Flow control support */
-		if (pdata->pause_autoneg) {
-			if (phydev->pause || phydev->asym_pause) {
-				pdata->tx_pause = 1;
-				pdata->rx_pause = 1;
-			} else {
-				pdata->tx_pause = 0;
-				pdata->rx_pause = 0;
-			}
-		}
-
-		if (pdata->tx_pause != pdata->phy_tx_pause) {
-			hw_if->config_tx_flow_control(pdata);
-			pdata->phy_tx_pause = pdata->tx_pause;
-		}
-
-		if (pdata->rx_pause != pdata->phy_rx_pause) {
-			hw_if->config_rx_flow_control(pdata);
-			pdata->phy_rx_pause = pdata->rx_pause;
-		}
-
-		/* Speed support */
-		if (phydev->speed != pdata->phy_speed) {
-			new_state = 1;
-
-			switch (phydev->speed) {
-			case SPEED_10000:
-				hw_if->set_xgmii_speed(pdata);
-				break;
-
-			case SPEED_2500:
-				hw_if->set_gmii_2500_speed(pdata);
-				break;
-
-			case SPEED_1000:
-				hw_if->set_gmii_speed(pdata);
-				break;
-			}
-			pdata->phy_speed = phydev->speed;
-		}
-
-		if (phydev->link != pdata->phy_link) {
-			new_state = 1;
-			pdata->phy_link = 1;
-		}
-	} else if (pdata->phy_link) {
-		new_state = 1;
-		pdata->phy_link = 0;
-		pdata->phy_speed = SPEED_UNKNOWN;
-	}
-
-	if (new_state)
-		phy_print_status(phydev);
-}
-
 static int xgbe_phy_init(struct xgbe_prv_data *pdata)
 {
-	struct net_device *netdev = pdata->netdev;
-	struct phy_device *phydev = pdata->phydev;
-	int ret;
-
 	pdata->phy_link = -1;
 	pdata->phy_speed = SPEED_UNKNOWN;
-	pdata->phy_tx_pause = pdata->tx_pause;
-	pdata->phy_rx_pause = pdata->rx_pause;
 
-	ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
-				 pdata->phy_mode);
-	if (ret) {
-		netdev_err(netdev, "phy_connect_direct failed\n");
-		return ret;
-	}
-
-	if (!phydev->drv || (phydev->drv->phy_id == 0)) {
-		netdev_err(netdev, "phy_id not valid\n");
-		ret = -ENODEV;
-		goto err_phy_connect;
-	}
-	DBGPR("  phy_connect_direct succeeded for PHY %s, link=%d\n",
-	      dev_name(&phydev->dev), phydev->link);
-
-	return 0;
-
-err_phy_connect:
-	phy_disconnect(phydev);
-
-	return ret;
-}
-
-static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
-{
-	if (!pdata->phydev)
-		return;
-
-	phy_disconnect(pdata->phydev);
+	return pdata->phy_if.phy_reset(pdata);
 }
 
 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
@@ -889,13 +809,14 @@
 
 	netif_tx_stop_all_queues(netdev);
 
+	xgbe_stop_timers(pdata);
+	flush_workqueue(pdata->dev_workqueue);
+
 	hw_if->powerdown_tx(pdata);
 	hw_if->powerdown_rx(pdata);
 
 	xgbe_napi_disable(pdata, 0);
 
-	phy_stop(pdata->phydev);
-
 	pdata->power_down = 1;
 
 	spin_unlock_irqrestore(&pdata->lock, flags);
@@ -924,8 +845,6 @@
 
 	pdata->power_down = 0;
 
-	phy_start(pdata->phydev);
-
 	xgbe_napi_enable(pdata, 0);
 
 	hw_if->powerup_tx(pdata);
@@ -936,6 +855,8 @@
 
 	netif_tx_start_all_queues(netdev);
 
+	xgbe_start_timers(pdata);
+
 	spin_unlock_irqrestore(&pdata->lock, flags);
 
 	DBGPR("<--xgbe_powerup\n");
@@ -946,6 +867,7 @@
 static int xgbe_start(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
+	struct xgbe_phy_if *phy_if = &pdata->phy_if;
 	struct net_device *netdev = pdata->netdev;
 	int ret;
 
@@ -953,7 +875,9 @@
 
 	hw_if->init(pdata);
 
-	phy_start(pdata->phydev);
+	ret = phy_if->phy_start(pdata);
+	if (ret)
+		goto err_phy;
 
 	xgbe_napi_enable(pdata, 1);
 
@@ -964,10 +888,11 @@
 	hw_if->enable_tx(pdata);
 	hw_if->enable_rx(pdata);
 
-	xgbe_init_tx_timers(pdata);
-
 	netif_tx_start_all_queues(netdev);
 
+	xgbe_start_timers(pdata);
+	schedule_work(&pdata->service_work);
+
 	DBGPR("<--xgbe_start\n");
 
 	return 0;
@@ -975,8 +900,9 @@
 err_napi:
 	xgbe_napi_disable(pdata, 1);
 
-	phy_stop(pdata->phydev);
+	phy_if->phy_stop(pdata);
 
+err_phy:
 	hw_if->exit(pdata);
 
 	return ret;
@@ -985,6 +911,7 @@
 static void xgbe_stop(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
+	struct xgbe_phy_if *phy_if = &pdata->phy_if;
 	struct xgbe_channel *channel;
 	struct net_device *netdev = pdata->netdev;
 	struct netdev_queue *txq;
@@ -994,7 +921,8 @@
 
 	netif_tx_stop_all_queues(netdev);
 
-	xgbe_stop_tx_timers(pdata);
+	xgbe_stop_timers(pdata);
+	flush_workqueue(pdata->dev_workqueue);
 
 	hw_if->disable_tx(pdata);
 	hw_if->disable_rx(pdata);
@@ -1003,7 +931,7 @@
 
 	xgbe_napi_disable(pdata, 1);
 
-	phy_stop(pdata->phydev);
+	phy_if->phy_stop(pdata);
 
 	hw_if->exit(pdata);
 
@@ -1374,7 +1302,7 @@
 	ret = clk_prepare_enable(pdata->sysclk);
 	if (ret) {
 		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
-		goto err_phy_init;
+		return ret;
 	}
 
 	ret = clk_prepare_enable(pdata->ptpclk);
@@ -1399,14 +1327,17 @@
 	if (ret)
 		goto err_channels;
 
-	/* Initialize the device restart and Tx timestamp work struct */
+	INIT_WORK(&pdata->service_work, xgbe_service);
 	INIT_WORK(&pdata->restart_work, xgbe_restart);
 	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+	xgbe_init_timers(pdata);
 
 	ret = xgbe_start(pdata);
 	if (ret)
 		goto err_rings;
 
+	clear_bit(XGBE_DOWN, &pdata->dev_state);
+
 	DBGPR("<--xgbe_open\n");
 
 	return 0;
@@ -1423,9 +1354,6 @@
 err_sysclk:
 	clk_disable_unprepare(pdata->sysclk);
 
-err_phy_init:
-	xgbe_phy_exit(pdata);
-
 	return ret;
 }
 
@@ -1449,8 +1377,7 @@
 	clk_disable_unprepare(pdata->ptpclk);
 	clk_disable_unprepare(pdata->sysclk);
 
-	/* Release the phy */
-	xgbe_phy_exit(pdata);
+	set_bit(XGBE_DOWN, &pdata->dev_state);
 
 	DBGPR("<--xgbe_close\n");
 
@@ -1478,7 +1405,8 @@
 	ret = NETDEV_TX_OK;
 
 	if (skb->len == 0) {
-		netdev_err(netdev, "empty skb received from stack\n");
+		netif_err(pdata, tx_err, netdev,
+			  "empty skb received from stack\n");
 		dev_kfree_skb_any(skb);
 		goto tx_netdev_return;
 	}
@@ -1494,7 +1422,8 @@
 
 	ret = xgbe_prep_tso(skb, packet);
 	if (ret) {
-		netdev_err(netdev, "error processing TSO packet\n");
+		netif_err(pdata, tx_err, netdev,
+			  "error processing TSO packet\n");
 		dev_kfree_skb_any(skb);
 		goto tx_netdev_return;
 	}
@@ -1513,9 +1442,8 @@
 	/* Configure required descriptor fields for transmission */
 	hw_if->dev_xmit(channel);
 
-#ifdef XGMAC_ENABLE_TX_PKT_DUMP
-	xgbe_print_pkt(netdev, skb, true);
-#endif
+	if (netif_msg_pktdata(pdata))
+		xgbe_print_pkt(netdev, skb, true);
 
 	/* Stop the queue in advance if there may not be enough descriptors */
 	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
@@ -1710,7 +1638,8 @@
 			       (pdata->q2tc_map[queue] == i))
 				queue++;
 
-			DBGPR("  TC%u using TXq%u-%u\n", i, offset, queue - 1);
+			netif_dbg(pdata, drv, netdev, "TC%u using TXq%u-%u\n",
+				  i, offset, queue - 1);
 			netdev_set_tc_queue(netdev, i, queue - offset, offset);
 			offset = queue;
 		}
@@ -1820,9 +1749,10 @@
 			  lower_32_bits(rdata->rdesc_dma));
 }
 
-static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
+static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+				       struct napi_struct *napi,
 				       struct xgbe_ring_data *rdata,
-				       unsigned int *len)
+				       unsigned int len)
 {
 	struct sk_buff *skb;
 	u8 *packet;
@@ -1832,14 +1762,31 @@
 	if (!skb)
 		return NULL;
 
+	/* Start with the header buffer which may contain just the header
+	 * or the header plus data
+	 */
+	dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma,
+				rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
+
 	packet = page_address(rdata->rx.hdr.pa.pages) +
 		 rdata->rx.hdr.pa.pages_offset;
-	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
+	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
 	copy_len = min(rdata->rx.hdr.dma_len, copy_len);
 	skb_copy_to_linear_data(skb, packet, copy_len);
 	skb_put(skb, copy_len);
 
-	*len -= copy_len;
+	len -= copy_len;
+	if (len) {
+		/* Add the remaining data as a frag */
+		dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma,
+					rdata->rx.buf.dma_len, DMA_FROM_DEVICE);
+
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				rdata->rx.buf.pa.pages,
+				rdata->rx.buf.pa.pages_offset,
+				len, rdata->rx.buf.dma_len);
+		rdata->rx.buf.pa.pages = NULL;
+	}
 
 	return skb;
 }
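Two details of the reworked xgbe_create_skb() are worth calling out. First, it now owns the dma_sync_single_for_cpu() calls for both receive buffers instead of relying on the caller. Second, when the descriptor carried more than the header, the remainder is attached zero-copy and ownership of the page moves to the skb:

    skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                    rdata->rx.buf.pa.pages,
                    rdata->rx.buf.pa.pages_offset,
                    len, rdata->rx.buf.dma_len);
    rdata->rx.buf.pa.pages = NULL;  /* page now referenced by the skb */

Clearing rx.buf.pa.pages is what keeps the descriptor teardown path from releasing a page the stack still holds.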
@@ -1877,9 +1824,8 @@
 		 * bit */
 		dma_rmb();
 
-#ifdef XGMAC_ENABLE_TX_DESC_DUMP
-		xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
-#endif
+		if (netif_msg_tx_done(pdata))
+			xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
 
 		if (hw_if->is_last_desc(rdesc)) {
 			tx_packets += rdata->tx.packets;
@@ -1922,7 +1868,7 @@
 	struct sk_buff *skb;
 	struct skb_shared_hwtstamps *hwtstamps;
 	unsigned int incomplete, error, context_next, context;
-	unsigned int len, put_len, max_len;
+	unsigned int len, rdesc_len, max_len;
 	unsigned int received = 0;
 	int packet_count = 0;
 
@@ -1932,6 +1878,9 @@
 	if (!ring)
 		return 0;
 
+	incomplete = 0;
+	context_next = 0;
+
 	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
 
 	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
@@ -1941,15 +1890,11 @@
 
 		/* First time in loop see if we need to restore state */
 		if (!received && rdata->state_saved) {
-			incomplete = rdata->state.incomplete;
-			context_next = rdata->state.context_next;
 			skb = rdata->state.skb;
 			error = rdata->state.error;
 			len = rdata->state.len;
 		} else {
 			memset(packet, 0, sizeof(*packet));
-			incomplete = 0;
-			context_next = 0;
 			skb = NULL;
 			error = 0;
 			len = 0;
@@ -1983,29 +1928,23 @@
 
 		if (error || packet->errors) {
 			if (packet->errors)
-				DBGPR("Error in received packet\n");
+				netif_err(pdata, rx_err, netdev,
+					  "error in received packet\n");
 			dev_kfree_skb(skb);
 			goto next_packet;
 		}
 
 		if (!context) {
-			put_len = rdata->rx.len - len;
-			len += put_len;
+			/* Length is cumulative, get this descriptor's length */
+			rdesc_len = rdata->rx.len - len;
+			len += rdesc_len;
 
-			if (!skb) {
-				dma_sync_single_for_cpu(pdata->dev,
-							rdata->rx.hdr.dma,
-							rdata->rx.hdr.dma_len,
-							DMA_FROM_DEVICE);
-
-				skb = xgbe_create_skb(napi, rdata, &put_len);
-				if (!skb) {
+			if (rdesc_len && !skb) {
+				skb = xgbe_create_skb(pdata, napi, rdata,
+						      rdesc_len);
+				if (!skb)
 					error = 1;
-					goto skip_data;
-				}
-			}
-
-			if (put_len) {
+			} else if (rdesc_len) {
 				dma_sync_single_for_cpu(pdata->dev,
 							rdata->rx.buf.dma,
 							rdata->rx.buf.dma_len,
@@ -2014,12 +1953,12 @@
 				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 						rdata->rx.buf.pa.pages,
 						rdata->rx.buf.pa.pages_offset,
-						put_len, rdata->rx.buf.dma_len);
+						rdesc_len,
+						rdata->rx.buf.dma_len);
 				rdata->rx.buf.pa.pages = NULL;
 			}
 		}
 
-skip_data:
 		if (incomplete || context_next)
 			goto read_again;
 
@@ -2033,14 +1972,14 @@
 			max_len += VLAN_HLEN;
 
 		if (skb->len > max_len) {
-			DBGPR("packet length exceeds configured MTU\n");
+			netif_err(pdata, rx_err, netdev,
+				  "packet length exceeds configured MTU\n");
 			dev_kfree_skb(skb);
 			goto next_packet;
 		}
 
-#ifdef XGMAC_ENABLE_RX_PKT_DUMP
-		xgbe_print_pkt(netdev, skb, false);
-#endif
+		if (netif_msg_pktdata(pdata))
+			xgbe_print_pkt(netdev, skb, false);
 
 		skb_checksum_none_assert(skb);
 		if (XGMAC_GET_BITS(packet->attributes,
@@ -2072,7 +2011,6 @@
 		skb_record_rx_queue(skb, channel->queue_index);
 		skb_mark_napi_id(skb, napi);
 
-		netdev->last_rx = jiffies;
 		napi_gro_receive(napi, skb);
 
 next_packet:
@@ -2083,8 +2021,6 @@
 	if (received && (incomplete || context_next)) {
 		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 		rdata->state_saved = 1;
-		rdata->state.incomplete = incomplete;
-		rdata->state.context_next = context_next;
 		rdata->state.skb = skb;
 		rdata->state.len = len;
 		rdata->state.error = error;
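Dropping incomplete and context_next from the saved state is safe because both are re-derived from the descriptor attributes on every xgbe_dev_read() pass; only the partially built packet genuinely spans a napi poll boundary. What still has to survive is just this (a sketch of the fields in play, not a struct from this patch):

    struct xgbe_saved_rx_state {
        struct sk_buff *skb;    /* partially assembled packet */
        unsigned int len;       /* bytes accumulated so far */
        unsigned int error;     /* sticky error indication */
    };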
@@ -2165,8 +2101,8 @@
 	return processed;
 }
 
-void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
-		       unsigned int count, unsigned int flag)
+void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
+		       unsigned int idx, unsigned int count, unsigned int flag)
 {
 	struct xgbe_ring_data *rdata;
 	struct xgbe_ring_desc *rdesc;
@@ -2174,20 +2110,29 @@
 	while (count--) {
 		rdata = XGBE_GET_DESC_DATA(ring, idx);
 		rdesc = rdata->rdesc;
-		pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
-			 (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
-			 le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
-			 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
+		netdev_dbg(pdata->netdev,
+			   "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+			   (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+			   le32_to_cpu(rdesc->desc0),
+			   le32_to_cpu(rdesc->desc1),
+			   le32_to_cpu(rdesc->desc2),
+			   le32_to_cpu(rdesc->desc3));
 		idx++;
 	}
 }
 
-void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
+void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
 		       unsigned int idx)
 {
-	pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
-		 le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
-		 le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
+	struct xgbe_ring_data *rdata;
+	struct xgbe_ring_desc *rdesc;
+
+	rdata = XGBE_GET_DESC_DATA(ring, idx);
+	rdesc = rdata->rdesc;
+	netdev_dbg(pdata->netdev,
+		   "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
+		   idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
+		   le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
 }
 
 void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
@@ -2197,21 +2142,21 @@
 	unsigned char buffer[128];
 	unsigned int i, j;
 
-	netdev_alert(netdev, "\n************** SKB dump ****************\n");
+	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 
-	netdev_alert(netdev, "%s packet of %d bytes\n",
-		     (tx_rx ? "TX" : "RX"), skb->len);
+	netdev_dbg(netdev, "%s packet of %d bytes\n",
+		   (tx_rx ? "TX" : "RX"), skb->len);
 
-	netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
-	netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
-	netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
+	netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
+	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
+	netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
 
 	for (i = 0, j = 0; i < skb->len;) {
 		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
 			      buf[i++]);
 
 		if ((i % 32) == 0) {
-			netdev_alert(netdev, "  0x%04x: %s\n", i - 32, buffer);
+			netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
 			j = 0;
 		} else if ((i % 16) == 0) {
 			buffer[j++] = ' ';
@@ -2221,7 +2166,7 @@
 		}
 	}
 	if (i % 32)
-		netdev_alert(netdev, "  0x%04x: %s\n", i - (i % 32), buffer);
+		netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);
 
-	netdev_alert(netdev, "\n************** SKB dump ****************\n");
+	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 }
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 5f149e8..59e090e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -133,6 +133,12 @@
 	  offsetof(struct xgbe_prv_data, mmc_stats._var),	\
 	}
 
+#define XGMAC_EXT_STAT(_string, _var)				\
+	{ _string,						\
+	  FIELD_SIZEOF(struct xgbe_ext_stats, _var),		\
+	  offsetof(struct xgbe_prv_data, ext_stats._var),	\
+	}
+
 static const struct xgbe_stats xgbe_gstring_stats[] = {
 	XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
 	XGMAC_MMC_STAT("tx_packets", txframecount_gb),
@@ -140,6 +146,7 @@
 	XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
 	XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
 	XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
+	XGMAC_EXT_STAT("tx_tso_packets", tx_tso_packets),
 	XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
 	XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
 	XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
@@ -171,6 +178,7 @@
 	XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
 	XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
 	XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
+	XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets),
 };
 
 #define XGBE_STATS_COUNT	ARRAY_SIZE(xgbe_gstring_stats)
@@ -239,9 +247,9 @@
 
 	DBGPR("-->xgbe_get_pauseparam\n");
 
-	pause->autoneg = pdata->pause_autoneg;
-	pause->tx_pause = pdata->tx_pause;
-	pause->rx_pause = pdata->rx_pause;
+	pause->autoneg = pdata->phy.pause_autoneg;
+	pause->tx_pause = pdata->phy.tx_pause;
+	pause->rx_pause = pdata->phy.rx_pause;
 
 	DBGPR("<--xgbe_get_pauseparam\n");
 }
@@ -250,7 +258,6 @@
 			       struct ethtool_pauseparam *pause)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-	struct phy_device *phydev = pdata->phydev;
 	int ret = 0;
 
 	DBGPR("-->xgbe_set_pauseparam\n");
@@ -258,21 +265,26 @@
 	DBGPR("  autoneg = %d, tx_pause = %d, rx_pause = %d\n",
 	      pause->autoneg, pause->tx_pause, pause->rx_pause);
 
-	pdata->pause_autoneg = pause->autoneg;
-	if (pause->autoneg) {
-		phydev->advertising |= ADVERTISED_Pause;
-		phydev->advertising |= ADVERTISED_Asym_Pause;
+	if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE))
+		return -EINVAL;
 
-	} else {
-		phydev->advertising &= ~ADVERTISED_Pause;
-		phydev->advertising &= ~ADVERTISED_Asym_Pause;
+	pdata->phy.pause_autoneg = pause->autoneg;
+	pdata->phy.tx_pause = pause->tx_pause;
+	pdata->phy.rx_pause = pause->rx_pause;
 
-		pdata->tx_pause = pause->tx_pause;
-		pdata->rx_pause = pause->rx_pause;
+	pdata->phy.advertising &= ~ADVERTISED_Pause;
+	pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
+
+	if (pause->rx_pause) {
+		pdata->phy.advertising |= ADVERTISED_Pause;
+		pdata->phy.advertising |= ADVERTISED_Asym_Pause;
 	}
 
+	if (pause->tx_pause)
+		pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
+
 	if (netif_running(netdev))
-		ret = phy_start_aneg(phydev);
+		ret = pdata->phy_if.phy_config_aneg(pdata);
 
 	DBGPR("<--xgbe_set_pauseparam\n");
 
@@ -283,36 +295,39 @@
 			     struct ethtool_cmd *cmd)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-	int ret;
 
 	DBGPR("-->xgbe_get_settings\n");
 
-	if (!pdata->phydev)
-		return -ENODEV;
+	cmd->phy_address = pdata->phy.address;
 
-	ret = phy_ethtool_gset(pdata->phydev, cmd);
+	cmd->supported = pdata->phy.supported;
+	cmd->advertising = pdata->phy.advertising;
+	cmd->lp_advertising = pdata->phy.lp_advertising;
+
+	cmd->autoneg = pdata->phy.autoneg;
+	ethtool_cmd_speed_set(cmd, pdata->phy.speed);
+	cmd->duplex = pdata->phy.duplex;
+
+	cmd->port = PORT_NONE;
+	cmd->transceiver = XCVR_INTERNAL;
 
 	DBGPR("<--xgbe_get_settings\n");
 
-	return ret;
+	return 0;
 }
 
 static int xgbe_set_settings(struct net_device *netdev,
 			     struct ethtool_cmd *cmd)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-	struct phy_device *phydev = pdata->phydev;
 	u32 speed;
 	int ret;
 
 	DBGPR("-->xgbe_set_settings\n");
 
-	if (!pdata->phydev)
-		return -ENODEV;
-
 	speed = ethtool_cmd_speed(cmd);
 
-	if (cmd->phy_address != phydev->addr)
+	if (cmd->phy_address != pdata->phy.address)
 		return -EINVAL;
 
 	if ((cmd->autoneg != AUTONEG_ENABLE) &&
@@ -333,23 +348,23 @@
 			return -EINVAL;
 	}
 
-	cmd->advertising &= phydev->supported;
+	cmd->advertising &= pdata->phy.supported;
 	if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
 		return -EINVAL;
 
 	ret = 0;
-	phydev->autoneg = cmd->autoneg;
-	phydev->speed = speed;
-	phydev->duplex = cmd->duplex;
-	phydev->advertising = cmd->advertising;
+	pdata->phy.autoneg = cmd->autoneg;
+	pdata->phy.speed = speed;
+	pdata->phy.duplex = cmd->duplex;
+	pdata->phy.advertising = cmd->advertising;
 
 	if (cmd->autoneg == AUTONEG_ENABLE)
-		phydev->advertising |= ADVERTISED_Autoneg;
+		pdata->phy.advertising |= ADVERTISED_Autoneg;
 	else
-		phydev->advertising &= ~ADVERTISED_Autoneg;
+		pdata->phy.advertising &= ~ADVERTISED_Autoneg;
 
 	if (netif_running(netdev))
-		ret = phy_start_aneg(phydev);
+		ret = pdata->phy_if.phy_config_aneg(pdata);
 
 	DBGPR("<--xgbe_set_settings\n");
 
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 6d2c702..e83bd76 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -124,9 +124,11 @@
 #include <linux/of.h>
 #include <linux/of_net.h>
 #include <linux/of_address.h>
+#include <linux/of_platform.h>
 #include <linux/clk.h>
 #include <linux/property.h>
 #include <linux/acpi.h>
+#include <linux/mdio.h>
 
 #include "xgbe.h"
 #include "xgbe-common.h"
@@ -136,6 +138,49 @@
 MODULE_VERSION(XGBE_DRV_VERSION);
 MODULE_DESCRIPTION(XGBE_DRV_DESC);
 
+static int debug = -1;
+module_param(debug, int, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(debug, " Network interface message level setting");
+
+static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+				      NETIF_MSG_IFUP);
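+
+/* The debug parameter is handed to netif_msg_init(), which returns
+ * default_msg_level when debug is left at -1 and otherwise treats the
+ * value as a NETIF_MSG_* bitmask. A short sketch of how a loaded
+ * bitmask gates the new dev_dbg() calls below (values illustrative):
+ *
+ *	// modprobe amd-xgbe debug=0x2 sets NETIF_MSG_PROBE only, so:
+ *	pdata->msg_enable = netif_msg_init(debug, default_msg_level);
+ *
+ *	if (netif_msg_probe(pdata))	// true iff NETIF_MSG_PROBE set
+ *		dev_dbg(dev, "probe diagnostics enabled\n");
+ */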
+
+static const u32 xgbe_serdes_blwc[] = {
+	XGBE_SPEED_1000_BLWC,
+	XGBE_SPEED_2500_BLWC,
+	XGBE_SPEED_10000_BLWC,
+};
+
+static const u32 xgbe_serdes_cdr_rate[] = {
+	XGBE_SPEED_1000_CDR,
+	XGBE_SPEED_2500_CDR,
+	XGBE_SPEED_10000_CDR,
+};
+
+static const u32 xgbe_serdes_pq_skew[] = {
+	XGBE_SPEED_1000_PQ,
+	XGBE_SPEED_2500_PQ,
+	XGBE_SPEED_10000_PQ,
+};
+
+static const u32 xgbe_serdes_tx_amp[] = {
+	XGBE_SPEED_1000_TXAMP,
+	XGBE_SPEED_2500_TXAMP,
+	XGBE_SPEED_10000_TXAMP,
+};
+
+static const u32 xgbe_serdes_dfe_tap_cfg[] = {
+	XGBE_SPEED_1000_DFE_TAP_CONFIG,
+	XGBE_SPEED_2500_DFE_TAP_CONFIG,
+	XGBE_SPEED_10000_DFE_TAP_CONFIG,
+};
+
+static const u32 xgbe_serdes_dfe_tap_ena[] = {
+	XGBE_SPEED_1000_DFE_TAP_ENABLE,
+	XGBE_SPEED_2500_DFE_TAP_ENABLE,
+	XGBE_SPEED_10000_DFE_TAP_ENABLE,
+};
+
 static void xgbe_default_config(struct xgbe_prv_data *pdata)
 {
 	DBGPR("-->xgbe_default_config\n");
@@ -153,8 +198,6 @@
 	pdata->rx_pause = 1;
 	pdata->phy_speed = SPEED_UNKNOWN;
 	pdata->power_down = 0;
-	pdata->default_autoneg = AUTONEG_ENABLE;
-	pdata->default_speed = SPEED_10000;
 
 	DBGPR("<--xgbe_default_config\n");
 }
@@ -162,6 +205,7 @@
 static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
 {
 	xgbe_init_function_ptrs_dev(&pdata->hw_if);
+	xgbe_init_function_ptrs_phy(&pdata->phy_if);
 	xgbe_init_function_ptrs_desc(&pdata->desc_if);
 }
 
@@ -222,23 +266,82 @@
 
 	return 0;
 }
+
+static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+	struct device *dev = pdata->dev;
+	struct device_node *phy_node;
+	struct platform_device *phy_pdev;
+
+	phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
+	if (phy_node) {
+		/* Old style device tree:
+		 *   The XGBE and PHY resources are separate
+		 */
+		phy_pdev = of_find_device_by_node(phy_node);
+		of_node_put(phy_node);
+	} else {
+		/* New style device tree:
+		 *   The XGBE and PHY resources are grouped together with
+		 *   the PHY resources listed last
+		 */
+		get_device(dev);
+		phy_pdev = pdata->pdev;
+	}
+
+	return phy_pdev;
+}
 #else   /* CONFIG_OF */
 static int xgbe_of_support(struct xgbe_prv_data *pdata)
 {
 	return -EINVAL;
 }
-#endif  /*CONFIG_OF */
+
+static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+	return NULL;
+}
+#endif  /* CONFIG_OF */
+
+static unsigned int xgbe_resource_count(struct platform_device *pdev,
+					unsigned int type)
+{
+	unsigned int count;
+	int i;
+
+	for (i = 0, count = 0; i < pdev->num_resources; i++) {
+		struct resource *res = &pdev->resource[i];
+
+		if (type == resource_type(res))
+			count++;
+	}
+
+	return count;
+}
+
+static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+	struct platform_device *phy_pdev;
+
+	if (pdata->use_acpi) {
+		get_device(pdata->dev);
+		phy_pdev = pdata->pdev;
+	} else {
+		phy_pdev = xgbe_of_get_phy_pdev(pdata);
+	}
+
+	return phy_pdev;
+}
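
One usage note on the lookup above: under ACPI, or with a new-style device tree, the PHY registers live in the same platform device as the MAC, so the helpers return pdata->pdev after taking an extra device reference; only an old-style tree with a phy-handle phandle resolves to a separate platform device. Either way the caller owns one reference, which is why xgbe_probe() below ends each path with platform_device_put(phy_pdev). A condensed sketch of the contract:

	/* Sketch: resolve, use, then drop the reference taken by either
	 * get_device() or of_find_device_by_node().
	 */
	phy_pdev = xgbe_get_phy_pdev(pdata);
	if (!phy_pdev)
		return -EINVAL;
	/* ... map the PHY MEM/IRQ resources via phy_pdev ... */
	platform_device_put(phy_pdev);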
 
 static int xgbe_probe(struct platform_device *pdev)
 {
 	struct xgbe_prv_data *pdata;
-	struct xgbe_hw_if *hw_if;
-	struct xgbe_desc_if *desc_if;
 	struct net_device *netdev;
-	struct device *dev = &pdev->dev;
+	struct device *dev = &pdev->dev, *phy_dev;
+	struct platform_device *phy_pdev;
 	struct resource *res;
 	const char *phy_mode;
-	unsigned int i;
+	unsigned int i, phy_memnum, phy_irqnum;
 	int ret;
 
 	DBGPR("--> xgbe_probe\n");
@@ -263,9 +366,36 @@
 	mutex_init(&pdata->rss_mutex);
 	spin_lock_init(&pdata->tstamp_lock);
 
+	pdata->msg_enable = netif_msg_init(debug, default_msg_level);
+
+	set_bit(XGBE_DOWN, &pdata->dev_state);
+
 	/* Check if we should use ACPI or DT */
 	pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
 
+	phy_pdev = xgbe_get_phy_pdev(pdata);
+	if (!phy_pdev) {
+		dev_err(dev, "unable to obtain phy device\n");
+		ret = -EINVAL;
+		goto err_phydev;
+	}
+	phy_dev = &phy_pdev->dev;
+
+	if (pdev == phy_pdev) {
+		/* New style device tree or ACPI:
+		 *   The XGBE and PHY resources are grouped together with
+		 *   the PHY resources listed last
+		 */
+		phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
+		phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
+	} else {
+		/* Old style device tree:
+		 *   The XGBE and PHY resources are separate
+		 */
+		phy_memnum = 0;
+		phy_irqnum = 0;
+	}
+
 	/* Set and validate the number of descriptors for a ring */
 	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
 	pdata->tx_desc_count = XGBE_TX_DESC_CNT;
@@ -292,7 +422,8 @@
 		ret = PTR_ERR(pdata->xgmac_regs);
 		goto err_io;
 	}
-	DBGPR("  xgmac_regs = %p\n", pdata->xgmac_regs);
+	if (netif_msg_probe(pdata))
+		dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	pdata->xpcs_regs = devm_ioremap_resource(dev, res);
@@ -301,7 +432,38 @@
 		ret = PTR_ERR(pdata->xpcs_regs);
 		goto err_io;
 	}
-	DBGPR("  xpcs_regs  = %p\n", pdata->xpcs_regs);
+	if (netif_msg_probe(pdata))
+		dev_dbg(dev, "xpcs_regs  = %p\n", pdata->xpcs_regs);
+
+	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+	pdata->rxtx_regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pdata->rxtx_regs)) {
+		dev_err(dev, "rxtx ioremap failed\n");
+		ret = PTR_ERR(pdata->rxtx_regs);
+		goto err_io;
+	}
+	if (netif_msg_probe(pdata))
+		dev_dbg(dev, "rxtx_regs  = %p\n", pdata->rxtx_regs);
+
+	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+	pdata->sir0_regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pdata->sir0_regs)) {
+		dev_err(dev, "sir0 ioremap failed\n");
+		ret = PTR_ERR(pdata->sir0_regs);
+		goto err_io;
+	}
+	if (netif_msg_probe(pdata))
+		dev_dbg(dev, "sir0_regs  = %p\n", pdata->sir0_regs);
+
+	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+	pdata->sir1_regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pdata->sir1_regs)) {
+		dev_err(dev, "sir1 ioremap failed\n");
+		ret = PTR_ERR(pdata->sir1_regs);
+		goto err_io;
+	}
+	if (netif_msg_probe(pdata))
+		dev_dbg(dev, "sir1_regs  = %p\n", pdata->sir1_regs);
 
 	/* Retrieve the MAC address */
 	ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
@@ -329,6 +491,115 @@
 	if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
 		pdata->per_channel_irq = 1;
 
+	/* Retrieve the PHY speedset */
+	ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY,
+				       &pdata->speed_set);
+	if (ret) {
+		dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
+		goto err_io;
+	}
+
+	switch (pdata->speed_set) {
+	case XGBE_SPEEDSET_1000_10000:
+	case XGBE_SPEEDSET_2500_10000:
+		break;
+	default:
+		dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
+		ret = -EINVAL;
+		goto err_io;
+	}
+
+	/* Retrieve the PHY configuration properties */
+	if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) {
+		ret = device_property_read_u32_array(phy_dev,
+						     XGBE_BLWC_PROPERTY,
+						     pdata->serdes_blwc,
+						     XGBE_SPEEDS);
+		if (ret) {
+			dev_err(dev, "invalid %s property\n",
+				XGBE_BLWC_PROPERTY);
+			goto err_io;
+		}
+	} else {
+		memcpy(pdata->serdes_blwc, xgbe_serdes_blwc,
+		       sizeof(pdata->serdes_blwc));
+	}
+
+	if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) {
+		ret = device_property_read_u32_array(phy_dev,
+						     XGBE_CDR_RATE_PROPERTY,
+						     pdata->serdes_cdr_rate,
+						     XGBE_SPEEDS);
+		if (ret) {
+			dev_err(dev, "invalid %s property\n",
+				XGBE_CDR_RATE_PROPERTY);
+			goto err_io;
+		}
+	} else {
+		memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate,
+		       sizeof(pdata->serdes_cdr_rate));
+	}
+
+	if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
+		ret = device_property_read_u32_array(phy_dev,
+						     XGBE_PQ_SKEW_PROPERTY,
+						     pdata->serdes_pq_skew,
+						     XGBE_SPEEDS);
+		if (ret) {
+			dev_err(dev, "invalid %s property\n",
+				XGBE_PQ_SKEW_PROPERTY);
+			goto err_io;
+		}
+	} else {
+		memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew,
+		       sizeof(pdata->serdes_pq_skew));
+	}
+
+	if (device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) {
+		ret = device_property_read_u32_array(phy_dev,
+						     XGBE_TX_AMP_PROPERTY,
+						     pdata->serdes_tx_amp,
+						     XGBE_SPEEDS);
+		if (ret) {
+			dev_err(dev, "invalid %s property\n",
+				XGBE_TX_AMP_PROPERTY);
+			goto err_io;
+		}
+	} else {
+		memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp,
+		       sizeof(pdata->serdes_tx_amp));
+	}
+
+	if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) {
+		ret = device_property_read_u32_array(phy_dev,
+						     XGBE_DFE_CFG_PROPERTY,
+						     pdata->serdes_dfe_tap_cfg,
+						     XGBE_SPEEDS);
+		if (ret) {
+			dev_err(dev, "invalid %s property\n",
+				XGBE_DFE_CFG_PROPERTY);
+			goto err_io;
+		}
+	} else {
+		memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg,
+		       sizeof(pdata->serdes_dfe_tap_cfg));
+	}
+
+	if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) {
+		ret = device_property_read_u32_array(phy_dev,
+						     XGBE_DFE_ENA_PROPERTY,
+						     pdata->serdes_dfe_tap_ena,
+						     XGBE_SPEEDS);
+		if (ret) {
+			dev_err(dev, "invalid %s property\n",
+				XGBE_DFE_ENA_PROPERTY);
+			goto err_io;
+		}
+	} else {
+		memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena,
+		       sizeof(pdata->serdes_dfe_tap_ena));
+	}
+
 	/* Obtain device settings unique to ACPI/OF */
 	if (pdata->use_acpi)
 		ret = xgbe_acpi_support(pdata);
@@ -357,17 +628,23 @@
 	}
 	pdata->dev_irq = ret;
 
+	/* Get the auto-negotiation interrupt */
+	ret = platform_get_irq(phy_pdev, phy_irqnum++);
+	if (ret < 0) {
+		dev_err(dev, "platform_get_irq phy 0 failed\n");
+		goto err_io;
+	}
+	pdata->an_irq = ret;
+
 	netdev->irq = pdata->dev_irq;
 	netdev->base_addr = (unsigned long)pdata->xgmac_regs;
 	memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
 
 	/* Set all the function pointers */
 	xgbe_init_all_fptrs(pdata);
-	hw_if = &pdata->hw_if;
-	desc_if = &pdata->desc_if;
 
 	/* Issue software reset to device */
-	hw_if->exit(pdata);
+	pdata->hw_if.exit(pdata);
 
 	/* Populate the hardware features */
 	xgbe_get_all_hw_features(pdata);
@@ -376,8 +653,6 @@
 	xgbe_default_config(pdata);
 
 	/* Set the DMA mask */
-	if (!dev->dma_mask)
-		dev->dma_mask = &dev->coherent_dma_mask;
 	ret = dma_set_mask_and_coherent(dev,
 					DMA_BIT_MASK(pdata->hw_feat.dma_width));
 	if (ret) {
@@ -422,16 +697,8 @@
 	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
 	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 
-	/* Prepare to regsiter with MDIO */
-	pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
-	if (!pdata->mii_bus_id) {
-		dev_err(dev, "failed to allocate mii bus id\n");
-		ret = -ENOMEM;
-		goto err_io;
-	}
-	ret = xgbe_mdio_register(pdata);
-	if (ret)
-		goto err_bus_id;
+	/* Call MDIO/PHY initialization routine */
+	pdata->phy_if.phy_init(pdata);
 
 	/* Set device operations */
 	netdev->netdev_ops = xgbe_get_netdev_ops();
@@ -476,26 +743,52 @@
 	ret = register_netdev(netdev);
 	if (ret) {
 		dev_err(dev, "net device registration failed\n");
-		goto err_reg_netdev;
+		goto err_io;
+	}
+
+	/* Create the PHY/ANEG name based on netdev name */
+	snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
+		 netdev_name(netdev));
+
+	/* Create workqueues */
+	pdata->dev_workqueue =
+		create_singlethread_workqueue(netdev_name(netdev));
+	if (!pdata->dev_workqueue) {
+		netdev_err(netdev, "device workqueue creation failed\n");
+		ret = -ENOMEM;
+		goto err_netdev;
+	}
+
+	pdata->an_workqueue =
+		create_singlethread_workqueue(pdata->an_name);
+	if (!pdata->an_workqueue) {
+		netdev_err(netdev, "phy workqueue creation failed\n");
+		ret = -ENOMEM;
+		goto err_wq;
 	}
 
 	xgbe_ptp_register(pdata);
 
 	xgbe_debugfs_init(pdata);
 
+	platform_device_put(phy_pdev);
+
 	netdev_notice(netdev, "net device enabled\n");
 
 	DBGPR("<-- xgbe_probe\n");
 
 	return 0;
 
-err_reg_netdev:
-	xgbe_mdio_unregister(pdata);
+err_wq:
+	destroy_workqueue(pdata->dev_workqueue);
 
-err_bus_id:
-	kfree(pdata->mii_bus_id);
+err_netdev:
+	unregister_netdev(netdev);
 
 err_io:
+	platform_device_put(phy_pdev);
+
+err_phydev:
 	free_netdev(netdev);
 
 err_alloc:
@@ -515,12 +808,14 @@
 
 	xgbe_ptp_unregister(pdata);
 
+	flush_workqueue(pdata->an_workqueue);
+	destroy_workqueue(pdata->an_workqueue);
+
+	flush_workqueue(pdata->dev_workqueue);
+	destroy_workqueue(pdata->dev_workqueue);
+
 	unregister_netdev(netdev);
 
-	xgbe_mdio_unregister(pdata);
-
-	kfree(pdata->mii_bus_id);
-
 	free_netdev(netdev);
 
 	DBGPR("<--xgbe_remove\n");
@@ -532,16 +827,17 @@
 static int xgbe_suspend(struct device *dev)
 {
 	struct net_device *netdev = dev_get_drvdata(dev);
-	int ret;
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	int ret = 0;
 
 	DBGPR("-->xgbe_suspend\n");
 
-	if (!netif_running(netdev)) {
-		DBGPR("<--xgbe_dev_suspend\n");
-		return -EINVAL;
-	}
+	if (netif_running(netdev))
+		ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
 
-	ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
+	pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+	pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
+	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
 
 	DBGPR("<--xgbe_suspend\n");
 
@@ -551,16 +847,16 @@
 static int xgbe_resume(struct device *dev)
 {
 	struct net_device *netdev = dev_get_drvdata(dev);
-	int ret;
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	int ret = 0;
 
 	DBGPR("-->xgbe_resume\n");
 
-	if (!netif_running(netdev)) {
-		DBGPR("<--xgbe_dev_resume\n");
-		return -EINVAL;
-	}
+	pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
+	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
 
-	ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
+	if (netif_running(netdev))
+		ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
 
 	DBGPR("<--xgbe_resume\n");
 
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 59e267f..9088c3a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -119,194 +119,1246 @@
 #include <linux/mdio.h>
 #include <linux/phy.h>
 #include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
 
 #include "xgbe.h"
 #include "xgbe-common.h"
 
-static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
+static void xgbe_an_enable_kr_training(struct xgbe_prv_data *pdata)
 {
-	struct xgbe_prv_data *pdata = mii->priv;
-	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	int mmd_data;
+	unsigned int reg;
 
-	DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
-		   prtad, mmd_reg);
+	reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
 
-	mmd_data = hw_if->read_mmd_regs(pdata, prtad, mmd_reg);
-
-	DBGPR_MDIO("<--xgbe_mdio_read: mmd_data=%#x\n", mmd_data);
-
-	return mmd_data;
+	reg |= XGBE_KR_TRAINING_ENABLE;
+	XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
 }
 
-static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
-			   u16 mmd_val)
+static void xgbe_an_disable_kr_training(struct xgbe_prv_data *pdata)
 {
-	struct xgbe_prv_data *pdata = mii->priv;
-	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	int mmd_data = mmd_val;
+	unsigned int reg;
 
-	DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
-		   prtad, mmd_reg, mmd_data);
+	reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
 
-	hw_if->write_mmd_regs(pdata, prtad, mmd_reg, mmd_data);
-
-	DBGPR_MDIO("<--xgbe_mdio_write\n");
-
-	return 0;
+	reg &= ~XGBE_KR_TRAINING_ENABLE;
+	XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
 }
 
-void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
+static void xgbe_pcs_power_cycle(struct xgbe_prv_data *pdata)
 {
-	struct device *dev = pdata->dev;
-	struct phy_device *phydev = pdata->mii->phy_map[XGBE_PRTAD];
-	int i;
+	unsigned int reg;
 
-	dev_alert(dev, "\n************* PHY Reg dump **********************\n");
+	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
 
-	dev_alert(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
-		  XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
-	dev_alert(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
-		  XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
-	dev_alert(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
-		  XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
-	dev_alert(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
-		  XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
-	dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
-		  XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
-	dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
-		  XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
+	reg |= MDIO_CTRL1_LPOWER;
+	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
 
-	dev_alert(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
-		  XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
-	dev_alert(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
-		  XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
-	dev_alert(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
-		  MDIO_AN_ADVERTISE,
-		  XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
-	dev_alert(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
-		  MDIO_AN_ADVERTISE + 1,
-		  XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
-	dev_alert(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
-		  MDIO_AN_ADVERTISE + 2,
-		  XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
-	dev_alert(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
-		  MDIO_AN_COMP_STAT,
-		  XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
+	usleep_range(75, 100);
 
-	dev_alert(dev, "MMD Device Mask = %#x\n",
-		  phydev->c45_ids.devices_in_package);
-	for (i = 0; i < ARRAY_SIZE(phydev->c45_ids.device_ids); i++)
-		dev_alert(dev, "  MMD %d: ID = %#08x\n", i,
-			  phydev->c45_ids.device_ids[i]);
-
-	dev_alert(dev, "\n*************************************************\n");
+	reg &= ~MDIO_CTRL1_LPOWER;
+	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
 }
 
-int xgbe_mdio_register(struct xgbe_prv_data *pdata)
+static void xgbe_serdes_start_ratechange(struct xgbe_prv_data *pdata)
 {
-	struct mii_bus *mii;
-	struct phy_device *phydev;
-	int ret = 0;
+	/* Assert Rx and Tx ratechange */
+	XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 1);
+}
 
-	DBGPR("-->xgbe_mdio_register\n");
+static void xgbe_serdes_complete_ratechange(struct xgbe_prv_data *pdata)
+{
+	unsigned int wait;
+	u16 status;
 
-	mii = mdiobus_alloc();
-	if (!mii) {
-		dev_err(pdata->dev, "mdiobus_alloc failed\n");
-		return -ENOMEM;
+	/* Release Rx and Tx ratechange */
+	XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 0);
+
+	/* Wait for Rx and Tx ready */
+	wait = XGBE_RATECHANGE_COUNT;
+	while (wait--) {
+		usleep_range(50, 75);
+
+		status = XSIR0_IOREAD(pdata, SIR0_STATUS);
+		if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
+		    XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
+			goto rx_reset;
 	}
 
-	/* Register on the MDIO bus (don't probe any PHYs) */
-	mii->name = XGBE_PHY_NAME;
-	mii->read = xgbe_mdio_read;
-	mii->write = xgbe_mdio_write;
-	snprintf(mii->id, sizeof(mii->id), "%s", pdata->mii_bus_id);
-	mii->priv = pdata;
-	mii->phy_mask = ~0;
-	mii->parent = pdata->dev;
-	ret = mdiobus_register(mii);
-	if (ret) {
-		dev_err(pdata->dev, "mdiobus_register failed\n");
-		goto err_mdiobus_alloc;
-	}
-	DBGPR("  mdiobus_register succeeded for %s\n", pdata->mii_bus_id);
+	netif_dbg(pdata, link, pdata->netdev, "SerDes rx/tx not ready (%#hx)\n",
+		  status);
 
-	/* Probe the PCS using Clause 45 */
-	phydev = get_phy_device(mii, XGBE_PRTAD, true);
-	if (IS_ERR(phydev) || !phydev ||
-	    !phydev->c45_ids.device_ids[MDIO_MMD_PCS]) {
-		dev_err(pdata->dev, "get_phy_device failed\n");
-		ret = phydev ? PTR_ERR(phydev) : -ENOLINK;
-		goto err_mdiobus_register;
-	}
-	request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
-		       MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
+rx_reset:
+	/* Perform Rx reset for the DFE changes */
+	XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0);
+	XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 1);
+}
 
-	ret = phy_device_register(phydev);
-	if (ret) {
-		dev_err(pdata->dev, "phy_device_register failed\n");
-		goto err_phy_device;
+static void xgbe_xgmii_mode(struct xgbe_prv_data *pdata)
+{
+	unsigned int reg;
+
+	/* Enable KR training */
+	xgbe_an_enable_kr_training(pdata);
+
+	/* Set MAC to 10G speed */
+	pdata->hw_if.set_xgmii_speed(pdata);
+
+	/* Set PCS to KR/10G speed */
+	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+	reg &= ~MDIO_PCS_CTRL2_TYPE;
+	reg |= MDIO_PCS_CTRL2_10GBR;
+	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+	reg &= ~MDIO_CTRL1_SPEEDSEL;
+	reg |= MDIO_CTRL1_SPEED10G;
+	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+	xgbe_pcs_power_cycle(pdata);
+
+	/* Set SerDes to 10G speed */
+	xgbe_serdes_start_ratechange(pdata);
+
+	XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE);
+	XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD);
+	XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL);
+
+	XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+			   pdata->serdes_cdr_rate[XGBE_SPEED_10000]);
+	XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+			   pdata->serdes_tx_amp[XGBE_SPEED_10000]);
+	XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+			   pdata->serdes_blwc[XGBE_SPEED_10000]);
+	XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+			   pdata->serdes_pq_skew[XGBE_SPEED_10000]);
+	XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+			   pdata->serdes_dfe_tap_cfg[XGBE_SPEED_10000]);
+	XRXTX_IOWRITE(pdata, RXTX_REG22,
+		      pdata->serdes_dfe_tap_ena[XGBE_SPEED_10000]);
+
+	xgbe_serdes_complete_ratechange(pdata);
+
+	netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
+}
+
+static void xgbe_gmii_2500_mode(struct xgbe_prv_data *pdata)
+{
+	unsigned int reg;
+
+	/* Disable KR training */
+	xgbe_an_disable_kr_training(pdata);
+
+	/* Set MAC to 2.5G speed */
+	pdata->hw_if.set_gmii_2500_speed(pdata);
+
+	/* Set PCS to KX/1G speed */
+	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+	reg &= ~MDIO_PCS_CTRL2_TYPE;
+	reg |= MDIO_PCS_CTRL2_10GBX;
+	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+	reg &= ~MDIO_CTRL1_SPEEDSEL;
+	reg |= MDIO_CTRL1_SPEED1G;
+	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+	xgbe_pcs_power_cycle(pdata);
+
+	/* Set SerDes to 2.5G speed */
+	xgbe_serdes_start_ratechange(pdata);
+
+	XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE);
+	XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD);
+	XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL);
+
+	XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+			   pdata->serdes_cdr_rate[XGBE_SPEED_2500]);
+	XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+			   pdata->serdes_tx_amp[XGBE_SPEED_2500]);
+	XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+			   pdata->serdes_blwc[XGBE_SPEED_2500]);
+	XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+			   pdata->serdes_pq_skew[XGBE_SPEED_2500]);
+	XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+			   pdata->serdes_dfe_tap_cfg[XGBE_SPEED_2500]);
+	XRXTX_IOWRITE(pdata, RXTX_REG22,
+		      pdata->serdes_dfe_tap_ena[XGBE_SPEED_2500]);
+
+	xgbe_serdes_complete_ratechange(pdata);
+
+	netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
+}
+
+static void xgbe_gmii_mode(struct xgbe_prv_data *pdata)
+{
+	unsigned int reg;
+
+	/* Disable KR training */
+	xgbe_an_disable_kr_training(pdata);
+
+	/* Set MAC to 1G speed */
+	pdata->hw_if.set_gmii_speed(pdata);
+
+	/* Set PCS to KX/1G speed */
+	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+	reg &= ~MDIO_PCS_CTRL2_TYPE;
+	reg |= MDIO_PCS_CTRL2_10GBX;
+	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+	reg &= ~MDIO_CTRL1_SPEEDSEL;
+	reg |= MDIO_CTRL1_SPEED1G;
+	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+	xgbe_pcs_power_cycle(pdata);
+
+	/* Set SerDes to 1G speed */
+	xgbe_serdes_start_ratechange(pdata);
+
+	XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE);
+	XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD);
+	XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL);
+
+	XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+			   pdata->serdes_cdr_rate[XGBE_SPEED_1000]);
+	XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+			   pdata->serdes_tx_amp[XGBE_SPEED_1000]);
+	XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+			   pdata->serdes_blwc[XGBE_SPEED_1000]);
+	XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+			   pdata->serdes_pq_skew[XGBE_SPEED_1000]);
+	XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+			   pdata->serdes_dfe_tap_cfg[XGBE_SPEED_1000]);
+	XRXTX_IOWRITE(pdata, RXTX_REG22,
+		      pdata->serdes_dfe_tap_ena[XGBE_SPEED_1000]);
+
+	xgbe_serdes_complete_ratechange(pdata);
+
+	netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
+}
+
+static void xgbe_cur_mode(struct xgbe_prv_data *pdata,
+			  enum xgbe_mode *mode)
+{
+	unsigned int reg;
+
+	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+	if ((reg & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
+		*mode = XGBE_MODE_KR;
+	else
+		*mode = XGBE_MODE_KX;
+}
+
+static bool xgbe_in_kr_mode(struct xgbe_prv_data *pdata)
+{
+	enum xgbe_mode mode;
+
+	xgbe_cur_mode(pdata, &mode);
+
+	return (mode == XGBE_MODE_KR);
+}
+
+static void xgbe_switch_mode(struct xgbe_prv_data *pdata)
+{
+	/* If we are in KR switch to KX, and vice-versa */
+	if (xgbe_in_kr_mode(pdata)) {
+		if (pdata->speed_set == XGBE_SPEEDSET_1000_10000)
+			xgbe_gmii_mode(pdata);
+		else
+			xgbe_gmii_2500_mode(pdata);
+	} else {
+		xgbe_xgmii_mode(pdata);
 	}
-	if (!phydev->dev.driver) {
-		dev_err(pdata->dev, "phy driver probe failed\n");
-		ret = -EIO;
-		goto err_phy_device;
+}
+
+static void xgbe_set_mode(struct xgbe_prv_data *pdata,
+			  enum xgbe_mode mode)
+{
+	enum xgbe_mode cur_mode;
+
+	xgbe_cur_mode(pdata, &cur_mode);
+	if (mode != cur_mode)
+		xgbe_switch_mode(pdata);
+}
+
+static bool xgbe_use_xgmii_mode(struct xgbe_prv_data *pdata)
+{
+	if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+		if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+			return true;
+	} else {
+		if (pdata->phy.speed == SPEED_10000)
+			return true;
 	}
 
-	/* Add a reference to the PHY driver so it can't be unloaded */
-	pdata->phy_module = phydev->dev.driver->owner;
-	if (!try_module_get(pdata->phy_module)) {
-		dev_err(pdata->dev, "try_module_get failed\n");
-		ret = -EIO;
-		goto err_phy_device;
+	return false;
+}
+
+static bool xgbe_use_gmii_2500_mode(struct xgbe_prv_data *pdata)
+{
+	if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+		if (pdata->phy.advertising & ADVERTISED_2500baseX_Full)
+			return true;
+	} else {
+		if (pdata->phy.speed == SPEED_2500)
+			return true;
 	}
 
-	pdata->mii = mii;
-	pdata->mdio_mmd = MDIO_MMD_PCS;
+	return false;
+}
 
-	phydev->autoneg = pdata->default_autoneg;
-	if (phydev->autoneg == AUTONEG_DISABLE) {
-		phydev->speed = pdata->default_speed;
-		phydev->duplex = DUPLEX_FULL;
-
-		phydev->advertising &= ~ADVERTISED_Autoneg;
+static bool xgbe_use_gmii_mode(struct xgbe_prv_data *pdata)
+{
+	if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+		if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full)
+			return true;
+	} else {
+		if (pdata->phy.speed == SPEED_1000)
+			return true;
 	}
 
-	pdata->phydev = phydev;
+	return false;
+}
 
-	DBGPHY_REGS(pdata);
+static void xgbe_set_an(struct xgbe_prv_data *pdata, bool enable, bool restart)
+{
+	unsigned int reg;
 
-	DBGPR("<--xgbe_mdio_register\n");
+	reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+	reg &= ~MDIO_AN_CTRL1_ENABLE;
 
-	return 0;
+	if (enable)
+		reg |= MDIO_AN_CTRL1_ENABLE;
 
-err_phy_device:
-	phy_device_free(phydev);
+	if (restart)
+		reg |= MDIO_AN_CTRL1_RESTART;
 
-err_mdiobus_register:
-	mdiobus_unregister(mii);
+	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
+}
 
-err_mdiobus_alloc:
-	mdiobus_free(mii);
+static void xgbe_restart_an(struct xgbe_prv_data *pdata)
+{
+	xgbe_set_an(pdata, true, true);
+
+	netif_dbg(pdata, link, pdata->netdev, "AN enabled/restarted\n");
+}
+
+static void xgbe_disable_an(struct xgbe_prv_data *pdata)
+{
+	xgbe_set_an(pdata, false, false);
+
+	netif_dbg(pdata, link, pdata->netdev, "AN disabled\n");
+}
+
+static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
+					enum xgbe_rx *state)
+{
+	unsigned int ad_reg, lp_reg, reg;
+
+	*state = XGBE_RX_COMPLETE;
+
+	/* If we're not in KR mode then we're done */
+	if (!xgbe_in_kr_mode(pdata))
+		return XGBE_AN_PAGE_RECEIVED;
+
+	/* Enable/Disable FEC */
+	ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+	lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+
+	reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL);
+	reg &= ~(MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE);
+	if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
+		reg |= pdata->fec_ability;
+
+	XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg);
+
+	/* Start KR training */
+	reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+	if (reg & XGBE_KR_TRAINING_ENABLE) {
+		XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 1);
+
+		reg |= XGBE_KR_TRAINING_START;
+		XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
+			    reg);
+
+		XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0);
+
+		netif_dbg(pdata, link, pdata->netdev,
+			  "KR training initiated\n");
+	}
+
+	return XGBE_AN_PAGE_RECEIVED;
+}
+
+static enum xgbe_an xgbe_an_tx_xnp(struct xgbe_prv_data *pdata,
+				   enum xgbe_rx *state)
+{
+	u16 msg;
+
+	*state = XGBE_RX_XNP;
+
+	msg = XGBE_XNP_MCF_NULL_MESSAGE;
+	msg |= XGBE_XNP_MP_FORMATTED;
+
+	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
+	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
+	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP, msg);
+
+	return XGBE_AN_PAGE_RECEIVED;
+}
+
+static enum xgbe_an xgbe_an_rx_bpa(struct xgbe_prv_data *pdata,
+				   enum xgbe_rx *state)
+{
+	unsigned int link_support;
+	unsigned int reg, ad_reg, lp_reg;
+
+	/* Read Base Ability register 2 first */
+	reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+
+	/* Check for a supported mode, otherwise restart in a different one */
+	link_support = xgbe_in_kr_mode(pdata) ? 0x80 : 0x20;
+	if (!(reg & link_support))
+		return XGBE_AN_INCOMPAT_LINK;
+
+	/* Check Extended Next Page support */
+	ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+	lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+
+	return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
+		(lp_reg & XGBE_XNP_NP_EXCHANGE))
+	       ? xgbe_an_tx_xnp(pdata, state)
+	       : xgbe_an_tx_training(pdata, state);
+}
+
+static enum xgbe_an xgbe_an_rx_xnp(struct xgbe_prv_data *pdata,
+				   enum xgbe_rx *state)
+{
+	unsigned int ad_reg, lp_reg;
+
+	/* Check Extended Next Page support */
+	ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_XNP);
+	lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPX);
+
+	return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
+		(lp_reg & XGBE_XNP_NP_EXCHANGE))
+	       ? xgbe_an_tx_xnp(pdata, state)
+	       : xgbe_an_tx_training(pdata, state);
+}
+
+static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata)
+{
+	enum xgbe_rx *state;
+	unsigned long an_timeout;
+	enum xgbe_an ret;
+
+	if (!pdata->an_start) {
+		pdata->an_start = jiffies;
+	} else {
+		an_timeout = pdata->an_start +
+			     msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
+		if (time_after(jiffies, an_timeout)) {
+			/* Auto-negotiation timed out, reset state */
+			pdata->kr_state = XGBE_RX_BPA;
+			pdata->kx_state = XGBE_RX_BPA;
+
+			pdata->an_start = jiffies;
+
+			netif_dbg(pdata, link, pdata->netdev,
+				  "AN timed out, resetting state\n");
+		}
+	}
+
+	state = xgbe_in_kr_mode(pdata) ? &pdata->kr_state
+				       : &pdata->kx_state;
+
+	switch (*state) {
+	case XGBE_RX_BPA:
+		ret = xgbe_an_rx_bpa(pdata, state);
+		break;
+
+	case XGBE_RX_XNP:
+		ret = xgbe_an_rx_xnp(pdata, state);
+		break;
+
+	default:
+		ret = XGBE_AN_ERROR;
+	}
 
 	return ret;
 }
 
-void xgbe_mdio_unregister(struct xgbe_prv_data *pdata)
+static enum xgbe_an xgbe_an_incompat_link(struct xgbe_prv_data *pdata)
 {
-	DBGPR("-->xgbe_mdio_unregister\n");
+	/* Be sure we aren't looping trying to negotiate */
+	if (xgbe_in_kr_mode(pdata)) {
+		pdata->kr_state = XGBE_RX_ERROR;
 
-	pdata->phydev = NULL;
+		if (!(pdata->phy.advertising & ADVERTISED_1000baseKX_Full) &&
+		    !(pdata->phy.advertising & ADVERTISED_2500baseX_Full))
+			return XGBE_AN_NO_LINK;
 
-	module_put(pdata->phy_module);
-	pdata->phy_module = NULL;
+		if (pdata->kx_state != XGBE_RX_BPA)
+			return XGBE_AN_NO_LINK;
+	} else {
+		pdata->kx_state = XGBE_RX_ERROR;
 
-	mdiobus_unregister(pdata->mii);
-	pdata->mii->priv = NULL;
+		if (!(pdata->phy.advertising & ADVERTISED_10000baseKR_Full))
+			return XGBE_AN_NO_LINK;
 
-	mdiobus_free(pdata->mii);
-	pdata->mii = NULL;
+		if (pdata->kr_state != XGBE_RX_BPA)
+			return XGBE_AN_NO_LINK;
+	}
 
-	DBGPR("<--xgbe_mdio_unregister\n");
+	xgbe_disable_an(pdata);
+
+	xgbe_switch_mode(pdata);
+
+	xgbe_restart_an(pdata);
+
+	return XGBE_AN_INCOMPAT_LINK;
+}
+
+static irqreturn_t xgbe_an_isr(int irq, void *data)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+
+	netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
+
+	/* Interrupt reason must be read and cleared outside of IRQ context */
+	disable_irq_nosync(pdata->an_irq);
+
+	queue_work(pdata->an_workqueue, &pdata->an_irq_work);
+
+	return IRQ_HANDLED;
+}
+
+static void xgbe_an_irq_work(struct work_struct *work)
+{
+	struct xgbe_prv_data *pdata = container_of(work,
+						   struct xgbe_prv_data,
+						   an_irq_work);
+
+	/* Avoid a race between enabling the IRQ and exiting the work by
+	 * waiting for the work to finish and then queueing it
+	 */
+	flush_work(&pdata->an_work);
+	queue_work(pdata->an_workqueue, &pdata->an_work);
+}
+
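
The ISR/work pairing above is the stock pattern for interrupts whose cause registers cannot be read in hard-IRQ context (here they are read over MDIO, which may sleep): mask the line without waiting, defer to a workqueue, and unmask from the work item once the cause is cleared. A self-contained sketch of the pattern (generic names, not the driver's):

	struct bh_ctx {
		int irq;
		struct workqueue_struct *wq;
		struct work_struct work;
	};

	static irqreturn_t bh_isr(int irq, void *data)
	{
		struct bh_ctx *ctx = data;

		disable_irq_nosync(ctx->irq);	/* mask; no re-entry */
		queue_work(ctx->wq, &ctx->work);
		return IRQ_HANDLED;
	}

	static void bh_work(struct work_struct *work)
	{
		struct bh_ctx *ctx = container_of(work, struct bh_ctx, work);

		/* ... read and clear the interrupt reason (may sleep) ... */
		enable_irq(ctx->irq);		/* balances the nosync disable */
	}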
+static const char *xgbe_state_as_string(enum xgbe_an state)
+{
+	switch (state) {
+	case XGBE_AN_READY:
+		return "Ready";
+	case XGBE_AN_PAGE_RECEIVED:
+		return "Page-Received";
+	case XGBE_AN_INCOMPAT_LINK:
+		return "Incompatible-Link";
+	case XGBE_AN_COMPLETE:
+		return "Complete";
+	case XGBE_AN_NO_LINK:
+		return "No-Link";
+	case XGBE_AN_ERROR:
+		return "Error";
+	default:
+		return "Undefined";
+	}
+}
+
+static void xgbe_an_state_machine(struct work_struct *work)
+{
+	struct xgbe_prv_data *pdata = container_of(work,
+						   struct xgbe_prv_data,
+						   an_work);
+	enum xgbe_an cur_state = pdata->an_state;
+	unsigned int int_reg, int_mask;
+
+	mutex_lock(&pdata->an_mutex);
+
+	/* Read the interrupt */
+	int_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
+	if (!int_reg)
+		goto out;
+
+next_int:
+	if (int_reg & XGBE_AN_PG_RCV) {
+		pdata->an_state = XGBE_AN_PAGE_RECEIVED;
+		int_mask = XGBE_AN_PG_RCV;
+	} else if (int_reg & XGBE_AN_INC_LINK) {
+		pdata->an_state = XGBE_AN_INCOMPAT_LINK;
+		int_mask = XGBE_AN_INC_LINK;
+	} else if (int_reg & XGBE_AN_INT_CMPLT) {
+		pdata->an_state = XGBE_AN_COMPLETE;
+		int_mask = XGBE_AN_INT_CMPLT;
+	} else {
+		pdata->an_state = XGBE_AN_ERROR;
+		int_mask = 0;
+	}
+
+	/* Clear the interrupt to be processed */
+	int_reg &= ~int_mask;
+	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, int_reg);
+
+	pdata->an_result = pdata->an_state;
+
+again:
+	netif_dbg(pdata, link, pdata->netdev, "AN %s\n",
+		  xgbe_state_as_string(pdata->an_state));
+
+	cur_state = pdata->an_state;
+
+	switch (pdata->an_state) {
+	case XGBE_AN_READY:
+		pdata->an_supported = 0;
+		break;
+
+	case XGBE_AN_PAGE_RECEIVED:
+		pdata->an_state = xgbe_an_page_received(pdata);
+		pdata->an_supported++;
+		break;
+
+	case XGBE_AN_INCOMPAT_LINK:
+		pdata->an_supported = 0;
+		pdata->parallel_detect = 0;
+		pdata->an_state = xgbe_an_incompat_link(pdata);
+		break;
+
+	case XGBE_AN_COMPLETE:
+		pdata->parallel_detect = pdata->an_supported ? 0 : 1;
+		netif_dbg(pdata, link, pdata->netdev, "%s successful\n",
+			  pdata->an_supported ? "Auto negotiation"
+					      : "Parallel detection");
+		break;
+
+	case XGBE_AN_NO_LINK:
+		break;
+
+	default:
+		pdata->an_state = XGBE_AN_ERROR;
+	}
+
+	if (pdata->an_state == XGBE_AN_NO_LINK) {
+		int_reg = 0;
+		XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+	} else if (pdata->an_state == XGBE_AN_ERROR) {
+		netdev_err(pdata->netdev,
+			   "error during auto-negotiation, state=%u\n",
+			   cur_state);
+
+		int_reg = 0;
+		XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+	}
+
+	if (pdata->an_state >= XGBE_AN_COMPLETE) {
+		pdata->an_result = pdata->an_state;
+		pdata->an_state = XGBE_AN_READY;
+		pdata->kr_state = XGBE_RX_BPA;
+		pdata->kx_state = XGBE_RX_BPA;
+		pdata->an_start = 0;
+
+		netif_dbg(pdata, link, pdata->netdev, "AN result: %s\n",
+			  xgbe_state_as_string(pdata->an_result));
+	}
+
+	if (cur_state != pdata->an_state)
+		goto again;
+
+	if (int_reg)
+		goto next_int;
+
+out:
+	enable_irq(pdata->an_irq);
+
+	mutex_unlock(&pdata->an_mutex);
+}
+
+static void xgbe_an_init(struct xgbe_prv_data *pdata)
+{
+	unsigned int reg;
+
+	/* Set up Advertisement register 3 first */
+	reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+	if (pdata->phy.advertising & ADVERTISED_10000baseR_FEC)
+		reg |= 0xc000;
+	else
+		reg &= ~0xc000;
+
+	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg);
+
+	/* Set up Advertisement register 2 next */
+	reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+	if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+		reg |= 0x80;
+	else
+		reg &= ~0x80;
+
+	if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) ||
+	    (pdata->phy.advertising & ADVERTISED_2500baseX_Full))
+		reg |= 0x20;
+	else
+		reg &= ~0x20;
+
+	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, reg);
+
+	/* Set up Advertisement register 1 last */
+	reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+	if (pdata->phy.advertising & ADVERTISED_Pause)
+		reg |= 0x400;
+	else
+		reg &= ~0x400;
+
+	if (pdata->phy.advertising & ADVERTISED_Asym_Pause)
+		reg |= 0x800;
+	else
+		reg &= ~0x800;
+
+	/* We don't intend to perform XNP */
+	reg &= ~XGBE_XNP_NP_EXCHANGE;
+
+	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
+
+	netif_dbg(pdata, link, pdata->netdev, "AN initialized\n");
+}
+
+static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
+{
+	if (pdata->tx_pause && pdata->rx_pause)
+		return "rx/tx";
+	else if (pdata->rx_pause)
+		return "rx";
+	else if (pdata->tx_pause)
+		return "tx";
+	else
+		return "off";
+}
+
+static const char *xgbe_phy_speed_string(int speed)
+{
+	switch (speed) {
+	case SPEED_1000:
+		return "1Gbps";
+	case SPEED_2500:
+		return "2.5Gbps";
+	case SPEED_10000:
+		return "10Gbps";
+	case SPEED_UNKNOWN:
+		return "Unknown";
+	default:
+		return "Unsupported";
+	}
+}
+
+static void xgbe_phy_print_status(struct xgbe_prv_data *pdata)
+{
+	if (pdata->phy.link)
+		netdev_info(pdata->netdev,
+			    "Link is Up - %s/%s - flow control %s\n",
+			    xgbe_phy_speed_string(pdata->phy.speed),
+			    pdata->phy.duplex == DUPLEX_FULL ? "Full" : "Half",
+			    xgbe_phy_fc_string(pdata));
+	else
+		netdev_info(pdata->netdev, "Link is Down\n");
+}
+
+static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
+{
+	int new_state = 0;
+
+	if (pdata->phy.link) {
+		/* Flow control support */
+		pdata->pause_autoneg = pdata->phy.pause_autoneg;
+
+		if (pdata->tx_pause != pdata->phy.tx_pause) {
+			new_state = 1;
+			pdata->hw_if.config_tx_flow_control(pdata);
+			pdata->tx_pause = pdata->phy.tx_pause;
+		}
+
+		if (pdata->rx_pause != pdata->phy.rx_pause) {
+			new_state = 1;
+			pdata->hw_if.config_rx_flow_control(pdata);
+			pdata->rx_pause = pdata->phy.rx_pause;
+		}
+
+		/* Speed support */
+		if (pdata->phy_speed != pdata->phy.speed) {
+			new_state = 1;
+			pdata->phy_speed = pdata->phy.speed;
+		}
+
+		if (pdata->phy_link != pdata->phy.link) {
+			new_state = 1;
+			pdata->phy_link = pdata->phy.link;
+		}
+	} else if (pdata->phy_link) {
+		new_state = 1;
+		pdata->phy_link = 0;
+		pdata->phy_speed = SPEED_UNKNOWN;
+	}
+
+	if (new_state && netif_msg_link(pdata))
+		xgbe_phy_print_status(pdata);
+}
+
+static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
+{
+	netif_dbg(pdata, link, pdata->netdev, "fixed PHY configuration\n");
+
+	/* Disable auto-negotiation */
+	xgbe_disable_an(pdata);
+
+	/* Validate/Set specified speed */
+	switch (pdata->phy.speed) {
+	case SPEED_10000:
+		xgbe_set_mode(pdata, XGBE_MODE_KR);
+		break;
+
+	case SPEED_2500:
+	case SPEED_1000:
+		xgbe_set_mode(pdata, XGBE_MODE_KX);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	/* Validate duplex mode */
+	if (pdata->phy.duplex != DUPLEX_FULL)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+{
+	set_bit(XGBE_LINK_INIT, &pdata->dev_state);
+	pdata->link_check = jiffies;
+
+	if (pdata->phy.autoneg != AUTONEG_ENABLE)
+		return xgbe_phy_config_fixed(pdata);
+
+	netif_dbg(pdata, link, pdata->netdev, "AN PHY configuration\n");
+
+	/* Disable auto-negotiation interrupt */
+	disable_irq(pdata->an_irq);
+
+	/* Start auto-negotiation in a supported mode */
+	if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) {
+		xgbe_set_mode(pdata, XGBE_MODE_KR);
+	} else if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) ||
+		   (pdata->phy.advertising & ADVERTISED_2500baseX_Full)) {
+		xgbe_set_mode(pdata, XGBE_MODE_KX);
+	} else {
+		enable_irq(pdata->an_irq);
+		return -EINVAL;
+	}
+
+	/* Disable and stop any in-progress auto-negotiation */
+	xgbe_disable_an(pdata);
+
+	/* Clear any auto-negotiation interrupts */
+	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+	pdata->an_result = XGBE_AN_READY;
+	pdata->an_state = XGBE_AN_READY;
+	pdata->kr_state = XGBE_RX_BPA;
+	pdata->kx_state = XGBE_RX_BPA;
+
+	/* Re-enable auto-negotiation interrupt */
+	enable_irq(pdata->an_irq);
+
+	/* Set up advertisement registers based on current settings */
+	xgbe_an_init(pdata);
+
+	/* Enable and start auto-negotiation */
+	xgbe_restart_an(pdata);
+
+	return 0;
+}
+
+static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+{
+	int ret;
+
+	mutex_lock(&pdata->an_mutex);
+
+	ret = __xgbe_phy_config_aneg(pdata);
+	if (ret)
+		set_bit(XGBE_LINK_ERR, &pdata->dev_state);
+	else
+		clear_bit(XGBE_LINK_ERR, &pdata->dev_state);
+
+	mutex_unlock(&pdata->an_mutex);
+
+	return ret;
+}
+
+static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
+{
+	return (pdata->an_result == XGBE_AN_COMPLETE);
+}
+
+static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
+{
+	unsigned long link_timeout;
+
+	link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
+	if (time_after(jiffies, link_timeout)) {
+		netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
+		xgbe_phy_config_aneg(pdata);
+	}
+}
+
+static void xgbe_phy_status_force(struct xgbe_prv_data *pdata)
+{
+	if (xgbe_in_kr_mode(pdata)) {
+		pdata->phy.speed = SPEED_10000;
+	} else {
+		switch (pdata->speed_set) {
+		case XGBE_SPEEDSET_1000_10000:
+			pdata->phy.speed = SPEED_1000;
+			break;
+
+		case XGBE_SPEEDSET_2500_10000:
+			pdata->phy.speed = SPEED_2500;
+			break;
+		}
+	}
+	pdata->phy.duplex = DUPLEX_FULL;
+}
+
+static void xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
+{
+	unsigned int ad_reg, lp_reg;
+
+	pdata->phy.lp_advertising = 0;
+
+	if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect)
+		return xgbe_phy_status_force(pdata);
+
+	pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+	pdata->phy.lp_advertising |= ADVERTISED_Backplane;
+
+	/* Compare Advertisement and Link Partner register 1 */
+	ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+	lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+	if (lp_reg & 0x400)
+		pdata->phy.lp_advertising |= ADVERTISED_Pause;
+	if (lp_reg & 0x800)
+		pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
+
+	if (pdata->phy.pause_autoneg) {
+		/* Set flow control based on auto-negotiation result */
+		pdata->phy.tx_pause = 0;
+		pdata->phy.rx_pause = 0;
+
+		if (ad_reg & lp_reg & 0x400) {
+			pdata->phy.tx_pause = 1;
+			pdata->phy.rx_pause = 1;
+		} else if (ad_reg & lp_reg & 0x800) {
+			if (ad_reg & 0x400)
+				pdata->phy.rx_pause = 1;
+			else if (lp_reg & 0x400)
+				pdata->phy.tx_pause = 1;
+		}
+	}
+
+	/* Compare Advertisement and Link Partner register 2 */
+	ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+	lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+	if (lp_reg & 0x80)
+		pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
+	if (lp_reg & 0x20) {
+		switch (pdata->speed_set) {
+		case XGBE_SPEEDSET_1000_10000:
+			pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
+			break;
+		case XGBE_SPEEDSET_2500_10000:
+			pdata->phy.lp_advertising |= ADVERTISED_2500baseX_Full;
+			break;
+		}
+	}
+
+	ad_reg &= lp_reg;
+	if (ad_reg & 0x80) {
+		pdata->phy.speed = SPEED_10000;
+		xgbe_set_mode(pdata, XGBE_MODE_KR);
+	} else if (ad_reg & 0x20) {
+		switch (pdata->speed_set) {
+		case XGBE_SPEEDSET_1000_10000:
+			pdata->phy.speed = SPEED_1000;
+			break;
+
+		case XGBE_SPEEDSET_2500_10000:
+			pdata->phy.speed = SPEED_2500;
+			break;
+		}
+
+		xgbe_set_mode(pdata, XGBE_MODE_KX);
+	} else {
+		pdata->phy.speed = SPEED_UNKNOWN;
+	}
+
+	/* Compare Advertisement and Link Partner register 3 */
+	ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+	lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+	if (lp_reg & 0xc000)
+		pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
+
+	pdata->phy.duplex = DUPLEX_FULL;
+}
+
+static void xgbe_phy_status(struct xgbe_prv_data *pdata)
+{
+	unsigned int reg, link_aneg;
+
+	if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
+		if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
+			netif_carrier_off(pdata->netdev);
+
+		pdata->phy.link = 0;
+		goto adjust_link;
+	}
+
+	link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
+
+	/* Get the link status. Link status is latched low, so read
+	 * once to clear and then read again to get current state
+	 */
+	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+	pdata->phy.link = (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
+
+	if (pdata->phy.link) {
+		if (link_aneg && !xgbe_phy_aneg_done(pdata)) {
+			xgbe_check_link_timeout(pdata);
+			return;
+		}
+
+		xgbe_phy_status_aneg(pdata);
+
+		if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
+			clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
+
+		if (!test_bit(XGBE_LINK, &pdata->dev_state)) {
+			set_bit(XGBE_LINK, &pdata->dev_state);
+			netif_carrier_on(pdata->netdev);
+		}
+	} else {
+		if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
+			xgbe_check_link_timeout(pdata);
+
+			if (link_aneg)
+				return;
+		}
+
+		xgbe_phy_status_aneg(pdata);
+
+		if (test_bit(XGBE_LINK, &pdata->dev_state)) {
+			clear_bit(XGBE_LINK, &pdata->dev_state);
+			netif_carrier_off(pdata->netdev);
+		}
+	}
+
+adjust_link:
+	xgbe_phy_adjust_link(pdata);
+}
+
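
The back-to-back MDIO_STAT1 reads in xgbe_phy_status() above handle Clause 45 latched-low semantics: the link bit latches a link drop until the register is read, so the first read clears the stale latch and the second reports the live state. Isolated as a sketch (same accessor as the surrounding code):

	static int phy_link_is_up(struct xgbe_prv_data *pdata)
	{
		unsigned int reg;

		/* First read returns 0 if the link bounced since the last
		 * poll, even when it is back up; the second read is live.
		 */
		reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
		reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);

		return !!(reg & MDIO_STAT1_LSTATUS);
	}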
+static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+{
+	netif_dbg(pdata, link, pdata->netdev, "stopping PHY\n");
+
+	/* Disable auto-negotiation */
+	xgbe_disable_an(pdata);
+
+	/* Disable auto-negotiation interrupts */
+	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+
+	devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+
+	pdata->phy.link = 0;
+	if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
+		netif_carrier_off(pdata->netdev);
+
+	xgbe_phy_adjust_link(pdata);
+}
+
+static int xgbe_phy_start(struct xgbe_prv_data *pdata)
+{
+	struct net_device *netdev = pdata->netdev;
+	int ret;
+
+	netif_dbg(pdata, link, pdata->netdev, "starting PHY\n");
+
+	ret = devm_request_irq(pdata->dev, pdata->an_irq,
+			       xgbe_an_isr, 0, pdata->an_name,
+			       pdata);
+	if (ret) {
+		netdev_err(netdev, "phy irq request failed\n");
+		return ret;
+	}
+
+	/* Set initial mode - call the mode-setting routines
+	 * directly to ensure we are properly configured
+	 */
+	if (xgbe_use_xgmii_mode(pdata)) {
+		xgbe_xgmii_mode(pdata);
+	} else if (xgbe_use_gmii_mode(pdata)) {
+		xgbe_gmii_mode(pdata);
+	} else if (xgbe_use_gmii_2500_mode(pdata)) {
+		xgbe_gmii_2500_mode(pdata);
+	} else {
+		ret = -EINVAL;
+		goto err_irq;
+	}
+
+	/* Set up advertisement registers based on current settings */
+	xgbe_an_init(pdata);
+
+	/* Enable auto-negotiation interrupts */
+	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
+
+	return xgbe_phy_config_aneg(pdata);
+
+err_irq:
+	devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+
+	return ret;
+}
+
+static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
+{
+	unsigned int count, reg;
+
+	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+	reg |= MDIO_CTRL1_RESET;
+	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+	count = 50;
+	do {
+		msleep(20);
+		reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+	} while ((reg & MDIO_CTRL1_RESET) && --count);
+
+	if (reg & MDIO_CTRL1_RESET)
+		return -ETIMEDOUT;
+
+	/* Disable auto-negotiation for now */
+	xgbe_disable_an(pdata);
+
+	/* Clear auto-negotiation interrupts */
+	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+	return 0;
+}
+
+static void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
+{
+	struct device *dev = pdata->dev;
+
+	dev_dbg(dev, "\n************* PHY Reg dump **********************\n");
+
+	dev_dbg(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+		XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
+	dev_dbg(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+		XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
+	dev_dbg(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
+		XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
+	dev_dbg(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
+		XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
+	dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
+		XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
+	dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
+		XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
+
+	dev_dbg(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+		XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
+	dev_dbg(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+		XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
+	dev_dbg(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
+		MDIO_AN_ADVERTISE,
+		XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
+	dev_dbg(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
+		MDIO_AN_ADVERTISE + 1,
+		XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
+	dev_dbg(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
+		MDIO_AN_ADVERTISE + 2,
+		XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
+	dev_dbg(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
+		MDIO_AN_COMP_STAT,
+		XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
+
+	dev_dbg(dev, "\n*************************************************\n");
+}
+
+static void xgbe_phy_init(struct xgbe_prv_data *pdata)
+{
+	mutex_init(&pdata->an_mutex);
+	INIT_WORK(&pdata->an_irq_work, xgbe_an_irq_work);
+	INIT_WORK(&pdata->an_work, xgbe_an_state_machine);
+	pdata->mdio_mmd = MDIO_MMD_PCS;
+
+	/* Initialize supported features */
+	pdata->phy.supported = SUPPORTED_Autoneg;
+	pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+	pdata->phy.supported |= SUPPORTED_Backplane;
+	pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
+	switch (pdata->speed_set) {
+	case XGBE_SPEEDSET_1000_10000:
+		pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
+		break;
+	case XGBE_SPEEDSET_2500_10000:
+		pdata->phy.supported |= SUPPORTED_2500baseX_Full;
+		break;
+	}
+
+	pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD,
+					MDIO_PMA_10GBR_FECABLE);
+	pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE |
+			       MDIO_PMA_10GBR_FECABLE_ERRABLE);
+	if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+		pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
+
+	pdata->phy.advertising = pdata->phy.supported;
+
+	pdata->phy.address = 0;
+
+	pdata->phy.autoneg = AUTONEG_ENABLE;
+	pdata->phy.speed = SPEED_UNKNOWN;
+	pdata->phy.duplex = DUPLEX_UNKNOWN;
+
+	pdata->phy.link = 0;
+
+	pdata->phy.pause_autoneg = pdata->pause_autoneg;
+	pdata->phy.tx_pause = pdata->tx_pause;
+	pdata->phy.rx_pause = pdata->rx_pause;
+
+	/* Fix up Flow Control advertising */
+	pdata->phy.advertising &= ~ADVERTISED_Pause;
+	pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
+
+	if (pdata->rx_pause) {
+		pdata->phy.advertising |= ADVERTISED_Pause;
+		pdata->phy.advertising |= ADVERTISED_Asym_Pause;
+	}
+
+	if (pdata->tx_pause)
+		pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
+
+	if (netif_msg_drv(pdata))
+		xgbe_dump_phy_registers(pdata);
+}
+
+void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
+{
+	phy_if->phy_init        = xgbe_phy_init;
+
+	phy_if->phy_reset       = xgbe_phy_reset;
+	phy_if->phy_start       = xgbe_phy_start;
+	phy_if->phy_stop        = xgbe_phy_stop;
+
+	phy_if->phy_status      = xgbe_phy_status;
+	phy_if->phy_config_aneg = xgbe_phy_config_aneg;
 }
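
The table filled in here is what lets xgbe-main.c and xgbe-ethtool.c drive the PHY through pdata->phy_if without touching MDIO directly. The struct this implies, inferred from the assignments and call sites above (the authoritative definition lives in xgbe.h, which this hunk does not show):

	struct xgbe_phy_if {
		void (*phy_init)(struct xgbe_prv_data *);

		int (*phy_reset)(struct xgbe_prv_data *);
		int (*phy_start)(struct xgbe_prv_data *);
		void (*phy_stop)(struct xgbe_prv_data *);

		void (*phy_status)(struct xgbe_prv_data *);
		int (*phy_config_aneg)(struct xgbe_prv_data *);
	};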
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index e62dfa2..63d72a1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -129,7 +129,7 @@
 #include <net/dcbnl.h>
 
 #define XGBE_DRV_NAME		"amd-xgbe"
-#define XGBE_DRV_VERSION	"1.0.0-a"
+#define XGBE_DRV_VERSION	"1.0.2"
 #define XGBE_DRV_DESC		"AMD 10 Gigabit Ethernet Driver"
 
 /* Descriptor related defines */
@@ -178,14 +178,17 @@
 #define XGMAC_JUMBO_PACKET_MTU	9000
 #define XGMAC_MAX_JUMBO_PACKET	9018
 
-/* MDIO bus phy name */
-#define XGBE_PHY_NAME		"amd_xgbe_phy"
-#define XGBE_PRTAD		0
-
 /* Common property names */
 #define XGBE_MAC_ADDR_PROPERTY	"mac-address"
 #define XGBE_PHY_MODE_PROPERTY	"phy-mode"
 #define XGBE_DMA_IRQS_PROPERTY	"amd,per-channel-interrupt"
+#define XGBE_SPEEDSET_PROPERTY	"amd,speed-set"
+#define XGBE_BLWC_PROPERTY	"amd,serdes-blwc"
+#define XGBE_CDR_RATE_PROPERTY	"amd,serdes-cdr-rate"
+#define XGBE_PQ_SKEW_PROPERTY	"amd,serdes-pq-skew"
+#define XGBE_TX_AMP_PROPERTY	"amd,serdes-tx-amp"
+#define XGBE_DFE_CFG_PROPERTY	"amd,serdes-dfe-tap-config"
+#define XGBE_DFE_ENA_PROPERTY	"amd,serdes-dfe-tap-enable"
 
 /* Device-tree clock names */
 #define XGBE_DMA_CLOCK		"dma_clk"
@@ -241,6 +244,49 @@
 #define XGBE_RSS_LOOKUP_TABLE_TYPE	0
 #define XGBE_RSS_HASH_KEY_TYPE		1
 
+/* Auto-negotiation */
+#define XGBE_AN_MS_TIMEOUT		500
+#define XGBE_LINK_TIMEOUT		10
+
+#define XGBE_AN_INT_CMPLT		0x01
+#define XGBE_AN_INC_LINK		0x02
+#define XGBE_AN_PG_RCV			0x04
+#define XGBE_AN_INT_MASK		0x07
+
+/* Rate-change complete wait/retry count */
+#define XGBE_RATECHANGE_COUNT		500
+
+/* Default SerDes settings */
+#define XGBE_SPEED_10000_BLWC		0
+#define XGBE_SPEED_10000_CDR		0x7
+#define XGBE_SPEED_10000_PLL		0x1
+#define XGBE_SPEED_10000_PQ		0x12
+#define XGBE_SPEED_10000_RATE		0x0
+#define XGBE_SPEED_10000_TXAMP		0xa
+#define XGBE_SPEED_10000_WORD		0x7
+#define XGBE_SPEED_10000_DFE_TAP_CONFIG	0x1
+#define XGBE_SPEED_10000_DFE_TAP_ENABLE	0x7f
+
+#define XGBE_SPEED_2500_BLWC		1
+#define XGBE_SPEED_2500_CDR		0x2
+#define XGBE_SPEED_2500_PLL		0x0
+#define XGBE_SPEED_2500_PQ		0xa
+#define XGBE_SPEED_2500_RATE		0x1
+#define XGBE_SPEED_2500_TXAMP		0xf
+#define XGBE_SPEED_2500_WORD		0x1
+#define XGBE_SPEED_2500_DFE_TAP_CONFIG	0x3
+#define XGBE_SPEED_2500_DFE_TAP_ENABLE	0x0
+
+#define XGBE_SPEED_1000_BLWC		1
+#define XGBE_SPEED_1000_CDR		0x2
+#define XGBE_SPEED_1000_PLL		0x0
+#define XGBE_SPEED_1000_PQ		0xa
+#define XGBE_SPEED_1000_RATE		0x3
+#define XGBE_SPEED_1000_TXAMP		0xf
+#define XGBE_SPEED_1000_WORD		0x1
+#define XGBE_SPEED_1000_DFE_TAP_CONFIG	0x3
+#define XGBE_SPEED_1000_DFE_TAP_ENABLE	0x0
+
 struct xgbe_prv_data;
 
 struct xgbe_packet_data {
@@ -334,8 +380,6 @@
 	 */
 	unsigned int state_saved;
 	struct {
-		unsigned int incomplete;
-		unsigned int context_next;
 		struct sk_buff *skb;
 		unsigned int len;
 		unsigned int error;
@@ -414,6 +458,13 @@
 	struct xgbe_ring *rx_ring;
 } ____cacheline_aligned;
 
+enum xgbe_state {
+	XGBE_DOWN,
+	XGBE_LINK,
+	XGBE_LINK_INIT,
+	XGBE_LINK_ERR,
+};
+
 enum xgbe_int {
 	XGMAC_INT_DMA_CH_SR_TI,
 	XGMAC_INT_DMA_CH_SR_TPS,
@@ -445,6 +496,57 @@
 	XGMAC_MTL_FIFO_SIZE_256K = 0x3ff,
 };
 
+enum xgbe_speed {
+	XGBE_SPEED_1000 = 0,
+	XGBE_SPEED_2500,
+	XGBE_SPEED_10000,
+	XGBE_SPEEDS,
+};
+
+enum xgbe_an {
+	XGBE_AN_READY = 0,
+	XGBE_AN_PAGE_RECEIVED,
+	XGBE_AN_INCOMPAT_LINK,
+	XGBE_AN_COMPLETE,
+	XGBE_AN_NO_LINK,
+	XGBE_AN_ERROR,
+};
+
+enum xgbe_rx {
+	XGBE_RX_BPA = 0,
+	XGBE_RX_XNP,
+	XGBE_RX_COMPLETE,
+	XGBE_RX_ERROR,
+};
+
+enum xgbe_mode {
+	XGBE_MODE_KR = 0,
+	XGBE_MODE_KX,
+};
+
+enum xgbe_speedset {
+	XGBE_SPEEDSET_1000_10000 = 0,
+	XGBE_SPEEDSET_2500_10000,
+};
+
+struct xgbe_phy {
+	u32 supported;
+	u32 advertising;
+	u32 lp_advertising;
+
+	int address;
+
+	int autoneg;
+	int speed;
+	int duplex;
+
+	int link;
+
+	int pause_autoneg;
+	int tx_pause;
+	int rx_pause;
+};
+
 struct xgbe_mmc_stats {
 	/* Tx Stats */
 	u64 txoctetcount_gb;
@@ -492,6 +594,11 @@
 	u64 rxwatchdogerror;
 };
 
+struct xgbe_ext_stats {
+	u64 tx_tso_packets;
+	u64 rx_split_header_packets;
+};
+
 struct xgbe_hw_if {
 	int (*tx_complete)(struct xgbe_ring_desc *);
 
@@ -591,6 +698,20 @@
 	int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
 };
 
+struct xgbe_phy_if {
+	/* For initial PHY setup */
+	void (*phy_init)(struct xgbe_prv_data *);
+
+	/* For PHY support when setting device up/down */
+	int (*phy_reset)(struct xgbe_prv_data *);
+	int (*phy_start)(struct xgbe_prv_data *);
+	void (*phy_stop)(struct xgbe_prv_data *);
+
+	/* For PHY support while device is up */
+	void (*phy_status)(struct xgbe_prv_data *);
+	int (*phy_config_aneg)(struct xgbe_prv_data *);
+};
+
 struct xgbe_desc_if {
 	int (*alloc_ring_resources)(struct xgbe_prv_data *);
 	void (*free_ring_resources)(struct xgbe_prv_data *);
@@ -660,6 +781,9 @@
 	/* XGMAC/XPCS related mmio registers */
 	void __iomem *xgmac_regs;	/* XGMAC CSRs */
 	void __iomem *xpcs_regs;	/* XPCS MMD registers */
+	void __iomem *rxtx_regs;	/* SerDes Rx/Tx CSRs */
+	void __iomem *sir0_regs;	/* SerDes integration registers (1/2) */
+	void __iomem *sir1_regs;	/* SerDes integration registers (2/2) */
 
 	/* Overall device lock */
 	spinlock_t lock;
@@ -670,10 +794,14 @@
 	/* RSS addressing mutex */
 	struct mutex rss_mutex;
 
+	/* Flags representing xgbe_state */
+	unsigned long dev_state;
+
 	int dev_irq;
 	unsigned int per_channel_irq;
 
 	struct xgbe_hw_if hw_if;
+	struct xgbe_phy_if phy_if;
 	struct xgbe_desc_if desc_if;
 
 	/* AXI DMA settings */
@@ -682,6 +810,11 @@
 	unsigned int arcache;
 	unsigned int awcache;
 
+	/* Service routine support */
+	struct workqueue_struct *dev_workqueue;
+	struct work_struct service_work;
+	struct timer_list service_timer;
+
 	/* Rings for Tx/Rx on a DMA channel */
 	struct xgbe_channel *channel;
 	unsigned int channel_count;
@@ -729,27 +862,12 @@
 	u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE];
 	u32 rss_options;
 
-	/* MDIO settings */
-	struct module *phy_module;
-	char *mii_bus_id;
-	struct mii_bus *mii;
-	int mdio_mmd;
-	struct phy_device *phydev;
-	int default_autoneg;
-	int default_speed;
-
-	/* Current PHY settings */
-	phy_interface_t phy_mode;
-	int phy_link;
-	int phy_speed;
-	unsigned int phy_tx_pause;
-	unsigned int phy_rx_pause;
-
 	/* Netdev related settings */
 	unsigned char mac_addr[ETH_ALEN];
 	netdev_features_t netdev_features;
 	struct napi_struct napi;
 	struct xgbe_mmc_stats mmc_stats;
+	struct xgbe_ext_stats ext_stats;
 
 	/* Filtering support */
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -787,6 +905,54 @@
 	/* Keeps track of power mode */
 	unsigned int power_down;
 
+	/* Network interface message level setting */
+	u32 msg_enable;
+
+	/* Current PHY settings */
+	phy_interface_t phy_mode;
+	int phy_link;
+	int phy_speed;
+
+	/* MDIO/PHY related settings */
+	struct xgbe_phy phy;
+	int mdio_mmd;
+	unsigned long link_check;
+
+	char an_name[IFNAMSIZ + 32];
+	struct workqueue_struct *an_workqueue;
+
+	int an_irq;
+	struct work_struct an_irq_work;
+
+	unsigned int speed_set;
+
+	/* SerDes UEFI configurable settings.
+	 *   Switching between modes/speeds requires new values for some
+	 *   SerDes settings.  The values can be supplied as device
+	 *   properties in array format.  The first array entry is for
+	 *   1GbE, second for 2.5GbE and third for 10GbE
+	 */
+	u32 serdes_blwc[XGBE_SPEEDS];
+	u32 serdes_cdr_rate[XGBE_SPEEDS];
+	u32 serdes_pq_skew[XGBE_SPEEDS];
+	u32 serdes_tx_amp[XGBE_SPEEDS];
+	u32 serdes_dfe_tap_cfg[XGBE_SPEEDS];
+	u32 serdes_dfe_tap_ena[XGBE_SPEEDS];
+
+	/* Auto-negotiation state machine support */
+	struct mutex an_mutex;
+	enum xgbe_an an_result;
+	enum xgbe_an an_state;
+	enum xgbe_rx kr_state;
+	enum xgbe_rx kx_state;
+	struct work_struct an_work;
+	unsigned int an_supported;
+	unsigned int parallel_detect;
+	unsigned int fec_ability;
+	unsigned long an_start;
+
+	unsigned int lpm_ctrl;		/* CTRL1 for resume */
+
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *xgbe_debugfs;
 
@@ -800,6 +966,7 @@
 /* Function prototypes*/
 
 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
+void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
 void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
 struct net_device_ops *xgbe_get_netdev_ops(void);
 struct ethtool_ops *xgbe_get_ethtool_ops(void);
@@ -807,14 +974,11 @@
 const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void);
 #endif
 
-int xgbe_mdio_register(struct xgbe_prv_data *);
-void xgbe_mdio_unregister(struct xgbe_prv_data *);
-void xgbe_dump_phy_registers(struct xgbe_prv_data *);
 void xgbe_ptp_register(struct xgbe_prv_data *);
 void xgbe_ptp_unregister(struct xgbe_prv_data *);
-void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int,
-		       unsigned int);
-void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *,
+void xgbe_dump_tx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
+		       unsigned int, unsigned int, unsigned int);
+void xgbe_dump_rx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
 		       unsigned int);
 void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool);
 void xgbe_get_all_hw_features(struct xgbe_prv_data *);
@@ -831,18 +995,6 @@
 static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
 #endif /* CONFIG_DEBUG_FS */
 
-/* NOTE: Uncomment for TX and RX DESCRIPTOR DUMP in KERNEL LOG */
-#if 0
-#define XGMAC_ENABLE_TX_DESC_DUMP
-#define XGMAC_ENABLE_RX_DESC_DUMP
-#endif
-
-/* NOTE: Uncomment for TX and RX PACKET DUMP in KERNEL LOG */
-#if 0
-#define XGMAC_ENABLE_TX_PKT_DUMP
-#define XGMAC_ENABLE_RX_PKT_DUMP
-#endif
-
 /* NOTE: Uncomment for function trace log messages in KERNEL LOG */
 #if 0
 #define YDEBUG
@@ -852,10 +1004,8 @@
 /* For debug prints */
 #ifdef YDEBUG
 #define DBGPR(x...) pr_alert(x)
-#define DBGPHY_REGS(x...) xgbe_dump_phy_registers(x)
 #else
 #define DBGPR(x...) do { } while (0)
-#define DBGPHY_REGS(x...) do { } while (0)
 #endif
 
 #ifdef YDEBUG_MDIO
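
The serdes_* arrays in struct xgbe_prv_data above hold one value per entry of
enum xgbe_speed (1GbE, 2.5GbE, 10GbE, per the comment). A hedged sketch of how
a property such as "amd,serdes-blwc" could be parsed into that layout, assuming
pdata->dev points at the underlying struct device; xgbe_read_blwc_property is
a hypothetical helper, not the driver's actual probe code:

	static void xgbe_read_blwc_property(struct xgbe_prv_data *pdata)
	{
		static const u32 blwc_defaults[XGBE_SPEEDS] = {
			XGBE_SPEED_1000_BLWC,	/* [XGBE_SPEED_1000] */
			XGBE_SPEED_2500_BLWC,	/* [XGBE_SPEED_2500] */
			XGBE_SPEED_10000_BLWC,	/* [XGBE_SPEED_10000] */
		};

		/* fall back to the documented defaults when the property
		 * is absent
		 */
		if (device_property_read_u32_array(pdata->dev,
						   XGBE_BLWC_PROPERTY,
						   pdata->serdes_blwc,
						   XGBE_SPEEDS))
			memcpy(pdata->serdes_blwc, blwc_defaults,
			       sizeof(blwc_defaults));
	}
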
diff --git a/drivers/net/ethernet/apm/xgene/Makefile b/drivers/net/ethernet/apm/xgene/Makefile
index 68be5655..700b5ab 100644
--- a/drivers/net/ethernet/apm/xgene/Makefile
+++ b/drivers/net/ethernet/apm/xgene/Makefile
@@ -3,5 +3,5 @@
 #
 
 xgene-enet-objs := xgene_enet_hw.o xgene_enet_sgmac.o xgene_enet_xgmac.o \
-		   xgene_enet_main.o xgene_enet_ethtool.o
+		   xgene_enet_main.o xgene_enet_ring2.o xgene_enet_ethtool.o
 obj-$(CONFIG_NET_XGENE) += xgene-enet.o
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index b927021..a626c43 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -87,10 +87,11 @@
 
 static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
 {
+	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
 	int i;
 
 	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
-	for (i = 0; i < NUM_RING_CONFIG; i++) {
+	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
 		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
 				     ring->state[i]);
 	}
@@ -98,7 +99,7 @@
 
 static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
 {
-	memset(ring->state, 0, sizeof(u32) * NUM_RING_CONFIG);
+	memset(ring->state, 0, sizeof(ring->state));
 	xgene_enet_write_ring_state(ring);
 }
 
@@ -141,8 +142,8 @@
 	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
 }
 
-struct xgene_enet_desc_ring *xgene_enet_setup_ring(
-					struct xgene_enet_desc_ring *ring)
+static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
+				    struct xgene_enet_desc_ring *ring)
 {
 	u32 size = ring->size;
 	u32 i, data;
@@ -168,7 +169,7 @@
 	return ring;
 }
 
-void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
+static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
 {
 	u32 data;
 	bool is_bufpool;
@@ -186,6 +187,22 @@
 	xgene_enet_clr_ring_state(ring);
 }
 
+static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
+{
+	iowrite32(count, ring->cmd);
+}
+
+static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
+{
+	u32 __iomem *cmd_base = ring->cmd_base;
+	u32 ring_state, num_msgs;
+
+	ring_state = ioread32(&cmd_base[1]);
+	num_msgs = GET_VAL(NUMMSGSINQ, ring_state);
+
+	return num_msgs;
+}
+
 void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
 			    struct xgene_enet_pdata *pdata,
 			    enum xgene_enet_err_code status)
@@ -593,7 +610,7 @@
 	if (!xgene_ring_mgr_init(pdata))
 		return -ENODEV;
 
-	if (pdata->clk) {
+	if (!IS_ERR(pdata->clk)) {
 		clk_prepare_enable(pdata->clk);
 		clk_disable_unprepare(pdata->clk);
 		clk_prepare_enable(pdata->clk);
@@ -612,7 +629,8 @@
 
 static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
 {
-	clk_disable_unprepare(pdata->clk);
+	if (!IS_ERR(pdata->clk))
+		clk_disable_unprepare(pdata->clk);
 }
 
 static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
@@ -734,7 +752,7 @@
 	if (ret)
 		return -EINVAL;
 
-	phy = get_phy_device(mdio, phy_id, true);
+	phy = get_phy_device(mdio, phy_id, false);
 	if (!phy || IS_ERR(phy))
 		return -EIO;
 
@@ -803,3 +821,12 @@
 	.cle_bypass = xgene_enet_cle_bypass,
 	.shutdown = xgene_gport_shutdown,
 };
+
+struct xgene_ring_ops xgene_ring1_ops = {
+	.num_ring_config = NUM_RING_CONFIG,
+	.num_ring_id_shift = 6,
+	.setup = xgene_enet_setup_ring,
+	.clear = xgene_enet_clear_ring,
+	.wr_cmd = xgene_enet_wr_cmd,
+	.len = xgene_enet_ring_len,
+};
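
xgene_ring1_ops above (and xgene_ring2_ops in the new xgene_enet_ring2.c later
in this series) hang the generation-specific ring-manager details off an ops
table, so callers never branch on the SoC revision. A small sketch of the
dispatch this enables (example_post_buffers is hypothetical; only the
ring_ops indirection is from the driver):

	static void example_post_buffers(struct xgene_enet_pdata *pdata,
					 struct xgene_enet_desc_ring *ring,
					 int nbuf)
	{
		/* ring1 or ring2 behaviour, chosen once at probe time */
		pdata->ring_ops->wr_cmd(ring, nbuf);

		if (pdata->ring_ops->len(ring) == 0)
			netdev_dbg(ring->ndev, "ring %d drained\n", ring->num);
	}
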
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index d9bc89d6..541bed0 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -26,6 +26,7 @@
 
 struct xgene_enet_pdata;
 struct xgene_enet_stats;
+struct xgene_enet_desc_ring;
 
 /* clears and then set bits */
 static inline void xgene_set_bits(u32 *dst, u32 val, u32 start, u32 len)
@@ -101,8 +102,8 @@
 
 #define BLOCK_ETH_CSR_OFFSET		0x2000
 #define BLOCK_ETH_RING_IF_OFFSET	0x9000
+#define BLOCK_ETH_CLKRST_CSR_OFFSET	0xc000
 #define BLOCK_ETH_DIAG_CSR_OFFSET	0xD000
-
 #define BLOCK_ETH_MAC_OFFSET		0x0000
 #define BLOCK_ETH_MAC_CSR_OFFSET	0x2800
 
@@ -261,6 +262,7 @@
 
 enum xgene_ring_owner {
 	RING_OWNER_ETH0,
+	RING_OWNER_ETH1,
 	RING_OWNER_CPU = 15,
 	RING_OWNER_INVALID
 };
@@ -314,9 +316,6 @@
 		      size / WORK_DESC_SIZE;
 }
 
-struct xgene_enet_desc_ring *xgene_enet_setup_ring(
-		struct xgene_enet_desc_ring *ring);
-void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring);
 void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
 			    struct xgene_enet_pdata *pdata,
 			    enum xgene_enet_err_code status);
@@ -327,5 +326,6 @@
 
 extern struct xgene_mac_ops xgene_gmac_ops;
 extern struct xgene_port_ops xgene_gport_ops;
+extern struct xgene_ring_ops xgene_ring1_ops;
 
 #endif /* __XGENE_ENET_HW_H__ */
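
The GET_VAL(NUMMSGSINQ, ring_state) form used in the new ring length helpers
replaces the open-coded mask-and-shift that xgene_enet_main.c used to carry. A
self-contained sketch of how such token-pasting field accessors typically
work, reusing the X2_NUMMSGSINQ_POS/LEN values from xgene_enet_ring2.h (the
driver's real SET_VAL/GET_VAL definitions may differ in detail):

	#include <stdint.h>
	#include <stdio.h>

	#define FIELD_MASK(len)		((1u << (len)) - 1)
	#define SET_VAL(field, val) \
		(((val) & FIELD_MASK(field ## _LEN)) << field ## _POS)
	#define GET_VAL(field, src) \
		(((src) >> field ## _POS) & FIELD_MASK(field ## _LEN))

	#define X2_NUMMSGSINQ_POS	0
	#define X2_NUMMSGSINQ_LEN	17

	int main(void)
	{
		/* ring state word reporting 42 messages in the queue */
		uint32_t state = SET_VAL(X2_NUMMSGSINQ, 42u);

		printf("%u\n", GET_VAL(X2_NUMMSGSINQ, state));	/* prints 42 */
		return 0;
	}
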
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 40d3530..95153b2 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -28,6 +28,9 @@
 #define RES_RING_CSR	1
 #define RES_RING_CMD	2
 
+static const struct of_device_id xgene_enet_of_match[];
+static const struct acpi_device_id xgene_enet_acpi_match[];
+
 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
 {
 	struct xgene_enet_raw_desc16 *raw_desc;
@@ -48,6 +51,7 @@
 {
 	struct sk_buff *skb;
 	struct xgene_enet_raw_desc16 *raw_desc;
+	struct xgene_enet_pdata *pdata;
 	struct net_device *ndev;
 	struct device *dev;
 	dma_addr_t dma_addr;
@@ -58,6 +62,7 @@
 
 	ndev = buf_pool->ndev;
 	dev = ndev_to_dev(buf_pool->ndev);
+	pdata = netdev_priv(ndev);
 	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
 	len = XGENE_ENET_MAX_MTU;
 
@@ -82,7 +87,7 @@
 		tail = (tail + 1) & slots;
 	}
 
-	iowrite32(nbuf, buf_pool->cmd);
+	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
 	buf_pool->tail = tail;
 
 	return 0;
@@ -102,26 +107,16 @@
 	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
 }
 
-static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
-{
-	u32 __iomem *cmd_base = ring->cmd_base;
-	u32 ring_state, num_msgs;
-
-	ring_state = ioread32(&cmd_base[1]);
-	num_msgs = ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN);
-
-	return num_msgs >> NUMMSGSINQ_POS;
-}
-
 static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
 {
+	struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
 	struct xgene_enet_raw_desc16 *raw_desc;
 	u32 slots = buf_pool->slots - 1;
 	u32 tail = buf_pool->tail;
 	u32 userinfo;
 	int i, len;
 
-	len = xgene_enet_ring_len(buf_pool);
+	len = pdata->ring_ops->len(buf_pool);
 	for (i = 0; i < len; i++) {
 		tail = (tail - 1) & slots;
 		raw_desc = &buf_pool->raw_desc16[tail];
@@ -131,7 +126,7 @@
 		dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
 	}
 
-	iowrite32(-len, buf_pool->cmd);
+	pdata->ring_ops->wr_cmd(buf_pool, -len);
 	buf_pool->tail = tail;
 }
 
@@ -263,8 +258,8 @@
 	struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
 	u32 tx_level, cq_level;
 
-	tx_level = xgene_enet_ring_len(tx_ring);
-	cq_level = xgene_enet_ring_len(cp_ring);
+	tx_level = pdata->ring_ops->len(tx_ring);
+	cq_level = pdata->ring_ops->len(cp_ring);
 	if (unlikely(tx_level > pdata->tx_qcnt_hi ||
 		     cq_level > pdata->cp_qcnt_hi)) {
 		netif_stop_queue(ndev);
@@ -276,7 +271,7 @@
 		return NETDEV_TX_OK;
 	}
 
-	iowrite32(1, tx_ring->cmd);
+	pdata->ring_ops->wr_cmd(tx_ring, 1);
 	skb_tx_timestamp(skb);
 	tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);
 
@@ -389,11 +384,11 @@
 	} while (--budget);
 
 	if (likely(count)) {
-		iowrite32(-count, ring->cmd);
+		pdata->ring_ops->wr_cmd(ring, -count);
 		ring->head = head;
 
 		if (netif_queue_stopped(ring->ndev)) {
-			if (xgene_enet_ring_len(ring) < pdata->cp_qcnt_low)
+			if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low)
 				netif_wake_queue(ring->ndev);
 		}
 	}
@@ -510,6 +505,7 @@
 	else
 		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
 
+	netif_carrier_off(ndev);
 	netif_start_queue(ndev);
 
 	return ret;
@@ -545,7 +541,7 @@
 	pdata = netdev_priv(ring->ndev);
 	dev = ndev_to_dev(ring->ndev);
 
-	xgene_enet_clear_ring(ring);
+	pdata->ring_ops->clear(ring);
 	dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
 }
 
@@ -598,15 +594,17 @@
 
 static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
 {
+	struct xgene_enet_pdata *pdata;
 	struct device *dev;
 
 	if (!ring)
 		return;
 
 	dev = ndev_to_dev(ring->ndev);
+	pdata = netdev_priv(ring->ndev);
 
 	if (ring->desc_addr) {
-		xgene_enet_clear_ring(ring);
+		pdata->ring_ops->clear(ring);
 		dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
 	}
 	devm_kfree(dev, ring);
@@ -637,6 +635,25 @@
 	}
 }
 
+static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
+				 struct xgene_enet_desc_ring *ring)
+{
+	if ((pdata->enet_id == XGENE_ENET2) &&
+	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
+		return true;
+	}
+
+	return false;
+}
+
+static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
+					      struct xgene_enet_desc_ring *ring)
+{
+	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
+
+	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
+}
+
 static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
 			struct net_device *ndev, u32 ring_num,
 			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
@@ -668,9 +685,20 @@
 	}
 	ring->size = size;
 
-	ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6);
+	if (is_irq_mbox_required(pdata, ring)) {
+		ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
+				&ring->irq_mbox_dma, GFP_KERNEL);
+		if (!ring->irq_mbox_addr) {
+			dma_free_coherent(dev, size, ring->desc_addr,
+					  ring->dma);
+			devm_kfree(dev, ring);
+			return NULL;
+		}
+	}
+
+	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
 	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
-	ring = xgene_enet_setup_ring(ring);
+	ring = pdata->ring_ops->setup(ring);
 	netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
 		   ring->num, ring->size, ring->id, ring->slots);
 
@@ -682,12 +710,34 @@
 	return (owner << 6) | (bufnum & GENMASK(5, 0));
 }
 
+static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
+{
+	enum xgene_ring_owner owner;
+
+	if (p->enet_id == XGENE_ENET1) {
+		switch (p->phy_mode) {
+		case PHY_INTERFACE_MODE_SGMII:
+			owner = RING_OWNER_ETH0;
+			break;
+		default:
+			owner = (!p->port_id) ? RING_OWNER_ETH0 :
+						RING_OWNER_ETH1;
+			break;
+		}
+	} else {
+		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
+	}
+
+	return owner;
+}
+
 static int xgene_enet_create_desc_rings(struct net_device *ndev)
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 	struct device *dev = ndev_to_dev(ndev);
 	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
 	struct xgene_enet_desc_ring *buf_pool = NULL;
+	enum xgene_ring_owner owner;
 	u8 cpu_bufnum = pdata->cpu_bufnum;
 	u8 eth_bufnum = pdata->eth_bufnum;
 	u8 bp_bufnum = pdata->bp_bufnum;
@@ -696,6 +746,7 @@
 	int ret;
 
 	/* allocate rx descriptor ring */
+	owner = xgene_derive_ring_owner(pdata);
 	ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
 	rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
 					      RING_CFGSIZE_16KB, ring_id);
@@ -705,7 +756,8 @@
 	}
 
 	/* allocate buffer pool for receiving packets */
-	ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, bp_bufnum++);
+	owner = xgene_derive_ring_owner(pdata);
+	ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
 	buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
 					       RING_CFGSIZE_2KB, ring_id);
 	if (!buf_pool) {
@@ -734,7 +786,8 @@
 	pdata->rx_ring = rx_ring;
 
 	/* allocate tx descriptor ring */
-	ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++);
+	owner = xgene_derive_ring_owner(pdata);
+	ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
 	tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
 					      RING_CFGSIZE_16KB, ring_id);
 	if (!tx_ring) {
@@ -818,20 +871,38 @@
 	.ndo_set_mac_address = xgene_enet_set_mac_address,
 };
 
-static int xgene_get_port_id(struct device *dev, struct xgene_enet_pdata *pdata)
+#ifdef CONFIG_ACPI
+static int xgene_get_port_id_acpi(struct device *dev,
+				  struct xgene_enet_pdata *pdata)
+{
+	acpi_status status;
+	u64 temp;
+
+	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
+	if (ACPI_FAILURE(status)) {
+		pdata->port_id = 0;
+	} else {
+		pdata->port_id = temp;
+	}
+
+	return 0;
+}
+#endif
+
+static int xgene_get_port_id_dt(struct device *dev,
+				struct xgene_enet_pdata *pdata)
 {
 	u32 id = 0;
 	int ret;
 
-	ret = device_property_read_u32(dev, "port-id", &id);
-	if (!ret && id > 1) {
-		dev_err(dev, "Incorrect port-id specified\n");
-		return -ENODEV;
+	ret = of_property_read_u32(dev->of_node, "port-id", &id);
+	if (ret) {
+		pdata->port_id = 0;
+		ret = 0;
+	} else {
+		pdata->port_id = id & BIT(0);
 	}
 
-	pdata->port_id = id;
-
-	return 0;
+	return ret;
 }
 
 static int xgene_get_mac_address(struct device *dev,
@@ -876,6 +947,7 @@
 	struct device *dev;
 	struct resource *res;
 	void __iomem *base_addr;
+	u32 offset;
 	int ret;
 
 	pdev = pdata->pdev;
@@ -917,7 +989,12 @@
 		return -ENOMEM;
 	}
 
-	ret = xgene_get_port_id(dev, pdata);
+	if (dev->of_node)
+		ret = xgene_get_port_id_dt(dev, pdata);
+#ifdef CONFIG_ACPI
+	else
+		ret = xgene_get_port_id_acpi(dev, pdata);
+#endif
 	if (ret)
 		return ret;
 
@@ -949,27 +1026,35 @@
 	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) {
 		ret = platform_get_irq(pdev, 1);
 		if (ret <= 0) {
-			dev_err(dev, "Unable to get ENET Tx completion IRQ\n");
-			ret = ret ? : -ENXIO;
-			return ret;
+			pdata->cq_cnt = 0;
+			dev_info(dev,
+				 "Unable to get Tx completion IRQ, using Rx IRQ instead\n");
+		} else {
+			pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
+			pdata->txc_irq = ret;
 		}
-		pdata->txc_irq = ret;
 	}
 
 	pdata->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(pdata->clk)) {
 		/* Firmware may have set up the clock already. */
-		pdata->clk = NULL;
+		dev_info(dev, "clocks have been set up already\n");
 	}
 
-	base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
+	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
+		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
+	else
+		base_addr = pdata->base_addr;
 	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
 	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
 	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
 	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
 	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
 		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
-		pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
+		offset = (pdata->enet_id == XGENE_ENET1) ?
+			  BLOCK_ETH_MAC_CSR_OFFSET :
+			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
+		pdata->mcx_mac_csr_addr = base_addr + offset;
 	} else {
 		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
 		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
@@ -1024,33 +1109,52 @@
 		pdata->mac_ops = &xgene_sgmac_ops;
 		pdata->port_ops = &xgene_sgport_ops;
 		pdata->rm = RM1;
-		pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
 		break;
 	default:
 		pdata->mac_ops = &xgene_xgmac_ops;
 		pdata->port_ops = &xgene_xgport_ops;
 		pdata->rm = RM0;
-		pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
 		break;
 	}
 
-	switch (pdata->port_id) {
-	case 0:
-		pdata->cpu_bufnum = START_CPU_BUFNUM_0;
-		pdata->eth_bufnum = START_ETH_BUFNUM_0;
-		pdata->bp_bufnum = START_BP_BUFNUM_0;
-		pdata->ring_num = START_RING_NUM_0;
-		break;
-	case 1:
-		pdata->cpu_bufnum = START_CPU_BUFNUM_1;
-		pdata->eth_bufnum = START_ETH_BUFNUM_1;
-		pdata->bp_bufnum = START_BP_BUFNUM_1;
-		pdata->ring_num = START_RING_NUM_1;
-		break;
-	default:
-		break;
+	if (pdata->enet_id == XGENE_ENET1) {
+		switch (pdata->port_id) {
+		case 0:
+			pdata->cpu_bufnum = START_CPU_BUFNUM_0;
+			pdata->eth_bufnum = START_ETH_BUFNUM_0;
+			pdata->bp_bufnum = START_BP_BUFNUM_0;
+			pdata->ring_num = START_RING_NUM_0;
+			break;
+		case 1:
+			pdata->cpu_bufnum = START_CPU_BUFNUM_1;
+			pdata->eth_bufnum = START_ETH_BUFNUM_1;
+			pdata->bp_bufnum = START_BP_BUFNUM_1;
+			pdata->ring_num = START_RING_NUM_1;
+			break;
+		default:
+			break;
+		}
+		pdata->ring_ops = &xgene_ring1_ops;
+	} else {
+		switch (pdata->port_id) {
+		case 0:
+			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
+			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
+			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
+			pdata->ring_num = X2_START_RING_NUM_0;
+			break;
+		case 1:
+			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
+			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
+			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
+			pdata->ring_num = X2_START_RING_NUM_1;
+			break;
+		default:
+			break;
+		}
+		pdata->rm = RM0;
+		pdata->ring_ops = &xgene_ring2_ops;
 	}
-
 }
 
 static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
@@ -1086,6 +1190,7 @@
 	struct xgene_enet_pdata *pdata;
 	struct device *dev = &pdev->dev;
 	struct xgene_mac_ops *mac_ops;
+	const struct of_device_id *of_id;
 	int ret;
 
 	ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
@@ -1104,6 +1209,24 @@
 			  NETIF_F_GSO |
 			  NETIF_F_GRO;
 
+	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
+	if (of_id) {
+		pdata->enet_id = (enum xgene_enet_id)of_id->data;
+	}
+#ifdef CONFIG_ACPI
+	else {
+		const struct acpi_device_id *acpi_id;
+
+		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
+		if (acpi_id)
+			pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
+	}
+#endif
+	if (!pdata->enet_id) {
+		free_netdev(ndev);
+		return -ENODEV;
+	}
+
 	ret = xgene_enet_get_resources(pdata);
 	if (ret)
 		goto err;
@@ -1165,9 +1288,11 @@
 
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id xgene_enet_acpi_match[] = {
-	{ "APMC0D05", },
-	{ "APMC0D30", },
-	{ "APMC0D31", },
+	{ "APMC0D05", XGENE_ENET1},
+	{ "APMC0D30", XGENE_ENET1},
+	{ "APMC0D31", XGENE_ENET1},
+	{ "APMC0D26", XGENE_ENET2},
+	{ "APMC0D25", XGENE_ENET2},
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
@@ -1175,9 +1300,11 @@
 
 #ifdef CONFIG_OF
 static const struct of_device_id xgene_enet_of_match[] = {
-	{.compatible = "apm,xgene-enet",},
-	{.compatible = "apm,xgene1-sgenet",},
-	{.compatible = "apm,xgene1-xgenet",},
+	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
+	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
+	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
+	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
+	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
 	{},
 };
 
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 8f3d232..1c85fc8 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -35,6 +35,7 @@
 #include <linux/if_vlan.h>
 #include <linux/phy.h>
 #include "xgene_enet_hw.h"
+#include "xgene_enet_ring2.h"
 
 #define XGENE_DRV_VERSION	"v1.0"
 #define XGENE_ENET_MAX_MTU	1536
@@ -51,12 +52,26 @@
 #define START_BP_BUFNUM_1	0x2A
 #define START_RING_NUM_1	264
 
+#define X2_START_CPU_BUFNUM_0	0
+#define X2_START_ETH_BUFNUM_0	0
+#define X2_START_BP_BUFNUM_0	0x20
+#define X2_START_RING_NUM_0	0
+#define X2_START_CPU_BUFNUM_1	0xc
+#define X2_START_ETH_BUFNUM_1	0
+#define X2_START_BP_BUFNUM_1	0x20
+#define X2_START_RING_NUM_1	256
+
 #define IRQ_ID_SIZE		16
 #define XGENE_MAX_TXC_RINGS	1
 
 #define PHY_POLL_LINK_ON	(10 * HZ)
 #define PHY_POLL_LINK_OFF	(PHY_POLL_LINK_ON / 5)
 
+enum xgene_enet_id {
+	XGENE_ENET1 = 1,
+	XGENE_ENET2
+};
+
 /* software context of a descriptor ring */
 struct xgene_enet_desc_ring {
 	struct net_device *ndev;
@@ -68,10 +83,12 @@
 	u16 irq;
 	char irq_name[IRQ_ID_SIZE];
 	u32 size;
-	u32 state[NUM_RING_CONFIG];
+	u32 state[X2_NUM_RING_CONFIG];
 	void __iomem *cmd_base;
 	void __iomem *cmd;
 	dma_addr_t dma;
+	dma_addr_t irq_mbox_dma;
+	void *irq_mbox_addr;
 	u16 dst_ring_num;
 	u8 nbufpool;
 	struct sk_buff *(*rx_skb);
@@ -105,6 +122,15 @@
 	void (*shutdown)(struct xgene_enet_pdata *pdata);
 };
 
+struct xgene_ring_ops {
+	u8 num_ring_config;
+	u8 num_ring_id_shift;
+	struct xgene_enet_desc_ring * (*setup)(struct xgene_enet_desc_ring *);
+	void (*clear)(struct xgene_enet_desc_ring *);
+	void (*wr_cmd)(struct xgene_enet_desc_ring *, int);
+	u32 (*len)(struct xgene_enet_desc_ring *);
+};
+
 /* ethernet private data */
 struct xgene_enet_pdata {
 	struct net_device *ndev;
@@ -113,6 +139,7 @@
 	int phy_speed;
 	struct clk *clk;
 	struct platform_device *pdev;
+	enum xgene_enet_id enet_id;
 	struct xgene_enet_desc_ring *tx_ring;
 	struct xgene_enet_desc_ring *rx_ring;
 	char *dev_name;
@@ -136,6 +163,7 @@
 	struct rtnl_link_stats64 stats;
 	struct xgene_mac_ops *mac_ops;
 	struct xgene_port_ops *port_ops;
+	struct xgene_ring_ops *ring_ops;
 	struct delayed_work link_work;
 	u32 port_id;
 	u8 cpu_bufnum;
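
The enet_id member added above is filled in from the match tables in
xgene_enet_main.c, where the SoC generation rides along as the of_device_id
.data / acpi_device_id .driver_data payload. A sketch of that retrieval
pattern in isolation (example_get_generation is hypothetical; of_match_device()
and the cast back through uintptr_t are the standard idiom):

	static int example_get_generation(struct platform_device *pdev)
	{
		const struct of_device_id *of_id;

		of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
		if (of_id)
			return (int)(uintptr_t)of_id->data;	/* XGENE_ENET1/2 */

		return -ENODEV;
	}
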
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
new file mode 100644
index 0000000..0b6896b
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
@@ -0,0 +1,200 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xgene_enet_main.h"
+#include "xgene_enet_hw.h"
+#include "xgene_enet_ring2.h"
+
+static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
+{
+	u32 *ring_cfg = ring->state;
+	u64 addr = ring->dma;
+
+	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
+		ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
+		ring_cfg[3] |= SET_BIT(X2_DEQINTEN);
+	}
+	ring_cfg[0] |= SET_VAL(X2_CFGCRID, 1);
+
+	addr >>= 8;
+	ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr);
+
+	addr >>= 27;
+	ring_cfg[3] |= SET_VAL(RINGSIZE, ring->cfgsize)
+		    | ACCEPTLERR
+		    | SET_VAL(RINGADDRH, addr);
+	ring_cfg[4] |= SET_VAL(X2_SELTHRSH, 1);
+	ring_cfg[5] |= SET_BIT(X2_QBASE_AM) | SET_BIT(X2_MSG_AM);
+}
+
+static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
+{
+	u32 *ring_cfg = ring->state;
+	bool is_bufpool;
+	u32 val;
+
+	is_bufpool = xgene_enet_is_bufpool(ring->id);
+	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
+	ring_cfg[4] |= SET_VAL(X2_RINGTYPE, val);
+	if (is_bufpool)
+		ring_cfg[3] |= SET_VAL(RINGMODE, BUFPOOL_MODE);
+}
+
+static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
+{
+	u32 *ring_cfg = ring->state;
+
+	ring_cfg[3] |= RECOMBBUF;
+	ring_cfg[4] |= SET_VAL(X2_RECOMTIMEOUT, 0x7);
+}
+
+static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
+				 u32 offset, u32 data)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+
+	iowrite32(data, pdata->ring_csr_addr + offset);
+}
+
+static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+	int i;
+
+	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
+	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
+		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
+				     ring->state[i]);
+	}
+}
+
+static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
+{
+	memset(ring->state, 0, sizeof(ring->state));
+	xgene_enet_write_ring_state(ring);
+}
+
+static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
+{
+	enum xgene_ring_owner owner;
+
+	xgene_enet_ring_set_type(ring);
+
+	owner = xgene_enet_ring_owner(ring->id);
+	if (owner == RING_OWNER_ETH0 || owner == RING_OWNER_ETH1)
+		xgene_enet_ring_set_recombbuf(ring);
+
+	xgene_enet_ring_init(ring);
+	xgene_enet_write_ring_state(ring);
+}
+
+static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
+{
+	u32 ring_id_val, ring_id_buf;
+	bool is_bufpool;
+
+	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)
+		return;
+
+	is_bufpool = xgene_enet_is_bufpool(ring->id);
+
+	ring_id_val = ring->id & GENMASK(9, 0);
+	ring_id_val |= OVERWRITE;
+
+	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
+	ring_id_buf |= PREFETCH_BUF_EN;
+	if (is_bufpool)
+		ring_id_buf |= IS_BUFFER_POOL;
+
+	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
+	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
+}
+
+static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
+{
+	u32 ring_id;
+
+	ring_id = ring->id | OVERWRITE;
+	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
+	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
+}
+
+static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
+				    struct xgene_enet_desc_ring *ring)
+{
+	bool is_bufpool;
+	u32 addr, i;
+
+	xgene_enet_clr_ring_state(ring);
+	xgene_enet_set_ring_state(ring);
+	xgene_enet_set_ring_id(ring);
+
+	ring->slots = xgene_enet_get_numslots(ring->id, ring->size);
+
+	is_bufpool = xgene_enet_is_bufpool(ring->id);
+	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
+		return ring;
+
+	addr = CSR_VMID0_INTR_MBOX + (4 * (ring->id & RING_BUFNUM_MASK));
+	xgene_enet_ring_wr32(ring, addr, ring->irq_mbox_dma >> 10);
+
+	for (i = 0; i < ring->slots; i++)
+		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);
+
+	return ring;
+}
+
+static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
+{
+	xgene_enet_clr_desc_ring_id(ring);
+	xgene_enet_clr_ring_state(ring);
+}
+
+static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
+{
+	u32 data = 0;
+
+	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
+		data = SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK) |
+		       INTR_CLEAR;
+	}
+	data |= (count & GENMASK(16, 0));
+
+	iowrite32(data, ring->cmd);
+}
+
+static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
+{
+	u32 __iomem *cmd_base = ring->cmd_base;
+	u32 ring_state, num_msgs;
+
+	ring_state = ioread32(&cmd_base[1]);
+	num_msgs = GET_VAL(X2_NUMMSGSINQ, ring_state);
+
+	return num_msgs;
+}
+
+struct xgene_ring_ops xgene_ring2_ops = {
+	.num_ring_config = X2_NUM_RING_CONFIG,
+	.num_ring_id_shift = 13,
+	.setup = xgene_enet_setup_ring,
+	.clear = xgene_enet_clear_ring,
+	.wr_cmd = xgene_enet_wr_cmd,
+	.len = xgene_enet_ring_len,
+};
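
On the second-generation ring manager, xgene_enet_wr_cmd() above packs
everything into a single doorbell write: bits 16:0 carry the (two's-complement,
masked) buffer-count delta, bit 23 requests an interrupt clear, and bits 28:24
select the interrupt line. A standalone decode of such a value, reusing the
field definitions from xgene_enet_ring2.h (the program itself is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define X2_INTLINE_POS	24
	#define X2_INTLINE_LEN	5
	#define INTR_CLEAR	(1u << 23)
	#define COUNT_MASK	0x1ffffu	/* GENMASK(16, 0) */

	int main(void)
	{
		/* doorbell for CPU-owned ring bufnum 3, posting 2 buffers */
		uint32_t data = (3u << X2_INTLINE_POS) | INTR_CLEAR |
				(2u & COUNT_MASK);

		printf("intline=%u clear=%d count=%u\n",
		       (data >> X2_INTLINE_POS) & ((1u << X2_INTLINE_LEN) - 1),
		       !!(data & INTR_CLEAR),
		       data & COUNT_MASK);
		return 0;
	}
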
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h
new file mode 100644
index 0000000..8b235db
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h
@@ -0,0 +1,49 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_RING2_H__
+#define __XGENE_ENET_RING2_H__
+
+#include "xgene_enet_main.h"
+
+#define X2_NUM_RING_CONFIG	6
+
+#define INTR_MBOX_SIZE		1024
+#define CSR_VMID0_INTR_MBOX	0x0270
+#define INTR_CLEAR		BIT(23)
+
+#define X2_MSG_AM_POS		10
+#define X2_QBASE_AM_POS		11
+#define X2_INTLINE_POS		24
+#define X2_INTLINE_LEN		5
+#define X2_CFGCRID_POS		29
+#define X2_CFGCRID_LEN		3
+#define X2_SELTHRSH_POS		7
+#define X2_SELTHRSH_LEN		3
+#define X2_RINGTYPE_POS		23
+#define X2_RINGTYPE_LEN		2
+#define X2_DEQINTEN_POS		29
+#define X2_RECOMTIMEOUT_POS	0
+#define X2_RECOMTIMEOUT_LEN	7
+#define X2_NUMMSGSINQ_POS	0
+#define X2_NUMMSGSINQ_LEN	17
+
+extern struct xgene_ring_ops xgene_ring2_ops;
+
+#endif /* __XGENE_ENET_RING2_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index f27fb6f2..05b817e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -21,6 +21,7 @@
 #include "xgene_enet_main.h"
 #include "xgene_enet_hw.h"
 #include "xgene_enet_sgmac.h"
+#include "xgene_enet_xgmac.h"
 
 static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
 {
@@ -39,6 +40,14 @@
 	iowrite32(val, p->eth_diag_csr_addr + offset);
 }
 
+static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
+				  u32 offset, u32 val)
+{
+	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
+
+	iowrite32(val, addr);
+}
+
 static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl,
 				   u32 wr_addr, u32 wr_data)
 {
@@ -140,8 +149,9 @@
 
 static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
 {
-	u32 val = 0xffffffff;
+	u32 val;
 
+	val = (p->enet_id == XGENE_ENET1) ? 0xffffffff : 0;
 	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
 	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
 }
@@ -227,6 +237,8 @@
 {
 	u32 data, loop = 10;
 	u32 offset = p->port_id * 4;
+	u32 enet_spare_cfg_reg, rsif_config_reg;
+	u32 cfg_bypass_reg, rx_dv_gate_reg;
 
 	xgene_sgmac_reset(p);
 
@@ -239,7 +251,7 @@
 					  SGMII_STATUS_ADDR >> 2);
 		if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS))
 			break;
-		usleep_range(10, 20);
+		usleep_range(1000, 2000);
 	}
 	if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
 		netdev_err(p->ndev, "Auto-negotiation failed\n");
@@ -249,33 +261,38 @@
 	xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2);
 	xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE);
 
-	data = xgene_enet_rd_csr(p, ENET_SPARE_CFG_REG_ADDR);
+	if (p->enet_id == XGENE_ENET1) {
+		enet_spare_cfg_reg = ENET_SPARE_CFG_REG_ADDR;
+		rsif_config_reg = RSIF_CONFIG_REG_ADDR;
+		cfg_bypass_reg = CFG_BYPASS_ADDR;
+		rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR;
+	} else {
+		enet_spare_cfg_reg = XG_ENET_SPARE_CFG_REG_ADDR;
+		rsif_config_reg = XG_RSIF_CONFIG_REG_ADDR;
+		cfg_bypass_reg = XG_CFG_BYPASS_ADDR;
+		rx_dv_gate_reg = XG_MCX_RX_DV_GATE_REG_0_ADDR;
+	}
+
+	data = xgene_enet_rd_csr(p, enet_spare_cfg_reg);
 	data |= MPA_IDLE_WITH_QMI_EMPTY;
-	xgene_enet_wr_csr(p, ENET_SPARE_CFG_REG_ADDR, data);
+	xgene_enet_wr_csr(p, enet_spare_cfg_reg, data);
 
 	xgene_sgmac_set_mac_addr(p);
 
-	data = xgene_enet_rd_csr(p, DEBUG_REG_ADDR);
-	data |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
-	xgene_enet_wr_csr(p, DEBUG_REG_ADDR, data);
-
 	/* Adjust MDC clock frequency */
 	data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
 	MGMT_CLOCK_SEL_SET(&data, 7);
 	xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);
 
 	/* Enable drop if bufpool not available */
-	data = xgene_enet_rd_csr(p, RSIF_CONFIG_REG_ADDR);
+	data = xgene_enet_rd_csr(p, rsif_config_reg);
 	data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
-	xgene_enet_wr_csr(p, RSIF_CONFIG_REG_ADDR, data);
-
-	/* Rtype should be copied from FP */
-	xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0);
+	xgene_enet_wr_csr(p, rsif_config_reg, data);
 
 	/* Bypass traffic gating */
-	xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR + offset, TX_PORT0);
-	xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX);
-	xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR + offset, RESUME_RX0);
+	xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
+	xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
+	xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg + offset, RESUME_RX0);
 }
 
 static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
@@ -317,9 +334,11 @@
 	if (!xgene_ring_mgr_init(p))
 		return -ENODEV;
 
-	clk_prepare_enable(p->clk);
-	clk_disable_unprepare(p->clk);
-	clk_prepare_enable(p->clk);
+	if (!IS_ERR(p->clk)) {
+		clk_prepare_enable(p->clk);
+		clk_disable_unprepare(p->clk);
+		clk_prepare_enable(p->clk);
+	}
 
 	xgene_enet_ecc_init(p);
 	xgene_enet_config_ring_if_assoc(p);
@@ -331,19 +350,29 @@
 				  u32 dst_ring_num, u16 bufpool_id)
 {
 	u32 data, fpsel;
+	u32 cle_bypass_reg0, cle_bypass_reg1;
 	u32 offset = p->port_id * MAC_OFFSET;
 
+	if (p->enet_id == XGENE_ENET1) {
+		cle_bypass_reg0 = CLE_BYPASS_REG0_0_ADDR;
+		cle_bypass_reg1 = CLE_BYPASS_REG1_0_ADDR;
+	} else {
+		cle_bypass_reg0 = XCLE_BYPASS_REG0_ADDR;
+		cle_bypass_reg1 = XCLE_BYPASS_REG1_ADDR;
+	}
+
 	data = CFG_CLE_BYPASS_EN0;
-	xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR + offset, data);
+	xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data);
 
 	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
 	data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
-	xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR + offset, data);
+	xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
 }
 
 static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
 {
-	clk_disable_unprepare(p->clk);
+	if (!IS_ERR(p->clk))
+		clk_disable_unprepare(p->clk);
 }
 
 static void xgene_enet_link_state(struct work_struct *work)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index a18a9d1..05edb84 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -122,7 +122,6 @@
 
 	return true;
 }
-
 static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
 			      u32 rd_addr, u32 *rd_data)
 {
@@ -257,9 +256,11 @@
 	if (!xgene_ring_mgr_init(pdata))
 		return -ENODEV;
 
-	clk_prepare_enable(pdata->clk);
-	clk_disable_unprepare(pdata->clk);
-	clk_prepare_enable(pdata->clk);
+	if (!IS_ERR(pdata->clk)) {
+		clk_prepare_enable(pdata->clk);
+		clk_disable_unprepare(pdata->clk);
+		clk_prepare_enable(pdata->clk);
+	}
 
 	xgene_enet_ecc_init(pdata);
 	xgene_enet_config_ring_if_assoc(pdata);
@@ -286,7 +287,8 @@
 
 static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
 {
-	clk_disable_unprepare(pdata->clk);
+	if (!IS_ERR(pdata->clk))
+		clk_disable_unprepare(pdata->clk);
 }
 
 static void xgene_enet_link_state(struct work_struct *work)
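
Several hunks above (the reset and shutdown paths of the SGMII and XGMII
variants) stop nulling pdata->clk when devm_clk_get() fails and instead keep
the ERR_PTR, testing !IS_ERR() at each use so a firmware-managed clock is
skipped rather than dereferenced. A standalone sketch of the ERR_PTR
convention these checks rely on (mimicking include/linux/err.h):

	#include <stdio.h>

	#define MAX_ERRNO	4095
	#define ERR_PTR(err)	((void *)(long)(err))
	#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

	int main(void)
	{
		void *clk = ERR_PTR(-2);	/* e.g. firmware owns the clock */

		if (!IS_ERR(clk))
			printf("clk_prepare_enable()\n");
		else
			printf("no kernel-managed clock, skipping\n");
		return 0;
	}
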
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index 5a5296a..bf0a994 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -21,9 +21,28 @@
 #ifndef __XGENE_ENET_XGMAC_H__
 #define __XGENE_ENET_XGMAC_H__
 
+#define X2_BLOCK_ETH_MAC_CSR_OFFSET	0x3000
 #define BLOCK_AXG_MAC_OFFSET		0x0800
 #define BLOCK_AXG_MAC_CSR_OFFSET	0x2000
 
+#define XGENET_CONFIG_REG_ADDR		0x20
+#define XGENET_SRST_ADDR		0x00
+#define XGENET_CLKEN_ADDR		0x08
+
+#define CSR_CLK		BIT(0)
+#define XGENET_CLK	BIT(1)
+#define PCS_CLK		BIT(3)
+#define AN_REF_CLK	BIT(4)
+#define AN_CLK		BIT(5)
+#define AD_CLK		BIT(6)
+
+#define CSR_RST		BIT(0)
+#define XGENET_RST	BIT(1)
+#define PCS_RST		BIT(3)
+#define AN_REF_RST	BIT(4)
+#define AN_RST		BIT(5)
+#define AD_RST		BIT(6)
+
 #define AXGMAC_CONFIG_0			0x0000
 #define AXGMAC_CONFIG_1			0x0004
 #define HSTMACRST			BIT(31)
@@ -38,6 +57,7 @@
 #define HSTMACADR_MSW_ADDR		0x0014
 #define HSTMAXFRAME_LENGTH_ADDR		0x0020
 
+#define XG_MCX_RX_DV_GATE_REG_0_ADDR	0x0004
 #define XG_RSIF_CONFIG_REG_ADDR		0x00a0
 #define XCLE_BYPASS_REG0_ADDR           0x0160
 #define XCLE_BYPASS_REG1_ADDR           0x0164
diff --git a/drivers/net/ethernet/apple/Kconfig b/drivers/net/ethernet/apple/Kconfig
index 1375e2d..d19a41b 100644
--- a/drivers/net/ethernet/apple/Kconfig
+++ b/drivers/net/ethernet/apple/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on (PPC_PMAC && PPC32) || MAC
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
@@ -59,7 +57,6 @@
 	---help---
 	  Support for the onboard AMD 79C940 MACE Ethernet controller used in
 	  the 660AV and 840AV Macintosh.  If you have one of these Macintoshes
-	  say Y and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  say Y here.
 
 endif # NET_VENDOR_APPLE
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index dea29ee..52a6b16 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -6,9 +6,7 @@
 	bool "ARC devices"
 	default y
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 58ad37c..e05b256 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index a6f9142..8be9eab 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -26,8 +26,7 @@
 	select PHYLIB
 	---help---
 	  If you have a network (Ethernet) controller of this type, say Y
-	  or M and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  or M here.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called b44.
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index 3e9c3fc7..65d88d7 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -1,6 +1,8 @@
 #ifndef _B44_H
 #define _B44_H
 
+#include <linux/brcmphy.h>
+
 /* Register layout. (These correspond to struct _bcmenettregs in bcm4400.) */
 #define	B44_DEVCTRL	0x0000UL /* Device Control */
 #define  DEVCTRL_MPM		0x00000040 /* Magic Packet PME Enable (B0 only) */
@@ -281,8 +283,10 @@
 };
 
 #define B44_MCAST_TABLE_SIZE		32
-#define B44_PHY_ADDR_NO_LOCAL_PHY	30 /* no local phy regs */
-#define B44_PHY_ADDR_NO_PHY		31 /* no phy present at all */
+/* no local phy regs, e.g: Broadcom switches pseudo-PHY */
+#define B44_PHY_ADDR_NO_LOCAL_PHY	BRCM_PSEUDO_PHY_ADDR
+/* no phy present at all */
+#define B44_PHY_ADDR_NO_PHY		31
 #define B44_MDC_RATIO			5000000
 
 #define	B44_STAT_REG_DECLARE		\
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 783543a..909ad7a 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -456,6 +456,67 @@
 	return 0;
 }
 
+static int bcm_sysport_get_coalesce(struct net_device *dev,
+				    struct ethtool_coalesce *ec)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	u32 reg;
+
+	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));
+
+	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
+	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;
+
+	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
+
+	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
+	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
+
+	return 0;
+}
+
+static int bcm_sysport_set_coalesce(struct net_device *dev,
+				    struct ethtool_coalesce *ec)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	unsigned int i;
+	u32 reg;
+
+	/* The base system clock is 125 MHz; the DMA timeout is this reference
+	 * clock divided by 1024, which yields roughly 8.192 us per tick. The
+	 * maximum value has to fit in RING_TIMEOUT_MASK (16 bits).
+	 */
+	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
+	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
+	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
+	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
+		return -EINVAL;
+
+	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
+	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
+		return -EINVAL;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
+		reg &= ~(RING_INTR_THRESH_MASK |
+			 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
+		reg |= ec->tx_max_coalesced_frames;
+		reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
+			 RING_TIMEOUT_SHIFT;
+		tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
+	}
+
+	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
+	reg &= ~(RDMA_INTR_THRESH_MASK |
+		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
+	reg |= ec->rx_max_coalesced_frames;
+	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
+			    RDMA_TIMEOUT_SHIFT;
+	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
+
+	return 0;
+}
+
 static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
 {
 	dev_kfree_skb_any(cb->skb);
@@ -463,67 +524,70 @@
 	dma_unmap_addr_set(cb, dma_addr, 0);
 }
 
-static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
-				 struct bcm_sysport_cb *cb)
+static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
+					     struct bcm_sysport_cb *cb)
 {
 	struct device *kdev = &priv->pdev->dev;
 	struct net_device *ndev = priv->netdev;
+	struct sk_buff *skb, *rx_skb;
 	dma_addr_t mapping;
-	int ret;
 
-	cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
-	if (!cb->skb) {
+	/* Allocate a new SKB for a new packet */
+	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+	if (!skb) {
+		priv->mib.alloc_rx_buff_failed++;
 		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
-		return -ENOMEM;
+		return NULL;
 	}
 
-	mapping = dma_map_single(kdev, cb->skb->data,
+	mapping = dma_map_single(kdev, skb->data,
 				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
-	ret = dma_mapping_error(kdev, mapping);
-	if (ret) {
+	if (dma_mapping_error(kdev, mapping)) {
 		priv->mib.rx_dma_failed++;
-		bcm_sysport_free_cb(cb);
+		dev_kfree_skb_any(skb);
 		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
-		return ret;
+		return NULL;
 	}
 
-	dma_unmap_addr_set(cb, dma_addr, mapping);
-	dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+	/* Grab the current SKB on the ring */
+	rx_skb = cb->skb;
+	if (likely(rx_skb))
+		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
 
-	priv->rx_bd_assign_index++;
-	priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
-	priv->rx_bd_assign_ptr = priv->rx_bds +
-		(priv->rx_bd_assign_index * DESC_SIZE);
+	/* Put the new SKB on the ring */
+	cb->skb = skb;
+	dma_unmap_addr_set(cb, dma_addr, mapping);
+	dma_desc_set_addr(priv, cb->bd_addr, mapping);
 
 	netif_dbg(priv, rx_status, ndev, "RX refill\n");
 
-	return 0;
+	/* Return the current SKB to the caller */
+	return rx_skb;
 }
 
 static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
 {
 	struct bcm_sysport_cb *cb;
-	int ret = 0;
+	struct sk_buff *skb;
 	unsigned int i;
 
 	for (i = 0; i < priv->num_rx_bds; i++) {
-		cb = &priv->rx_cbs[priv->rx_bd_assign_index];
-		if (cb->skb)
-			continue;
-
-		ret = bcm_sysport_rx_refill(priv, cb);
-		if (ret)
-			break;
+		cb = &priv->rx_cbs[i];
+		skb = bcm_sysport_rx_refill(priv, cb);
+		if (skb)
+			dev_kfree_skb(skb);
+		if (!cb->skb)
+			return -ENOMEM;
 	}
 
-	return ret;
+	return 0;
 }
 
 /* Poll the hardware for up to budget packets to process */
 static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 					unsigned int budget)
 {
-	struct device *kdev = &priv->pdev->dev;
 	struct net_device *ndev = priv->netdev;
 	unsigned int processed = 0, to_process;
 	struct bcm_sysport_cb *cb;
@@ -531,7 +595,6 @@
 	unsigned int p_index;
 	u16 len, status;
 	struct bcm_rsb *rsb;
-	int ret;
 
 	/* Determine how much we should process since last call */
 	p_index = rdma_readl(priv, RDMA_PROD_INDEX);
@@ -549,13 +612,8 @@
 
 	while ((processed < to_process) && (processed < budget)) {
 		cb = &priv->rx_cbs[priv->rx_read_ptr];
-		skb = cb->skb;
+		skb = bcm_sysport_rx_refill(priv, cb);
 
-		processed++;
-		priv->rx_read_ptr++;
-
-		if (priv->rx_read_ptr == priv->num_rx_bds)
-			priv->rx_read_ptr = 0;
 
 		/* We do not have a backing SKB, so we do not have a corresponding
 		 * DMA mapping for this incoming packet since
@@ -566,12 +624,9 @@
 			netif_err(priv, rx_err, ndev, "out of memory!\n");
 			ndev->stats.rx_dropped++;
 			ndev->stats.rx_errors++;
-			goto refill;
+			goto next;
 		}
 
-		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
-				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
-
 		/* Extract the Receive Status Block prepended */
 		rsb = (struct bcm_rsb *)skb->data;
 		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
@@ -583,12 +638,20 @@
 			  p_index, priv->rx_c_index, priv->rx_read_ptr,
 			  len, status);
 
+		if (unlikely(len > RX_BUF_LENGTH)) {
+			netif_err(priv, rx_status, ndev, "oversized packet\n");
+			ndev->stats.rx_length_errors++;
+			ndev->stats.rx_errors++;
+			dev_kfree_skb_any(skb);
+			goto next;
+		}
+
 		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
 			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
 			ndev->stats.rx_dropped++;
 			ndev->stats.rx_errors++;
-			bcm_sysport_free_cb(cb);
-			goto refill;
+			dev_kfree_skb_any(skb);
+			goto next;
 		}
 
 		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
@@ -597,8 +660,8 @@
 				ndev->stats.rx_over_errors++;
 			ndev->stats.rx_dropped++;
 			ndev->stats.rx_errors++;
-			bcm_sysport_free_cb(cb);
-			goto refill;
+			dev_kfree_skb_any(skb);
+			goto next;
 		}
 
 		skb_put(skb, len);
@@ -625,10 +688,12 @@
 		ndev->stats.rx_bytes += len;
 
 		napi_gro_receive(&priv->napi, skb);
-refill:
-		ret = bcm_sysport_rx_refill(priv, cb);
-		if (ret)
-			priv->mib.alloc_rx_buff_failed++;
+next:
+		processed++;
+		priv->rx_read_ptr++;
+
+		if (priv->rx_read_ptr == priv->num_rx_bds)
+			priv->rx_read_ptr = 0;
 	}
 
 	return processed;
@@ -1269,14 +1334,14 @@
 
 static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
 {
+	struct bcm_sysport_cb *cb;
 	u32 reg;
 	int ret;
+	int i;
 
 	/* Initialize SW view of the RX ring */
 	priv->num_rx_bds = NUM_RX_DESC;
 	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
-	priv->rx_bd_assign_ptr = priv->rx_bds;
-	priv->rx_bd_assign_index = 0;
 	priv->rx_c_index = 0;
 	priv->rx_read_ptr = 0;
 	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
@@ -1286,6 +1351,11 @@
 		return -ENOMEM;
 	}
 
+	for (i = 0; i < priv->num_rx_bds; i++) {
+		cb = priv->rx_cbs + i;
+		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
+	}
+
 	ret = bcm_sysport_alloc_rx_bufs(priv);
 	if (ret) {
 		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
@@ -1641,6 +1711,8 @@
 	.get_sset_count		= bcm_sysport_get_sset_count,
 	.get_wol		= bcm_sysport_get_wol,
 	.set_wol		= bcm_sysport_set_wol,
+	.get_coalesce		= bcm_sysport_get_coalesce,
+	.set_coalesce		= bcm_sysport_set_coalesce,
 };
 
 static const struct net_device_ops bcm_sysport_netdev_ops = {
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index e2c043e..f28bf54 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -292,7 +292,7 @@
 #define RDMA_END_ADDR_LO		0x102c
 
 #define RDMA_MBDONE_INTR		0x1030
-#define  RDMA_INTR_THRESH_MASK		0xff
+#define  RDMA_INTR_THRESH_MASK		0x1ff
 #define  RDMA_TIMEOUT_SHIFT		16
 #define  RDMA_TIMEOUT_MASK		0xffff
 
@@ -663,8 +663,6 @@
 
 	/* Receive queue */
 	void __iomem		*rx_bds;
-	void __iomem		*rx_bd_assign_ptr;
-	unsigned int		rx_bd_assign_index;
 	struct bcm_sysport_cb	*rx_cbs;
 	unsigned int		num_rx_bds;
 	unsigned int		rx_read_ptr;
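
The refill path above now follows a swap discipline: allocate and map the
replacement SKB first, and only if that succeeds detach the filled SKB from
the control block, so every ring slot keeps a backing buffer at all times.
A minimal sketch of that shape (hypothetical types, not the driver's):

	struct slot {
		void *buf;	/* buffer currently backing the ring slot */
	};

	/* Returns the filled buffer, or NULL if no replacement could be
	 * allocated; on failure the old buffer stays on the ring and the
	 * caller simply drops the incoming packet.
	 */
	static void *refill_swap(struct slot *s, void *(*alloc_buf)(void))
	{
		void *new_buf = alloc_buf();
		void *old_buf;

		if (!new_buf)
			return NULL;	/* slot keeps its old buffer */

		old_buf = s->buf;	/* detach the filled buffer */
		s->buf = new_buf;	/* slot is backed again */
		return old_buf;
	}

Because bcm_sysport_desc_rx() now refills in place at rx_read_ptr and
consumes the returned SKB directly, the separate rx_bd_assign_ptr/index
bookkeeping (removed in the bcmsysport.h hunk above) is no longer needed.
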
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index db27feb..4fbb093 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -13,6 +13,7 @@
 	dev_dbg(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
 
 #include <linux/bcma/bcma.h>
+#include <linux/brcmphy.h>
 #include <linux/netdevice.h>
 
 #define BGMAC_DEV_CTL				0x000
@@ -349,7 +350,7 @@
 #define BGMAC_DESC_CTL0_SOF			0x80000000	/* Start of frame */
 #define BGMAC_DESC_CTL1_LEN			0x00001FFF
 
-#define BGMAC_PHY_NOREGS			0x1E
+#define BGMAC_PHY_NOREGS			BRCM_PSEUDO_PHY_ADDR
 #define BGMAC_PHY_MASK				0x1F
 
 #define BGMAC_MAX_TX_RINGS			4
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 1f82a04..7a4aaa3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -357,6 +357,7 @@
 struct sw_rx_page {
 	struct page	*page;
 	DEFINE_DMA_UNMAP_ADDR(mapping);
+	unsigned int	offset;
 };
 
 union db_prod {
@@ -381,9 +382,10 @@
 
 #define PAGES_PER_SGE_SHIFT	0
 #define PAGES_PER_SGE		(1 << PAGES_PER_SGE_SHIFT)
-#define SGE_PAGE_SIZE		PAGE_SIZE
-#define SGE_PAGE_SHIFT		PAGE_SHIFT
-#define SGE_PAGE_ALIGN(addr)	PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+#define SGE_PAGE_SHIFT		12
+#define SGE_PAGE_SIZE		(1 << SGE_PAGE_SHIFT)
+#define SGE_PAGE_MASK		(~(SGE_PAGE_SIZE - 1))
+#define SGE_PAGE_ALIGN(addr)	(((addr) + SGE_PAGE_SIZE - 1) & SGE_PAGE_MASK)
 #define SGE_PAGES		(SGE_PAGE_SIZE * PAGES_PER_SGE)
 #define TPA_AGG_SIZE		min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \
 					    SGE_PAGES), 0xffff)
@@ -526,6 +528,12 @@
 	TPA_MODE_GRO
 };
 
+struct bnx2x_alloc_pool {
+	struct page	*page;
+	dma_addr_t	dma;
+	unsigned int	offset;
+};
+
 struct bnx2x_fastpath {
 	struct bnx2x		*bp; /* parent */
 
@@ -599,6 +607,8 @@
 	     4 (for the digits and to make it DWORD aligned) */
 #define FP_NAME_SIZE		(sizeof(((struct net_device *)0)->name) + 8)
 	char			name[FP_NAME_SIZE];
+
+	struct bnx2x_alloc_pool	page_pool;
 };
 
 #define bnx2x_fp(bp, nr, var)	((bp)->fp[(nr)].var)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ec56a9b..e2a6533 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -544,30 +544,49 @@
 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			      u16 index, gfp_t gfp_mask)
 {
-	struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
 	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
 	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+	struct bnx2x_alloc_pool *pool = &fp->page_pool;
 	dma_addr_t mapping;
 
-	if (unlikely(page == NULL)) {
-		BNX2X_ERR("Can't alloc sge\n");
-		return -ENOMEM;
+	if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
+
+		/* put page reference used by the memory pool, since we
+		 * won't be using this page as the mempool anymore.
+		 */
+		if (pool->page)
+			put_page(pool->page);
+
+		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
+		if (unlikely(!pool->page)) {
+			BNX2X_ERR("Can't alloc sge\n");
+			return -ENOMEM;
+		}
+
+		pool->dma = dma_map_page(&bp->pdev->dev, pool->page, 0,
+					 PAGE_SIZE, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(&bp->pdev->dev,
+					       pool->dma))) {
+			__free_pages(pool->page, PAGES_PER_SGE_SHIFT);
+			pool->page = NULL;
+			BNX2X_ERR("Can't map sge\n");
+			return -ENOMEM;
+		}
+		pool->offset = 0;
 	}
 
-	mapping = dma_map_page(&bp->pdev->dev, page, 0,
-			       SGE_PAGES, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-		__free_pages(page, PAGES_PER_SGE_SHIFT);
-		BNX2X_ERR("Can't map sge\n");
-		return -ENOMEM;
-	}
+	get_page(pool->page);
+	sw_buf->page = pool->page;
+	sw_buf->offset = pool->offset;
 
-	sw_buf->page = page;
+	mapping = pool->dma + sw_buf->offset;
 	dma_unmap_addr_set(sw_buf, mapping, mapping);
 
 	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
 	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
 
+	pool->offset += SGE_PAGE_SIZE;
+
 	return 0;
 }
 
@@ -629,20 +648,22 @@
 			return err;
 		}
 
-		/* Unmap the page as we're going to pass it to the stack */
-		dma_unmap_page(&bp->pdev->dev,
-			       dma_unmap_addr(&old_rx_pg, mapping),
-			       SGE_PAGES, DMA_FROM_DEVICE);
+		dma_unmap_single(&bp->pdev->dev,
+				 dma_unmap_addr(&old_rx_pg, mapping),
+				 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
 		/* Add one frag and update the appropriate fields in the skb */
 		if (fp->mode == TPA_MODE_LRO)
-			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+			skb_fill_page_desc(skb, j, old_rx_pg.page,
+					   old_rx_pg.offset, frag_len);
 		else { /* GRO */
 			int rem;
 			int offset = 0;
 			for (rem = frag_len; rem > 0; rem -= gro_size) {
 				int len = rem > gro_size ? gro_size : rem;
 				skb_fill_page_desc(skb, frag_id++,
-						   old_rx_pg.page, offset, len);
+						   old_rx_pg.page,
+						   old_rx_pg.offset + offset,
+						   len);
 				if (offset)
 					get_page(old_rx_pg.page);
 				offset += len;
@@ -662,7 +683,7 @@
 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
 {
 	if (fp->rx_frag_size)
-		put_page(virt_to_head_page(data));
+		skb_free_frag(data);
 	else
 		kfree(data);
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index d7a7175..2b30081 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -804,9 +804,13 @@
 	if (!page)
 		return;
 
-	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
-		       SGE_PAGES, DMA_FROM_DEVICE);
-	__free_pages(page, PAGES_PER_SGE_SHIFT);
+	/* Since many fragments can share the same page, make sure to
+	 * only unmap and free the page once.
+	 */
+	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+			 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+
+	put_page(page);
 
 	sw_buf->page = NULL;
 	sge->addr_hi = 0;
@@ -964,6 +968,25 @@
 	((u8 *)fw_lo)[1]  = mac[4];
 }
 
+static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
+					  struct bnx2x_alloc_pool *pool)
+{
+	if (!pool->page)
+		return;
+
+	/* Page was not fully fragmented.  Unmap unused space */
+	if (pool->offset < PAGE_SIZE) {
+		dma_addr_t dma = pool->dma + pool->offset;
+		int size = PAGE_SIZE - pool->offset;
+
+		dma_unmap_single(&bp->pdev->dev, dma, size, DMA_FROM_DEVICE);
+	}
+
+	put_page(pool->page);
+
+	pool->page = NULL;
+}
+
 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
 					   struct bnx2x_fastpath *fp, int last)
 {
@@ -974,6 +997,8 @@
 
 	for (i = 0; i < last; i++)
 		bnx2x_free_rx_sge(bp, fp, i);
+
+	bnx2x_free_rx_mem_pool(bp, &fp->page_pool);
 }
 
 static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
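
The net effect of the pool above: one alloc_pages() and one dma_map_page()
now feed PAGE_SIZE / SGE_PAGE_SIZE fragments, with the page refcount
tracking outstanding users (the pool keeps one reference, each SGE holds
its own). A condensed sketch of the carve-out step, reusing the structs
from the bnx2x.h hunk (function name hypothetical):

	static dma_addr_t pool_take_frag(struct bnx2x_alloc_pool *pool,
					 struct sw_rx_page *sw_buf)
	{
		dma_addr_t mapping;

		get_page(pool->page);		/* one reference per consumer */
		sw_buf->page = pool->page;
		sw_buf->offset = pool->offset;

		mapping = pool->dma + pool->offset;
		pool->offset += SGE_PAGE_SIZE;	/* next carve-out point */
		return mapping;
	}

On a 64K-page system this turns sixteen 4K SGE allocations into one page
allocation; with 4K pages it degenerates to the previous page-per-SGE
behaviour, since SGE_PAGE_SIZE is now fixed at 4K.
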
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 6043734..b43b2cb 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2770,12 +2770,79 @@
 	return ret;
 }
 
+static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
+{
+	struct bcmgenet_priv *priv = ring->priv;
+	u32 p_index, c_index, intsts, intmsk;
+	struct netdev_queue *txq;
+	unsigned int free_bds;
+	unsigned long flags;
+	bool txq_stopped;
+
+	if (!netif_msg_tx_err(priv))
+		return;
+
+	txq = netdev_get_tx_queue(priv->dev, ring->queue);
+
+	spin_lock_irqsave(&ring->lock, flags);
+	if (ring->index == DESC_INDEX) {
+		intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+		intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
+	} else {
+		intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+		intmsk = 1 << ring->index;
+	}
+	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
+	p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
+	txq_stopped = netif_tx_queue_stopped(txq);
+	free_bds = ring->free_bds;
+	spin_unlock_irqrestore(&ring->lock, flags);
+
+	netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
+		  "TX queue status: %s, interrupts: %s\n"
+		  "(sw)free_bds: %d (sw)size: %d\n"
+		  "(sw)p_index: %d (hw)p_index: %d\n"
+		  "(sw)c_index: %d (hw)c_index: %d\n"
+		  "(sw)clean_p: %d (sw)write_p: %d\n"
+		  "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
+		  ring->index, ring->queue,
+		  txq_stopped ? "stopped" : "active",
+		  intsts & intmsk ? "enabled" : "disabled",
+		  free_bds, ring->size,
+		  ring->prod_index, p_index & DMA_P_INDEX_MASK,
+		  ring->c_index, c_index & DMA_C_INDEX_MASK,
+		  ring->clean_ptr, ring->write_ptr,
+		  ring->cb_ptr, ring->end_ptr);
+}
+
 static void bcmgenet_timeout(struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
+	u32 int0_enable = 0;
+	u32 int1_enable = 0;
+	unsigned int q;
 
 	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
 
+	bcmgenet_disable_tx_napi(priv);
+
+	for (q = 0; q < priv->hw_params->tx_queues; q++)
+		bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
+	bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
+
+	bcmgenet_tx_reclaim_all(dev);
+
+	for (q = 0; q < priv->hw_params->tx_queues; q++)
+		int1_enable |= (1 << q);
+
+	int0_enable = UMAC_IRQ_TXDMA_DONE;
+
+	/* Re-enable TX interrupts if disabled */
+	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
+
+	bcmgenet_enable_tx_napi(priv);
+
 	dev->trans_start = jiffies;
 
 	dev->stats.tx_errors++;
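
Note how bcmgenet_dump_tx_queue() snapshots all software state inside a
single ring-lock section and only calls netif_err() after unlocking, so the
slow console path never extends the critical section. Reduced to a sketch
(fields illustrative):

	spin_lock_irqsave(&ring->lock, flags);
	free_bds    = ring->free_bds;		/* consistent SW view */
	txq_stopped = netif_tx_queue_stopped(txq);
	spin_unlock_irqrestore(&ring->lock, flags);

	netif_err(priv, tx_err, priv->dev, "free_bds: %u stopped: %d\n",
		  free_bds, txq_stopped);
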
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 420949cc..6bef04e 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -47,7 +47,12 @@
 			   HZ / 100);
 	ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
 
-	if (ret & MDIO_READ_FAIL)
+	/* Some broken devices are known not to release the line during
+	 * turn-around, e.g. Broadcom BCM53125 external switches, so check for
+	 * that condition here and ignore the MDIO controller read failure
+	 * indication.
+	 */
+	if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (ret & MDIO_READ_FAIL))
 		return -EIO;
 
 	return ret & 0xffff;
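
phy_ignore_ta_mask is a per-address bitmask on the MDIO bus: a PHY driver
that knows its hardware holds the line through turn-around sets the bit,
and bus drivers such as this one then suppress the spurious read-failure
indication. A hedged sketch of the producer side (probe function name is
hypothetical):

	/* Mark the device as having broken turn-around so MDIO bus
	 * drivers ignore MDIO_READ_FAIL for this address.
	 */
	static int bcm53125_probe_sketch(struct phy_device *phydev)
	{
		phydev->bus->phy_ignore_ta_mask |= 1 << phydev->addr;
		return 0;
	}
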
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 069952f..73c934c 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6618,7 +6618,7 @@
 static void tg3_frag_free(bool is_frag, void *data)
 {
 	if (is_frag)
-		put_page(virt_to_head_page(data));
+		skb_free_frag(data);
 	else
 		kfree(data);
 }
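
Both bnx2x_frag_free() and tg3_frag_free() above move to skb_free_frag(),
the new helper for releasing buffers obtained from the page-fragment
allocator; it replaces the open-coded put_page(virt_to_head_page(...)).
Usage sketch pairing it with netdev_alloc_frag() (the size is illustrative):

	void *data = netdev_alloc_frag(2048);	/* frag-backed buffer */

	if (data) {
		struct sk_buff *skb = build_skb(data, 2048);

		if (!skb)
			skb_free_frag(data);	/* drop the frag reference */
	}
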
diff --git a/drivers/net/ethernet/brocade/Kconfig b/drivers/net/ethernet/brocade/Kconfig
index 4e8c0b6..c4bbe54 100644
--- a/drivers/net/ethernet/brocade/Kconfig
+++ b/drivers/net/ethernet/brocade/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile
index 6e10b99..8584abc 100644
--- a/drivers/net/ethernet/brocade/bna/Makefile
+++ b/drivers/net/ethernet/brocade/bna/Makefile
@@ -9,5 +9,3 @@
 bna-objs := bnad.o bnad_ethtool.o bnad_debugfs.o bna_enet.o bna_tx_rx.o
 bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o
 bna-objs += cna_fwimg.o
-
-EXTRA_CFLAGS := -Idrivers/net/bna
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index cf9f395..95bc8b6 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -282,7 +282,6 @@
 	cee->ioc = ioc;
 
 	bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
-	bfa_q_qe_init(&cee->ioc_notify);
 	bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee);
 	bfa_nw_ioc_notify_register(cee->ioc, &cee->ioc_notify);
 }
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h
index af25d8e..1d11d66 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h
@@ -28,19 +28,6 @@
 
 typedef void (*bfa_sm_t)(void *sm, int event);
 
-/* oc - object class eg. bfa_ioc
- * st - state, eg. reset
- * otype - object type, eg. struct bfa_ioc
- * etype - object type, eg. enum ioc_event
- */
-#define bfa_sm_state_decl(oc, st, otype, etype)			\
-	static void oc ## _sm_ ## st(otype * fsm, etype event)
-
-#define bfa_sm_set_state(_sm, _state)	((_sm)->sm = (bfa_sm_t)(_state))
-#define bfa_sm_send_event(_sm, _event)	((_sm)->sm((_sm), (_event)))
-#define bfa_sm_get_state(_sm)		((_sm)->sm)
-#define bfa_sm_cmp_state(_sm, _state)	((_sm)->sm == (bfa_sm_t)(_state))
-
 /* For converting from state machine function to state encoding. */
 struct bfa_sm_table {
 	bfa_sm_t	sm;	/*!< state machine function	*/
@@ -67,7 +54,6 @@
 } while (0)
 
 #define bfa_fsm_send_event(_fsm, _event)	((_fsm)->fsm((_fsm), (_event)))
-#define bfa_fsm_get_state(_fsm)			((_fsm)->fsm)
 #define bfa_fsm_cmp_state(_fsm, _state)					\
 	((_fsm)->fsm == (bfa_fsm_t)(_state))
 
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index 3bfd9da..d152b3f 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -24,7 +24,6 @@
 #include "bfa_defs_status.h"
 #include "bfa_defs_mfg_comm.h"
 
-#define BFA_STRING_32	32
 #define BFA_VERSION_LEN 64
 
 /* ---------------------- adapter definitions ------------ */
@@ -55,7 +54,7 @@
 	char		optrom_ver[BFA_VERSION_LEN];
 	char		os_type[BFA_ADAPTER_OS_TYPE_LEN];
 	struct bfa_mfg_vpd vpd;
-	struct mac mac;
+	u8		mac[ETH_ALEN];
 
 	u8		nports;
 	u8		max_speed;
@@ -187,8 +186,6 @@
 #define BFA_MFG_SUPPLIER_SERIALNUM_SIZE		20
 #define BFA_MFG_SUPPLIER_REVISION_SIZE		4
 
-#pragma pack(1)
-
 /* BFA adapter manufacturing block definition.
  *
  * All numerical fields are in big-endian format.
@@ -211,7 +208,7 @@
 	char	supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
 	char	supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
 	char	supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
-	mac_t	mfg_mac;	/* base mac address */
+	u8	mfg_mac[ETH_ALEN]; /* base mac address */
 	u8	num_mac;	/* number of mac addresses */
 	u8	rsv2;
 	u32	card_type;	/* card type          */
@@ -227,9 +224,7 @@
 	char	initial_mode[8]; /* initial mode: hba/cna/nic */
 	u8	rsv4[84];
 	u8	md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */
-};
-
-#pragma pack()
+} __packed;
 
 /* ---------------------- pci definitions ------------ */
 
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
index a37326d..f048887 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
@@ -109,8 +109,6 @@
 	struct bfa_port_eth_stats eth;
 };
 
-#pragma pack(1)
-
 #define BFA_CEE_LLDP_MAX_STRING_LEN (128)
 #define BFA_CEE_DCBX_MAX_PRIORITY	(8)
 #define BFA_CEE_DCBX_MAX_PGID		(8)
@@ -133,7 +131,7 @@
 	u8 len;
 	u8 rsvd[2];
 	u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
-};
+} __packed;
 
 /* LLDP parameters */
 struct bfa_cee_lldp_cfg {
@@ -145,7 +143,7 @@
 	struct bfa_cee_lldp_str mgmt_addr;
 	u16 time_to_live;
 	u16 enabled_system_cap;
-};
+} __packed;
 
 enum bfa_cee_dcbx_version {
 	DCBX_PROTOCOL_PRECEE	= 1,
@@ -171,7 +169,7 @@
 	u8 lls_fcoe; /* FCoE Logical Link Status */
 	u8 lls_lan; /* LAN Logical Link Status */
 	u8 rsvd[2];
-};
+} __packed;
 
 /* CEE status */
 /* Making this to tri-state for the benefit of port list command */
@@ -188,11 +186,11 @@
 	u8 error_reason;
 	struct bfa_cee_lldp_cfg lldp_remote;
 	struct bfa_cee_dcbx_cfg dcbx_remote;
-	mac_t src_mac;
+	u8 src_mac[ETH_ALEN];
 	u8 link_speed;
 	u8 nw_priority;
 	u8 filler[2];
-};
+} __packed;
 
 /* LLDP/DCBX/CEE Statistics */
 struct bfa_cee_stats {
@@ -214,8 +212,6 @@
 	u32	cee_status_up;		/*!< CEE status up */
 	u32	cee_hw_cfg_changed;	/*!< CEE hw cfg changed */
 	u32	cee_rx_invalid_cfg;	/*!< CEE invalid cfg */
-};
-
-#pragma pack()
+} __packed;
 
 #endif	/* __BFA_DEFS_CNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
index 7a45cd0..7e17451 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
@@ -59,8 +59,6 @@
 	BFA_MFG_TYPE_INVALID = 0,	 /*!< Invalid card type		*/
 };
 
-#pragma pack(1)
-
 /* Check if Mezz card */
 #define bfa_mfg_is_mezz(type) (( \
 	(type) == BFA_MFG_TYPE_JAYHAWK || \
@@ -77,7 +75,7 @@
 	CB_GPIO_FC4P2   = (4),		/*!< 4G 2port FC card		*/
 	CB_GPIO_FC4P1   = (5),		/*!< 4G 1port FC card		*/
 	CB_GPIO_DFLY    = (6),		/*!< 8G 2port FC mezzanine card	*/
-	CB_GPIO_PROTO   = (1 << 7)	/*!< 8G 2port FC prototypes	*/
+	CB_GPIO_PROTO   = BIT(7)	/*!< 8G 2port FC prototypes	*/
 };
 
 #define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop)	\
@@ -86,7 +84,7 @@
 		(prop) |= BFI_ADAPTER_PROTO;			\
 		(gpio) &= ~CB_GPIO_PROTO;			\
 	}							\
-	switch ((gpio)) {					\
+	switch (gpio) {						\
 	case CB_GPIO_TTV:					\
 		(prop) |= BFI_ADAPTER_TTV;			\
 	case CB_GPIO_DFLY:					\
@@ -148,8 +146,6 @@
 	u8	len;		/*!< vpd data length excluding header */
 	u8	rsv;
 	u8		data[BFA_MFG_VPD_LEN];	/*!< vpd data */
-};
-
-#pragma pack()
+} __packed;
 
 #endif /* __BFA_DEFS_MFG_H__ */
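
All of the bna headers in this series drop their file-wide #pragma pack(1)
regions in favour of per-struct __packed annotations, which keeps the
packing decision attached to the wire-format type instead of silently
applying to anything the region happens to enclose. Before/after sketch
(illustrative type):

	/* Before: everything between the pragmas is packed, including
	 * any definition accidentally placed inside the region.
	 */
	#pragma pack(1)
	struct wire_hdr {
		u8  type;
		u32 len;	/* 5-byte struct, no padding */
	};
	#pragma pack()

	/* After: only the annotated struct is packed. */
	struct wire_hdr {
		u8  type;
		u32 len;
	} __packed;
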
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 68f3c13..b7a0f78 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -23,14 +23,6 @@
 
 /* IOC local definitions */
 
-#define bfa_ioc_state_disabled(__sm)			\
-	(((__sm) == BFI_IOC_UNINIT) ||			\
-	 ((__sm) == BFI_IOC_INITING) ||			\
-	 ((__sm) == BFI_IOC_HWINIT) ||			\
-	 ((__sm) == BFI_IOC_DISABLED) ||		\
-	 ((__sm) == BFI_IOC_FAIL) ||			\
-	 ((__sm) == BFI_IOC_CFG_DISABLED))
-
 /* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
 
 #define bfa_ioc_firmware_lock(__ioc)			\
@@ -57,12 +49,6 @@
 			((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
 #define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)		\
 		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
-#define bfa_ioc_get_alt_ioc_fwstate(__ioc)		\
-			((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
-
-#define bfa_ioc_mbox_cmd_pending(__ioc)		\
-			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
-			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
 
 static bool bfa_nw_auto_recover = true;
 
@@ -1105,12 +1091,9 @@
 bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
 {
 	struct bfa_ioc_notify *notify;
-	struct list_head			*qe;
 
-	list_for_each(qe, &ioc->notify_q) {
-		notify = (struct bfa_ioc_notify *)qe;
+	list_for_each_entry(notify, &ioc->notify_q, qe)
 		notify->cbfn(notify->cbarg, event);
-	}
 }
 
 static void
@@ -1321,7 +1304,7 @@
 	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
 	     i++) {
 		fwsig[i] =
-			swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
+			swab32(readl(loff + ioc->ioc_regs.smem_page_start));
 		loff += sizeof(u32);
 	}
 }
@@ -1387,7 +1370,7 @@
 bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
 			 struct bfi_ioc_image_hdr *fwhdr_to_cmp)
 {
-	if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == false)
+	if (!bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp))
 		return BFI_IOC_IMG_VER_INCOMP;
 
 	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
@@ -1398,7 +1381,7 @@
 	/* GA takes priority over internal builds of the same patch stream.
 	 * At this point major minor maint and patch numbers are same.
 	 */
-	if (fwhdr_is_ga(base_fwhdr) == true)
+	if (fwhdr_is_ga(base_fwhdr))
 		if (fwhdr_is_ga(fwhdr_to_cmp))
 			return BFI_IOC_IMG_VER_SAME;
 		else
@@ -1692,7 +1675,7 @@
 {
 	int	locked;
 
-	locked = readl((bar + FLASH_SEM_LOCK_REG));
+	locked = readl(bar + FLASH_SEM_LOCK_REG);
 
 	return !locked;
 }
@@ -1912,10 +1895,8 @@
 }
 
 void
-bfa_nw_ioc_timeout(void *ioc_arg)
+bfa_nw_ioc_timeout(struct bfa_ioc *ioc)
 {
-	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
-
 	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
 }
 
@@ -1980,10 +1961,9 @@
 }
 
 void
-bfa_nw_ioc_hb_check(void *cbarg)
+bfa_nw_ioc_hb_check(struct bfa_ioc *ioc)
 {
-	struct bfa_ioc *ioc = cbarg;
-	u32	hb_count;
+	u32 hb_count;
 
 	hb_count = readl(ioc->ioc_regs.heartbeat);
 	if (ioc->hb_count == hb_count) {
@@ -2069,8 +2049,8 @@
 		/**
 		 * write smem
 		 */
-		writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
-			      ((ioc->ioc_regs.smem_page_start) + (loff)));
+		writel(swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]),
+		       ioc->ioc_regs.smem_page_start + loff);
 
 		loff += sizeof(u32);
 
@@ -2177,7 +2157,8 @@
 	/**
 	 * Enqueue command to firmware.
 	 */
-	bfa_q_deq(&mod->cmd_q, &cmd);
+	cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
+	list_del(&cmd->qe);
 	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
 
 	/**
@@ -2198,8 +2179,10 @@
 	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
 	struct bfa_mbox_cmd *cmd;
 
-	while (!list_empty(&mod->cmd_q))
-		bfa_q_deq(&mod->cmd_q, &cmd);
+	while (!list_empty(&mod->cmd_q)) {
+		cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
+		list_del(&cmd->qe);
+	}
 }
 
 /**
@@ -2223,14 +2206,14 @@
 	/*
 	 *  Hold semaphore to serialize pll init and fwtrc.
 	*/
-	if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
+	if (!bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
 		return 1;
 
 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 
 	len = sz/sizeof(u32);
 	for (i = 0; i < len; i++) {
-		r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
+		r32 = swab32(readl(loff + ioc->ioc_regs.smem_page_start));
 		buf[i] = be32_to_cpu(r32);
 		loff += sizeof(u32);
 
@@ -2278,7 +2261,7 @@
 	int tlen;
 
 	if (ioc->dbg_fwsave_once) {
-		ioc->dbg_fwsave_once = 0;
+		ioc->dbg_fwsave_once = false;
 		if (ioc->dbg_fwsave_len) {
 			tlen = ioc->dbg_fwsave_len;
 			bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
@@ -2796,7 +2779,7 @@
 		ad_attr->prototype = 0;
 
 	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
-	ad_attr->mac  = bfa_nw_ioc_get_mac(ioc);
+	bfa_nw_ioc_get_mac(ioc, ad_attr->mac);
 
 	ad_attr->pcie_gen = ioc_attr->pcie_gen;
 	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
@@ -2942,10 +2925,10 @@
 	return ioc->attr->pwwn;
 }
 
-mac_t
-bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
+void
+bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac)
 {
-	return ioc->attr->mac;
+	ether_addr_copy(mac, ioc->attr->mac);
 }
 
 /* Firmware failure detected. Start recovery actions. */
@@ -2997,9 +2980,8 @@
 }
 
 void
-bfa_nw_iocpf_timeout(void *ioc_arg)
+bfa_nw_iocpf_timeout(struct bfa_ioc *ioc)
 {
-	struct bfa_ioc  *ioc = (struct bfa_ioc *) ioc_arg;
 	enum bfa_iocpf_state iocpf_st;
 
 	iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
@@ -3011,10 +2993,8 @@
 }
 
 void
-bfa_nw_iocpf_sem_timeout(void *ioc_arg)
+bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc)
 {
-	struct bfa_ioc  *ioc = (struct bfa_ioc *) ioc_arg;
-
 	bfa_ioc_hw_sem_get(ioc);
 }
 
@@ -3245,7 +3225,6 @@
 	flash->op_busy = 0;
 
 	bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
-	bfa_q_qe_init(&flash->ioc_notify);
 	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
 	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
 }
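
bfa_nw_ioc_get_mac() now fills a caller-supplied u8[ETH_ALEN] buffer via
ether_addr_copy() instead of returning a mac_t by value, matching how the
rest of the stack passes MAC addresses. Caller-side sketch (ioc and netdev
assumed in scope; ether_addr_copy() requires both buffers to be 16-bit
aligned):

	u8 mac[ETH_ALEN];

	bfa_nw_ioc_get_mac(ioc, mac);
	ether_addr_copy(netdev->dev_addr, mac);
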
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index effb715..2c0b4c0 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -232,12 +232,6 @@
 #define bfa_ioc_asic_gen(__ioc)		((__ioc)->asic_gen)
 #define bfa_ioc_is_default(__ioc)	\
 	(bfa_ioc_pcifn(__ioc) == bfa_ioc_portid(__ioc))
-#define bfa_ioc_fetch_stats(__ioc, __stats) \
-		(((__stats)->drv_stats) = (__ioc)->stats)
-#define bfa_ioc_clr_stats(__ioc)	\
-		memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
-#define bfa_ioc_maxfrsize(__ioc)	((__ioc)->attr->maxfrsize)
-#define bfa_ioc_rx_bbcredit(__ioc)	((__ioc)->attr->rx_bbcredit)
 #define bfa_ioc_speed_sup(__ioc)	\
 	BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
 #define bfa_ioc_get_nports(__ioc)	\
@@ -268,13 +262,6 @@
 	((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
 			   (__ioc)->asic_mode))
 
-#define	bfa_ioc_isr_mode_set(__ioc, __msix) do {			\
-	if ((__ioc)->ioc_hwif->ioc_isr_mode_set)			\
-		((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix));	\
-} while (0)
-#define	bfa_ioc_ownership_reset(__ioc)				\
-			((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
-
 #define bfa_ioc_lpu_read_stat(__ioc) do {				\
 		if ((__ioc)->ioc_hwif->ioc_lpu_read_stat)		\
 			((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc));	\
@@ -309,7 +296,7 @@
 			struct bfi_ioc_image_hdr *fwhdr);
 bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
 			struct bfi_ioc_image_hdr *fwhdr);
-mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
+void bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac);
 void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave);
 int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen);
 int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen);
@@ -317,10 +304,10 @@
 /*
  * Timeout APIs
  */
-void bfa_nw_ioc_timeout(void *ioc);
-void bfa_nw_ioc_hb_check(void *ioc);
-void bfa_nw_iocpf_timeout(void *ioc);
-void bfa_nw_iocpf_sem_timeout(void *ioc);
+void bfa_nw_ioc_timeout(struct bfa_ioc *ioc);
+void bfa_nw_ioc_hb_check(struct bfa_ioc *ioc);
+void bfa_nw_iocpf_timeout(struct bfa_ioc *ioc);
+void bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc);
 
 /*
  * F/W Image Size & Chunk
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index 2e72445..74e5ed5 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -23,8 +23,7 @@
 #include "bfi_reg.h"
 #include "bfa_defs.h"
 
-#define bfa_ioc_ct_sync_pos(__ioc)	\
-		((u32) (1 << bfa_ioc_pcifn(__ioc)))
+#define bfa_ioc_ct_sync_pos(__ioc)	BIT(bfa_ioc_pcifn(__ioc))
 #define BFA_IOC_SYNC_REQD_SH		16
 #define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
 #define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
@@ -536,7 +535,7 @@
 {
 	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
 
-	writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
+	writel(r32 | bfa_ioc_ct_sync_pos(ioc), ioc->ioc_regs.ioc_fail_sync);
 }
 
 static bool
@@ -667,7 +666,7 @@
 		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
 		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
 	}
-	r32 = readl((rb + PSS_CTL_REG));
+	r32 = readl(rb + PSS_CTL_REG);
 	r32 &= ~__PSS_LMEM_RESET;
 	writel(r32, (rb + PSS_CTL_REG));
 	udelay(1000);
@@ -678,7 +677,7 @@
 
 	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
 	udelay(1000);
-	r32 = readl((rb + MBIST_STAT_REG));
+	r32 = readl(rb + MBIST_STAT_REG);
 	writel(0, (rb + MBIST_CTL_REG));
 	return BFA_STATUS_OK;
 }
@@ -691,7 +690,7 @@
 	/*
 	 * put s_clk PLL and PLL FSM in reset
 	 */
-	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+	r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
 	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
 	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
 		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
@@ -701,28 +700,28 @@
 	 * Ignore mode and program for the max clock (which is FC16)
 	 * Firmware/NFC will do the PLL init appropriately
 	 */
-	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+	r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
 	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
 	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
 
 	/*
 	 * while doing PLL init dont clock gate ethernet subsystem
 	 */
-	r32 = readl((rb + CT2_CHIP_MISC_PRG));
-	writel((r32 | __ETH_CLK_ENABLE_PORT0),
-				(rb + CT2_CHIP_MISC_PRG));
+	r32 = readl(rb + CT2_CHIP_MISC_PRG);
+	writel(r32 | __ETH_CLK_ENABLE_PORT0,
+	       rb + CT2_CHIP_MISC_PRG);
 
-	r32 = readl((rb + CT2_PCIE_MISC_REG));
-	writel((r32 | __ETH_CLK_ENABLE_PORT1),
-				(rb + CT2_PCIE_MISC_REG));
+	r32 = readl(rb + CT2_PCIE_MISC_REG);
+	writel(r32 | __ETH_CLK_ENABLE_PORT1,
+	       rb + CT2_PCIE_MISC_REG);
 
 	/*
 	 * set sclk value
 	 */
-	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+	r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
 	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
 		__APP_PLL_SCLK_CLK_DIV2);
-	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+	writel(r32 | 0x1061731b, rb + CT2_APP_PLL_SCLK_CTL_REG);
 
 	/*
 	 * poll for s_clk lock or delay 1ms
@@ -743,28 +742,28 @@
 	/*
 	 * put l_clk PLL and PLL FSM in reset
 	 */
-	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+	r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
 	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
 	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
 		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
-	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+	writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);
 
 	/*
 	 * set LPU speed (set for FC16 which will work for other modes)
 	 */
-	r32 = readl((rb + CT2_CHIP_MISC_PRG));
+	r32 = readl(rb + CT2_CHIP_MISC_PRG);
 	writel(r32, (rb + CT2_CHIP_MISC_PRG));
 
 	/*
 	 * set LPU half speed (set for FC16 which will work for other modes)
 	 */
-	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
-	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+	r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+	writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);
 
 	/*
 	 * set lclk for mode (set for FC16)
 	 */
-	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+	r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
 	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
 	r32 |= 0x20c1731b;
 	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
@@ -780,14 +779,14 @@
 {
 	u32 r32;
 
-	r32 = readl((rb + PSS_CTL_REG));
+	r32 = readl(rb + PSS_CTL_REG);
 	r32 &= ~__PSS_LMEM_RESET;
-	writel(r32, (rb + PSS_CTL_REG));
+	writel(r32, rb + PSS_CTL_REG);
 	udelay(1000);
 
-	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
+	writel(__EDRAM_BISTR_START, rb + CT2_MBIST_CTL_REG);
 	udelay(1000);
-	writel(0, (rb + CT2_MBIST_CTL_REG));
+	writel(0, rb + CT2_MBIST_CTL_REG);
 }
 
 static void
@@ -801,22 +800,22 @@
 	/*
 	 * release soft reset on s_clk & l_clk
 	 */
-	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
-	writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
-			(rb + CT2_APP_PLL_SCLK_CTL_REG));
+	r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
+	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+	       rb + CT2_APP_PLL_SCLK_CTL_REG);
 
 	/*
 	 * release soft reset on s_clk & l_clk
 	 */
-	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
-	writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET),
-			(rb + CT2_APP_PLL_LCLK_CTL_REG));
+	r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+	       rb + CT2_APP_PLL_LCLK_CTL_REG);
 
 	/* put port0, port1 MAC & AHB in reset */
-	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
-			(rb + CT2_CSI_MAC_CONTROL_REG(0)));
-	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
-			(rb + CT2_CSI_MAC_CONTROL_REG(1)));
+	writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET,
+	       rb + CT2_CSI_MAC_CONTROL_REG(0));
+	writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET,
+	       rb + CT2_CSI_MAC_CONTROL_REG(1));
 }
 
 #define CT2_NFC_MAX_DELAY       1000
@@ -861,8 +860,8 @@
 
 	nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
 
-	if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
-		(nfc_ver >= CT2_NFC_VER_VALID)) {
+	if (wgn == (__A2T_AHB_LOAD | __WGN_READY) &&
+	    nfc_ver >= CT2_NFC_VER_VALID) {
 		if (bfa_ioc_ct2_nfc_halted(rb))
 			bfa_ioc_ct2_nfc_resume(rb);
 		writel(__RESET_AND_START_SCLK_LCLK_PLLS,
@@ -899,19 +898,19 @@
 		bfa_ioc_ct2_lclk_init(rb);
 
 		/* release soft reset on s_clk & l_clk */
-		r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+		r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
 		writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
 				rb + CT2_APP_PLL_SCLK_CTL_REG);
-		r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+		r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
 		writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
 				rb + CT2_APP_PLL_LCLK_CTL_REG);
 	}
 
 	/* Announce flash device presence, if flash was corrupted. */
 	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
-		r32 = readl((rb + PSS_GPIO_OUT_REG));
+		r32 = readl(rb + PSS_GPIO_OUT_REG);
 		writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
-		r32 = readl((rb + PSS_GPIO_OE_REG));
+		r32 = readl(rb + PSS_GPIO_OE_REG);
 		writel(r32 | 1, rb + PSS_GPIO_OE_REG);
 	}
 
@@ -919,27 +918,27 @@
 	 * Mask the interrupts and clear any
 	 * pending interrupts left by BIOS/EFI
 	 */
-	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
-	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
+	writel(1, rb + CT2_LPU0_HOSTFN_MBOX0_MSK);
+	writel(1, rb + CT2_LPU1_HOSTFN_MBOX0_MSK);
 
 	/* For first time initialization, no need to clear interrupts */
 	r32 = readl(rb + HOST_SEM5_REG);
 	if (r32 & 0x1) {
-		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+		r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
 		if (r32 == 1) {
-			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
-			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+			writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT);
+			readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
 		}
-		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+		r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
 		if (r32 == 1) {
-			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
-			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+			writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT);
+			readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
 		}
 	}
 
 	bfa_ioc_ct2_mem_init(rb);
 
-	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
-	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
+	writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG);
+	writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG);
 	return BFA_STATUS_OK;
 }
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
index c07d5b9..9c5bb24 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
@@ -66,8 +66,9 @@
 	cmdq->offset = 0;
 	cmdq->bytes_to_copy = 0;
 	while (!list_empty(&cmdq->pending_q)) {
-		bfa_q_deq(&cmdq->pending_q, &cmdq_ent);
-		bfa_q_qe_init(&cmdq_ent->qe);
+		cmdq_ent = list_first_entry(&cmdq->pending_q,
+					    struct bfa_msgq_cmd_entry, qe);
+		list_del(&cmdq_ent->qe);
 		call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
 	}
 }
@@ -242,8 +243,8 @@
 
 	/* Walk through pending list to see if the command can be posted */
 	while (!list_empty(&cmdq->pending_q)) {
-		cmd =
-		(struct bfa_msgq_cmd_entry *)bfa_q_first(&cmdq->pending_q);
+		cmd = list_first_entry(&cmdq->pending_q,
+				       struct bfa_msgq_cmd_entry, qe);
 		if (ntohs(cmd->msg_hdr->num_entries) <=
 			BFA_MSGQ_FREE_CNT(cmdq)) {
 			list_del(&cmd->qe);
@@ -615,7 +616,6 @@
 	bfa_msgq_rspq_attach(&msgq->rspq, msgq);
 
 	bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
-	bfa_q_qe_init(&msgq->ioc_notify);
 	bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
 	bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
 }
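
The private bfa_q_deq()/bfa_q_qe_init() helpers are gone throughout this
series; dequeuing now uses the standard list_head idiom seen above. For
reference, the generic shape (entry type and field names are placeholders):

	while (!list_empty(&q->pending_q)) {
		entry = list_first_entry(&q->pending_q,
					 struct some_entry, qe);
		list_del(&entry->qe);	/* detach before use */
		/* ... complete or fail the entry ... */
	}
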
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 2bcde40..81e59ea 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -21,8 +21,6 @@
 
 #include "bfa_defs.h"
 
-#pragma pack(1)
-
 /* BFI FW image type */
 #define	BFI_FLASH_CHUNK_SZ			256	/*!< Flash chunk size */
 #define	BFI_FLASH_CHUNK_SZ_WORDS	(BFI_FLASH_CHUNK_SZ/sizeof(u32))
@@ -36,10 +34,10 @@
 		struct {
 			u8	qid;
 			u8	fn_lpu;	/*!< msg destination		    */
-		} h2i;
+		} __packed h2i;
 		u16	i2htok;	/*!< token in msgs to host	    */
-	} mtag;
-};
+	} __packed mtag;
+} __packed;
 
 #define bfi_fn_lpu(__fn, __lpu)	((__fn) << 1 | (__lpu))
 #define bfi_mhdr_2_fn(_mh)	((_mh)->mtag.h2i.fn_lpu >> 1)
@@ -75,14 +73,14 @@
 	struct {
 		u32	addr_lo;
 		u32	addr_hi;
-	} a32;
-};
+	} __packed a32;
+} __packed;
 
 /* Generic DMA addr-len pair. */
 struct bfi_alen {
 	union bfi_addr_u	al_addr;	/* DMA addr of buffer	*/
 	u32			al_len;		/* length of buffer */
-};
+} __packed;
 
 /*
  * Large Message structure - 128 Bytes size Msgs
@@ -96,7 +94,7 @@
 struct bfi_mbmsg {
 	struct bfi_mhdr mh;
 	u32		pl[BFI_MBMSG_SZ];
-};
+} __packed;
 
 /* Supported PCI function class codes (personality) */
 enum bfi_pcifn_class {
@@ -184,19 +182,19 @@
 struct bfi_ioc_getattr_req {
 	struct bfi_mhdr mh;
 	union bfi_addr_u	attr_addr;
-};
+} __packed;
 
 struct bfi_ioc_attr {
 	u64		mfg_pwwn;	/*!< Mfg port wwn	   */
 	u64		mfg_nwwn;	/*!< Mfg node wwn	   */
-	mac_t		mfg_mac;	/*!< Mfg mac		   */
+	u8		mfg_mac[ETH_ALEN]; /*!< Mfg mac		   */
 	u8		port_mode;	/* enum bfi_port_mode	   */
 	u8		rsvd_a;
 	u64		pwwn;
 	u64		nwwn;
-	mac_t		mac;		/*!< PBC or Mfg mac	   */
+	u8		mac[ETH_ALEN];	/*!< PBC or Mfg mac	   */
 	u16	rsvd_b;
-	mac_t		fcoe_mac;
+	u8		fcoe_mac[ETH_ALEN];
 	u16	rsvd_c;
 	char		brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
 	u8		pcie_gen;
@@ -211,14 +209,14 @@
 	char		optrom_version[BFA_VERSION_LEN];
 	struct bfa_mfg_vpd vpd;
 	u32	card_type;	/*!< card type			*/
-};
+} __packed;
 
 /* BFI_IOC_I2H_GETATTR_REPLY message */
 struct bfi_ioc_getattr_reply {
 	struct bfi_mhdr mh;	/*!< Common msg header		*/
 	u8			status;	/*!< cfg reply status		*/
 	u8			rsvd[3];
-};
+} __packed;
 
 /* Firmware memory page offsets */
 #define BFI_IOC_SMEM_PG0_CB	(0x40)
@@ -256,7 +254,7 @@
 	u8 build;
 	u8 rsvd[2];
 #endif
-};
+} __packed;
 
 struct bfi_ioc_image_hdr {
 	u32	signature;	/*!< constant signature */
@@ -269,7 +267,7 @@
 	u32	rsvd_b[2];
 	struct bfi_ioc_fwver fwver;
 	u32	md5sum[BFI_IOC_MD5SUM_SZ];
-};
+} __packed;
 
 enum bfi_ioc_img_ver_cmp {
 	BFI_IOC_IMG_VER_INCOMP,
@@ -301,7 +299,7 @@
 struct bfi_ioc_hbeat {
 	struct bfi_mhdr mh;		/*!< common msg header		*/
 	u32	   hb_count;	/*!< current heart beat count	*/
-};
+} __packed;
 
 /* IOC hardware/firmware state */
 enum bfi_ioc_state {
@@ -317,8 +315,6 @@
 	BFI_IOC_MEMTEST		= 9,	/*!< IOC is doing memtest	     */
 };
 
-#define BFI_IOC_ENDIAN_SIG  0x12345678
-
 enum {
 	BFI_ADAPTER_TYPE_FC	= 0x01,		/*!< FC adapters	   */
 	BFI_ADAPTER_TYPE_MK	= 0x0f0000,	/*!< adapter type mask     */
@@ -337,12 +333,6 @@
 		BFI_ADAPTER_ ## __prop ## _SH)
 #define BFI_ADAPTER_SETP(__prop, __val)				\
 	((__val) << BFI_ADAPTER_ ## __prop ## _SH)
-#define BFI_ADAPTER_IS_PROTO(__adap_type)			\
-	((__adap_type) & BFI_ADAPTER_PROTO)
-#define BFI_ADAPTER_IS_TTV(__adap_type)				\
-	((__adap_type) & BFI_ADAPTER_TTV)
-#define BFI_ADAPTER_IS_UNSUPP(__adap_type)			\
-	((__adap_type) & BFI_ADAPTER_UNSUPP)
 #define BFI_ADAPTER_IS_SPECIAL(__adap_type)			\
 	((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO |	\
 			BFI_ADAPTER_UNSUPP))
@@ -353,7 +343,7 @@
 	u16			clscode;
 	u16			rsvd;
 	u32		tv_sec;
-};
+} __packed;
 
 /* BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages */
 struct bfi_ioc_ctrl_reply {
@@ -362,7 +352,7 @@
 	u8			port_mode;	/*!< enum bfa_mode */
 	u8			cap_bm;		/*!< capability bit mask */
 	u8			rsvd;
-};
+} __packed;
 
 #define BFI_IOC_MSGSZ   8
 /* H2I Messages */
@@ -372,14 +362,14 @@
 	struct bfi_ioc_ctrl_req disable_req;
 	struct bfi_ioc_getattr_req getattr_req;
 	u32			mboxmsg[BFI_IOC_MSGSZ];
-};
+} __packed;
 
 /* I2H Messages */
 union bfi_ioc_i2h_msg_u {
 	struct bfi_mhdr mh;
 	struct bfi_ioc_ctrl_reply fw_event;
 	u32			mboxmsg[BFI_IOC_MSGSZ];
-};
+} __packed;
 
 /*----------------------------------------------------------------------
  *				MSGQ
@@ -408,7 +398,7 @@
 	u16	num_entries;
 	u8	enet_id;
 	u8	rsvd[1];
-};
+} __packed;
 
 #define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do {	\
 	(_mh).msg_class	 = (_mc);	\
@@ -430,21 +420,21 @@
 	union bfi_addr_u addr;
 	u16 q_depth;     /* Total num of entries in the queue */
 	u8 rsvd[2];
-};
+} __packed;
 
 /* BFI_ENET_MSGQ_CFG_REQ TBD init or cfg? */
 struct bfi_msgq_cfg_req {
 	struct bfi_mhdr mh;
 	struct bfi_msgq cmdq;
 	struct bfi_msgq rspq;
-};
+} __packed;
 
 /* BFI_ENET_MSGQ_CFG_RSP */
 struct bfi_msgq_cfg_rsp {
 	struct bfi_mhdr mh;
 	u8 cmd_status;
 	u8 rsvd[3];
-};
+} __packed;
 
 /* BFI_MSGQ_H2I_DOORBELL */
 struct bfi_msgq_h2i_db {
@@ -452,8 +442,8 @@
 	union {
 		u16 cmdq_pi;
 		u16 rspq_ci;
-	} idx;
-};
+	} __packed idx;
+} __packed;
 
 /* BFI_MSGQ_I2H_DOORBELL */
 struct bfi_msgq_i2h_db {
@@ -461,8 +451,8 @@
 	union {
 		u16 rspq_pi;
 		u16 cmdq_ci;
-	} idx;
-};
+	} __packed idx;
+} __packed;
 
 #define BFI_CMD_COPY_SZ 28
 
@@ -470,14 +460,14 @@
 struct bfi_msgq_h2i_cmdq_copy_rsp {
 	struct bfi_mhdr mh;
 	u8	      data[BFI_CMD_COPY_SZ];
-};
+} __packed;
 
 /* BFI_MSGQ_I2H_CMD_COPY_REQ */
 struct bfi_msgq_i2h_cmdq_copy_req {
 	struct bfi_mhdr mh;
 	u16     offset;
 	u16     len;
-};
+} __packed;
 
 /*
  *      FLASH module specific
@@ -505,7 +495,7 @@
 struct bfi_flash_query_req {
 	struct bfi_mhdr mh;   /* Common msg header */
 	struct bfi_alen alen;
-};
+} __packed;
 
 /*
  * Flash write request
@@ -519,7 +509,7 @@
 	u8	rsv[2];
 	u32	offset;
 	u32	length;
-};
+} __packed;
 
 /*
  * Flash read request
@@ -532,7 +522,7 @@
 	u32	offset;
 	u32	length;
 	struct bfi_alen alen;
-};
+} __packed;
 
 /*
  * Flash query response
@@ -540,7 +530,7 @@
 struct bfi_flash_query_rsp {
 	struct bfi_mhdr mh;	/* Common msg header */
 	u32	status;
-};
+} __packed;
 
 /*
  * Flash read response
@@ -552,7 +542,7 @@
 	u8	rsv[3];
 	u32	status;
 	u32	length;
-};
+} __packed;
 
 /*
  * Flash write response
@@ -564,8 +554,6 @@
 	u8	rsv[3];
 	u32	status;
 	u32	length;
-};
-
-#pragma pack()
+} __packed;
 
 #endif /* __BFI_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h
index bd605be..fad6511 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h
@@ -22,8 +22,6 @@
 #include "bfi.h"
 #include "bfa_defs_cna.h"
 
-#pragma pack(1)
-
 enum bfi_port_h2i {
 	BFI_PORT_H2I_ENABLE_REQ		= (1),
 	BFI_PORT_H2I_DISABLE_REQ	= (2),
@@ -43,7 +41,7 @@
 	struct bfi_mhdr mh;		/*!< msg header			    */
 	u32	msgtag;		/*!< msgtag for reply		    */
 	u32	rsvd;
-};
+} __packed;
 
 /* Generic RSP type */
 struct bfi_port_generic_rsp {
@@ -51,13 +49,13 @@
 	u8		status;		/*!< port enable status		    */
 	u8		rsvd[3];
 	u32	msgtag;		/*!< msgtag for reply		    */
-};
+} __packed;
 
 /* BFI_PORT_H2I_GET_STATS_REQ */
 struct bfi_port_get_stats_req {
 	struct bfi_mhdr mh;		/*!< common msg header		    */
 	union bfi_addr_u   dma_addr;
-};
+} __packed;
 
 union bfi_port_h2i_msg_u {
 	struct bfi_mhdr mh;
@@ -65,7 +63,7 @@
 	struct bfi_port_generic_req disable_req;
 	struct bfi_port_get_stats_req getstats_req;
 	struct bfi_port_generic_req clearstats_req;
-};
+} __packed;
 
 union bfi_port_i2h_msg_u {
 	struct bfi_mhdr mh;
@@ -73,7 +71,7 @@
 	struct bfi_port_generic_rsp disable_rsp;
 	struct bfi_port_generic_rsp getstats_rsp;
 	struct bfi_port_generic_rsp clearstats_rsp;
-};
+} __packed;
 
 /* @brief Mailbox commands from host to (DCBX/LLDP) firmware */
 enum bfi_cee_h2i_msgs {
@@ -97,7 +95,7 @@
  */
 struct bfi_lldp_reset_stats {
 	struct bfi_mhdr mh;
-};
+} __packed;
 
 /*
  * @brief H2I command structure for resetting the stats.
@@ -105,7 +103,7 @@
  */
 struct bfi_cee_reset_stats {
 	struct bfi_mhdr mh;
-};
+} __packed;
 
 /*
  * @brief  get configuration  command from host
@@ -114,7 +112,7 @@
 struct bfi_cee_get_req {
 	struct bfi_mhdr mh;
 	union bfi_addr_u   dma_addr;
-};
+} __packed;
 
 /*
  * @brief reply message from firmware
@@ -124,7 +122,7 @@
 	struct bfi_mhdr mh;
 	u8			cmd_status;
 	u8			rsvd[3];
-};
+} __packed;
 
 /*
  * @brief  get configuration  command from host
@@ -133,7 +131,7 @@
 struct bfi_cee_stats_req {
 	struct bfi_mhdr mh;
 	union bfi_addr_u   dma_addr;
-};
+} __packed;
 
 /*
  * @brief reply message from firmware
@@ -143,22 +141,20 @@
 	struct bfi_mhdr mh;
 	u8			cmd_status;
 	u8			rsvd[3];
-};
+} __packed;
 
 /* @brief mailbox command structures from host to firmware */
 union bfi_cee_h2i_msg_u {
 	struct bfi_mhdr mh;
 	struct bfi_cee_get_req get_req;
 	struct bfi_cee_stats_req stats_req;
-};
+} __packed;
 
 /* @brief mailbox message structures from firmware to host	*/
 union bfi_cee_i2h_msg_u {
 	struct bfi_mhdr mh;
 	struct bfi_cee_get_rsp get_rsp;
 	struct bfi_cee_stats_rsp stats_rsp;
-};
-
-#pragma pack()
+} __packed;
 
 #endif /* __BFI_CNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index bccca3b..d7be7ea8 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -36,8 +36,6 @@
 #include "bfa_defs.h"
 #include "bfi.h"
 
-#pragma pack(1)
-
 #define BFI_ENET_CFG_MAX		32	/* Max resources per PF */
 
 #define BFI_ENET_TXQ_PRIO_MAX		8
@@ -59,8 +57,8 @@
 	struct {
 		u32	addr_hi;	/* Most Significant 32-bits */
 		u32	addr_lo;	/* Least Significant 32-Bits */
-	} a32;
-};
+	} __packed a32;
+} __packed;
 
 /*	T X   Q U E U E   D E F I N E S      */
 /* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
@@ -70,13 +68,13 @@
 #define BFI_ENET_TXQ_WI_EXTENSION	(0x104)	/* Extension WI */
 
 /* TxQ Entry Control Flags */
-#define BFI_ENET_TXQ_WI_CF_FCOE_CRC	(1 << 8)
-#define BFI_ENET_TXQ_WI_CF_IPID_MODE	(1 << 5)
-#define BFI_ENET_TXQ_WI_CF_INS_PRIO	(1 << 4)
-#define BFI_ENET_TXQ_WI_CF_INS_VLAN	(1 << 3)
-#define BFI_ENET_TXQ_WI_CF_UDP_CKSUM	(1 << 2)
-#define BFI_ENET_TXQ_WI_CF_TCP_CKSUM	(1 << 1)
-#define BFI_ENET_TXQ_WI_CF_IP_CKSUM	(1 << 0)
+#define BFI_ENET_TXQ_WI_CF_FCOE_CRC	BIT(8)
+#define BFI_ENET_TXQ_WI_CF_IPID_MODE	BIT(5)
+#define BFI_ENET_TXQ_WI_CF_INS_PRIO	BIT(4)
+#define BFI_ENET_TXQ_WI_CF_INS_VLAN	BIT(3)
+#define BFI_ENET_TXQ_WI_CF_UDP_CKSUM	BIT(2)
+#define BFI_ENET_TXQ_WI_CF_TCP_CKSUM	BIT(1)
+#define BFI_ENET_TXQ_WI_CF_IP_CKSUM	BIT(0)
 
 struct bfi_enet_txq_wi_base {
 	u8			reserved;
@@ -88,28 +86,28 @@
 	u16			vlan_tag;
 	u16			lso_mss;	/* Only 14 LSB are valid */
 	u32			frame_length;	/* Only 24 LSB are valid */
-};
+} __packed;
 
 struct bfi_enet_txq_wi_ext {
 	u16			reserved;
 	u16			opcode;		/* BFI_ENET_TXQ_WI_EXTENSION */
 	u32			reserved2[3];
-};
+} __packed;
 
 struct bfi_enet_txq_wi_vector {			/* Tx Buffer Descriptor */
 	u16			reserved;
 	u16			length;		/* Only 14 LSB are valid */
 	union bfi_addr_be_u	addr;
-};
+} __packed;
 
 /*  TxQ Entry Structure  */
 struct bfi_enet_txq_entry {
 	union {
 		struct bfi_enet_txq_wi_base	base;
 		struct bfi_enet_txq_wi_ext	ext;
-	} wi;
+	} __packed wi;
 	struct bfi_enet_txq_wi_vector vector[BFI_ENET_TXQ_WI_VECT_MAX];
-};
+} __packed;
 
 #define wi_hdr		wi.base
 #define wi_ext_hdr	wi.ext
@@ -120,36 +118,36 @@
 /*   R X   Q U E U E   D E F I N E S   */
 struct bfi_enet_rxq_entry {
 	union bfi_addr_be_u  rx_buffer;
-};
+} __packed;
 
 /*   R X   C O M P L E T I O N   Q U E U E   D E F I N E S   */
 /* CQ Entry Flags */
-#define	BFI_ENET_CQ_EF_MAC_ERROR	(1 <<  0)
-#define	BFI_ENET_CQ_EF_FCS_ERROR	(1 <<  1)
-#define	BFI_ENET_CQ_EF_TOO_LONG		(1 <<  2)
-#define	BFI_ENET_CQ_EF_FC_CRC_OK	(1 <<  3)
+#define BFI_ENET_CQ_EF_MAC_ERROR	BIT(0)
+#define BFI_ENET_CQ_EF_FCS_ERROR	BIT(1)
+#define BFI_ENET_CQ_EF_TOO_LONG		BIT(2)
+#define BFI_ENET_CQ_EF_FC_CRC_OK	BIT(3)
 
-#define	BFI_ENET_CQ_EF_RSVD1		(1 <<  4)
-#define	BFI_ENET_CQ_EF_L4_CKSUM_OK	(1 <<  5)
-#define	BFI_ENET_CQ_EF_L3_CKSUM_OK	(1 <<  6)
-#define	BFI_ENET_CQ_EF_HDS_HEADER	(1 <<  7)
+#define BFI_ENET_CQ_EF_RSVD1		BIT(4)
+#define BFI_ENET_CQ_EF_L4_CKSUM_OK	BIT(5)
+#define BFI_ENET_CQ_EF_L3_CKSUM_OK	BIT(6)
+#define BFI_ENET_CQ_EF_HDS_HEADER	BIT(7)
 
-#define	BFI_ENET_CQ_EF_UDP		(1 <<  8)
-#define	BFI_ENET_CQ_EF_TCP		(1 <<  9)
-#define	BFI_ENET_CQ_EF_IP_OPTIONS	(1 << 10)
-#define	BFI_ENET_CQ_EF_IPV6		(1 << 11)
+#define BFI_ENET_CQ_EF_UDP		BIT(8)
+#define BFI_ENET_CQ_EF_TCP		BIT(9)
+#define BFI_ENET_CQ_EF_IP_OPTIONS	BIT(10)
+#define BFI_ENET_CQ_EF_IPV6		BIT(11)
 
-#define	BFI_ENET_CQ_EF_IPV4		(1 << 12)
-#define	BFI_ENET_CQ_EF_VLAN		(1 << 13)
-#define	BFI_ENET_CQ_EF_RSS		(1 << 14)
-#define	BFI_ENET_CQ_EF_RSVD2		(1 << 15)
+#define BFI_ENET_CQ_EF_IPV4		BIT(12)
+#define BFI_ENET_CQ_EF_VLAN		BIT(13)
+#define BFI_ENET_CQ_EF_RSS		BIT(14)
+#define BFI_ENET_CQ_EF_RSVD2		BIT(15)
 
-#define	BFI_ENET_CQ_EF_MCAST_MATCH	(1 << 16)
-#define	BFI_ENET_CQ_EF_MCAST		(1 << 17)
-#define BFI_ENET_CQ_EF_BCAST		(1 << 18)
-#define	BFI_ENET_CQ_EF_REMOTE		(1 << 19)
+#define BFI_ENET_CQ_EF_MCAST_MATCH	BIT(16)
+#define BFI_ENET_CQ_EF_MCAST		BIT(17)
+#define BFI_ENET_CQ_EF_BCAST		BIT(18)
+#define BFI_ENET_CQ_EF_REMOTE		BIT(19)
 
-#define	BFI_ENET_CQ_EF_LOCAL		(1 << 20)
+#define BFI_ENET_CQ_EF_LOCAL		BIT(20)
 
 /* CQ Entry Structure */
 struct bfi_enet_cq_entry {
@@ -161,7 +159,7 @@
 	u8	reserved1;
 	u8	reserved2;
 	u8	rxq_id;
-};
+} __packed;
 
 /*   E N E T   C O N T R O L   P A T H   C O M M A N D S   */
 struct bfi_enet_q {
@@ -169,23 +167,23 @@
 	union bfi_addr_u	first_entry;
 	u16		pages;	/* # of pages */
 	u16		page_sz;
-};
+} __packed;
 
 struct bfi_enet_txq {
 	struct bfi_enet_q	q;
 	u8			priority;
 	u8			rsvd[3];
-};
+} __packed;
 
 struct bfi_enet_rxq {
 	struct bfi_enet_q	q;
 	u16		rx_buffer_size;
 	u16		rsvd;
-};
+} __packed;
 
 struct bfi_enet_cq {
 	struct bfi_enet_q	q;
-};
+} __packed;
 
 struct bfi_enet_ib_cfg {
 	u8		int_pkt_dma;
@@ -198,16 +196,16 @@
 	u32	inter_pkt_timeout;
 	u8		inter_pkt_count;
 	u8		rsvd1[3];
-};
+} __packed;
 
 struct bfi_enet_ib {
 	union bfi_addr_u	index_addr;
 	union {
 		u16	msix_index;
 		u16	intx_bitmask;
-	} intr;
+	} __packed intr;
 	u16		rsvd;
-};
+} __packed;
 
 /* ENET command messages */
 enum bfi_enet_h2i_msgs {
@@ -355,7 +353,7 @@
  */
 struct bfi_enet_req {
 	struct bfi_msgq_mhdr mh;
-};
+} __packed;
 
 /* Enable/Disable Request
  *
@@ -370,7 +368,7 @@
 	struct		bfi_msgq_mhdr mh;
 	u8		enable;		/* 1 = enable;  0 = disable */
 	u8		rsvd[3];
-};
+} __packed;
 
 /* Generic Response */
 struct bfi_enet_rsp {
@@ -378,7 +376,7 @@
 	u8		error;		/*!< if error see cmd_offset */
 	u8		rsvd;
 	u16		cmd_offset;	/*!< offset to invalid parameter */
-};
+} __packed;
 
 /* GLOBAL CONFIGURATION */
 
@@ -387,7 +385,7 @@
  */
 struct bfi_enet_attr_req {
 	struct bfi_msgq_mhdr	mh;
-};
+} __packed;
 
 /* bfi_enet_attr_rsp is used by:
  *	BFI_ENET_I2H_GET_ATTR_RSP
@@ -400,7 +398,7 @@
 	u32		max_cfg;
 	u32		max_ucmac;
 	u32		rit_size;
-};
+} __packed;
 
 /* Tx Configuration
  *
@@ -421,7 +419,7 @@
 	u8		apply_vlan_filter;
 	u8		add_to_vswitch;
 	u8		rsvd1[1];
-};
+} __packed;
 
 struct bfi_enet_tx_cfg_req {
 	struct bfi_msgq_mhdr mh;
@@ -431,7 +429,7 @@
 	struct {
 		struct bfi_enet_txq	q;
 		struct bfi_enet_ib	ib;
-	} q_cfg[BFI_ENET_TXQ_PRIO_MAX];
+	} __packed q_cfg[BFI_ENET_TXQ_PRIO_MAX];
 
 	struct bfi_enet_ib_cfg	ib_cfg;
 
@@ -448,7 +446,7 @@
 		u32	i_dbell;	/* PCI base address offset */
 		u8	hw_qid;		/* For debugging */
 		u8	rsvd[3];
-	} q_handles[BFI_ENET_TXQ_PRIO_MAX];
+	} __packed q_handles[BFI_ENET_TXQ_PRIO_MAX];
 };
 
 /* Rx Configuration
@@ -481,13 +479,13 @@
 		u8			force_offset;
 		u8			type;
 		u8			rsvd1;
-	} hds;
+	} __packed hds;
 
 	u8		multi_buffer;
 	u8		strip_vlan;
 	u8		drop_untagged;
 	u8		rsvd2;
-};
+} __packed;
 
 /*
  * Multicast frames are received on the ql of q-set index zero.
@@ -504,12 +502,12 @@
 		struct bfi_enet_rxq	qs;	/* small/header buffers */
 		struct bfi_enet_cq	cq;
 		struct bfi_enet_ib	ib;
-	} q_cfg[BFI_ENET_RX_QSET_MAX];
+	} __packed q_cfg[BFI_ENET_RX_QSET_MAX];
 
 	struct bfi_enet_ib_cfg	ib_cfg;
 
 	struct bfi_enet_rx_cfg	rx_cfg;
-};
+} __packed;
 
 struct bfi_enet_rx_cfg_rsp {
 	struct bfi_msgq_mhdr mh;
@@ -524,8 +522,8 @@
 		u8		hw_sqid;  /* For debugging */
 		u8		hw_cqid;  /* For debugging */
 		u8		rsvd;
-	} q_handles[BFI_ENET_RX_QSET_MAX];
-};
+	} __packed q_handles[BFI_ENET_RX_QSET_MAX];
+} __packed;
 
 /* RIT
  *
@@ -537,7 +535,7 @@
 	u16	size;			/* number of table-entries used */
 	u8	rsvd[2];
 	u8	table[BFI_ENET_RSS_RIT_MAX];
-};
+} __packed;
 
 /* RSS
  *
@@ -556,12 +554,12 @@
 	u8	mask;
 	u8	rsvd[2];
 	u32	key[BFI_ENET_RSS_KEY_LEN];
-};
+} __packed;
 
 struct bfi_enet_rss_cfg_req {
 	struct bfi_msgq_mhdr	mh;
 	struct bfi_enet_rss_cfg	cfg;
-};
+} __packed;
 
 /* MAC Unicast
  *
@@ -573,16 +571,16 @@
  */
 struct bfi_enet_ucast_req {
 	struct bfi_msgq_mhdr	mh;
-	mac_t			mac_addr;
+	u8			mac_addr[ETH_ALEN];
 	u8			rsvd[2];
-};
+} __packed;
 
 /* MAC Unicast + VLAN */
 struct bfi_enet_mac_n_vlan_req {
 	struct bfi_msgq_mhdr	mh;
 	u16			vlan_id;
-	mac_t			mac_addr;
-};
+	u8			mac_addr[ETH_ALEN];
+} __packed;
 
 /* MAC Multicast
  *
@@ -591,9 +589,9 @@
  */
 struct bfi_enet_mcast_add_req {
 	struct bfi_msgq_mhdr	mh;
-	mac_t			mac_addr;
+	u8			mac_addr[ETH_ALEN];
 	u8			rsvd[2];
-};
+} __packed;
 
 /* bfi_enet_mac_mfilter_add_rsp is used by:
  *	BFI_ENET_I2H_MAC_MCAST_ADD_RSP
@@ -605,7 +603,7 @@
 	u16			cmd_offset;
 	u16			handle;
 	u8			rsvd1[2];
-};
+} __packed;
 
 /* bfi_enet_mac_mfilter_del_req is used by:
  *	BFI_ENET_H2I_MAC_MCAST_DEL_REQ
@@ -614,7 +612,7 @@
 	struct bfi_msgq_mhdr	mh;
 	u16			handle;
 	u8			rsvd[2];
-};
+} __packed;
 
 /* VLAN
  *
@@ -626,7 +624,7 @@
 	u8			block_idx;
 	u8			rsvd[3];
 	u32			bit_mask[BFI_ENET_VLAN_WORDS_MAX];
-};
+} __packed;
 
 /* PAUSE
  *
@@ -638,7 +636,7 @@
 	u8			rsvd[2];
 	u8			tx_pause;	/* 1 = enable;  0 = disable */
 	u8			rx_pause;	/* 1 = enable;  0 = disable */
-};
+} __packed;
 
 /* DIAGNOSTICS
  *
@@ -650,7 +648,7 @@
 	u8			rsvd[2];
 	u8			mode;		/* cable or Serdes */
 	u8			enable;		/* 1 = enable;  0 = disable */
-};
+} __packed;
 
 /* enum for Loopback opmodes */
 enum {
@@ -671,14 +669,14 @@
 	u32			rx_enet_mask;
 	u32			tx_enet_mask;
 	union bfi_addr_u	host_buffer;
-};
+} __packed;
 
 /* defines for "stats_mask" above. */
-#define BFI_ENET_STATS_MAC    (1 << 0)    /* !< MAC Statistics */
-#define BFI_ENET_STATS_BPC    (1 << 1)    /* !< Pause Stats from BPC */
-#define BFI_ENET_STATS_RAD    (1 << 2)    /* !< Rx Admission Statistics */
-#define BFI_ENET_STATS_RX_FC  (1 << 3)    /* !< Rx FC Stats from RxA */
-#define BFI_ENET_STATS_TX_FC  (1 << 4)    /* !< Tx FC Stats from TxA */
+#define BFI_ENET_STATS_MAC    BIT(0)    /* !< MAC Statistics */
+#define BFI_ENET_STATS_BPC    BIT(1)    /* !< Pause Stats from BPC */
+#define BFI_ENET_STATS_RAD    BIT(2)    /* !< Rx Admission Statistics */
+#define BFI_ENET_STATS_RX_FC  BIT(3)    /* !< Rx FC Stats from RxA */
+#define BFI_ENET_STATS_TX_FC  BIT(4)    /* !< Tx FC Stats from TxA */
 
 #define BFI_ENET_STATS_ALL    0x1f
 
@@ -699,7 +697,7 @@
 	u64 errors;
 	u64 filter_vlan;      /* frames filtered due to VLAN */
 	u64 filter_mac_sa;    /* frames filtered due to SA check */
-};
+} __packed;
 
 /* RxF Frame Statistics */
 struct bfi_enet_stats_rxf {
@@ -715,7 +713,7 @@
 	u64 bcast;
 	u64 bcast_vlan;
 	u64 frame_drops;
-};
+} __packed;
 
 /* FC Tx Frame Statistics */
 struct bfi_enet_stats_fc_tx {
@@ -734,7 +732,7 @@
 	u64 txf_parity_errors;
 	u64 txf_timeout;
 	u64 txf_fid_parity_errors;
-};
+} __packed;
 
 /* FC Rx Frame Statistics */
 struct bfi_enet_stats_fc_rx {
@@ -749,7 +747,7 @@
 	u64 rxf_bcast_octets;
 	u64 rxf_bcast;
 	u64 rxf_bcast_vlan;
-};
+} __packed;
 
 /* RAD Frame Statistics */
 struct bfi_enet_stats_rad {
@@ -770,7 +768,7 @@
 	u64 rx_bcast_vlan;
 
 	u64 rx_drops;
-};
+} __packed;
 
 /* BPC Tx Registers */
 struct bfi_enet_stats_bpc {
@@ -785,7 +783,7 @@
 	u64 rx_zero_pause[8];	/*!< Pause cancellation */
 	/*!< Pause initiation rather than retention */
 	u64 rx_first_pause[8];
-};
+} __packed;
 
 /* MAC Rx Statistics */
 struct bfi_enet_stats_mac {
@@ -838,7 +836,7 @@
 	u64 tx_oversize;
 	u64 tx_undersize;
 	u64 tx_fragments;
-};
+} __packed;
 
 /* Complete statistics, DMAed from fw to host followed by
  * BFI_ENET_I2H_STATS_GET_RSP
@@ -852,8 +850,6 @@
 	struct bfi_enet_stats_fc_tx	fc_tx_stats;
 	struct bfi_enet_stats_rxf	rxf_stats[BFI_ENET_CFG_MAX];
 	struct bfi_enet_stats_txf	txf_stats[BFI_ENET_CFG_MAX];
-};
-
-#pragma pack()
+} __packed;
 
 #endif  /* __BFI_ENET_H__ */
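
The hunks above replace this header's file-wide #pragma pack(1)/#pragma pack() bracket with per-structure __packed annotations, and turn the private mac_t fields into plain u8 arrays of ETH_ALEN bytes. A minimal sketch of what the packing conversion amounts to; the demo_hdr_* names are hypothetical, not symbols from bfi_enet.h:

    /* Both forms force byte packing of a wire structure. */
    #include <linux/types.h>

    #pragma pack(1)
    struct demo_hdr_pragma {        /* packed by the surrounding pragma */
            u8      msg_class;
            u32     msg_id;         /* no 3-byte pad inserted before this */
    };
    #pragma pack()

    struct demo_hdr_attr {          /* same layout via the attribute */
            u8      msg_class;
            u32     msg_id;
    } __packed;                     /* __attribute__((packed)) */

Note that __packed on an outer struct packs its members together but does not repack a nested struct's own layout, which is why the hunks also tag the anonymous inner structs (hds, q_cfg[], q_handles[]).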
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index 8ba72b1..006dcad9 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -28,36 +28,8 @@
 
 /*  Macros and constants  */
 
-#define BNA_IOC_TIMER_FREQ		200
-
-/* Log string size */
-#define BNA_MESSAGE_SIZE		256
-
 #define bna_is_small_rxq(_id) ((_id) & 0x1)
 
-#define BNA_MAC_IS_EQUAL(_mac1, _mac2)					\
-	(!memcmp((_mac1), (_mac2), sizeof(mac_t)))
-
-#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)
-
-#define BNA_TO_POWER_OF_2(x)						\
-do {									\
-	int _shift = 0;							\
-	while ((x) && (x) != 1) {					\
-		(x) >>= 1;						\
-		_shift++;						\
-	}								\
-	(x) <<= _shift;							\
-} while (0)
-
-#define BNA_TO_POWER_OF_2_HIGH(x)					\
-do {									\
-	int n = 1;							\
-	while (n < (x))							\
-		n <<= 1;						\
-	(x) = n;							\
-} while (0)
-
 /*
  * input : _addr-> os dma addr in host endian format,
  * output : _bna_dma_addr-> pointer to hw dma addr
@@ -80,62 +52,8 @@
 	| ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff));	\
 } while (0)
 
-#define	containing_rec(addr, type, field)				\
-	((type *)((unsigned char *)(addr) -				\
-	(unsigned char *)(&((type *)0)->field)))
-
 #define BNA_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)
 
-/* TxQ element is 64 bytes */
-#define BNA_TXQ_PAGE_INDEX_MAX		(PAGE_SIZE >> 6)
-#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT	(PAGE_SHIFT - 6)
-
-#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
-{									\
-	unsigned int page_index;	/* index within a page */	\
-	void *page_addr;						\
-	page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1);		\
-	(_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index);	\
-	page_addr = (_qpt_ptr)[((_qe_idx) >>  BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
-	(_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
-}
-
-/* RxQ element is 8 bytes */
-#define BNA_RXQ_PAGE_INDEX_MAX		(PAGE_SIZE >> 3)
-#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT	(PAGE_SHIFT - 3)
-
-#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
-{									\
-	unsigned int page_index;	/* index within a page */	\
-	void *page_addr;						\
-	page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1);		\
-	(_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index);	\
-	page_addr = (_qpt_ptr)[((_qe_idx) >>				\
-				BNA_RXQ_PAGE_INDEX_MAX_SHIFT)];		\
-	(_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
-}
-
-/* CQ element is 16 bytes */
-#define BNA_CQ_PAGE_INDEX_MAX		(PAGE_SIZE >> 4)
-#define BNA_CQ_PAGE_INDEX_MAX_SHIFT	(PAGE_SHIFT - 4)
-
-#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range)	\
-{									\
-	unsigned int page_index;	  /* index within a page */	\
-	void *page_addr;						\
-									\
-	page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1);		\
-	(_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index);		\
-	page_addr = (_qpt_ptr)[((_qe_idx) >>				\
-				    BNA_CQ_PAGE_INDEX_MAX_SHIFT)];	\
-	(_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
-}
-
-#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base)			\
-	(&((_cast *)(_q_base))[(_qe_idx)])
-
-#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))
-
 #define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth)			\
 	((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))
 
@@ -147,31 +65,10 @@
 #define BNA_QE_FREE_CNT(_q_ptr, _q_depth)				\
 	(((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) &	\
 	 ((_q_depth) - 1))
-
 #define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth)				\
 	((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) &	\
 	 (_q_depth - 1))
 
-#define BNA_Q_GET_CI(_q_ptr)		((_q_ptr)->q.consumer_index)
-
-#define BNA_Q_GET_PI(_q_ptr)		((_q_ptr)->q.producer_index)
-
-#define BNA_Q_PI_ADD(_q_ptr, _num)					\
-	(_q_ptr)->q.producer_index =					\
-		(((_q_ptr)->q.producer_index + (_num)) &		\
-		((_q_ptr)->q.q_depth - 1))
-
-#define BNA_Q_CI_ADD(_q_ptr, _num)					\
-	(_q_ptr)->q.consumer_index =					\
-		(((_q_ptr)->q.consumer_index + (_num))			\
-		& ((_q_ptr)->q.q_depth - 1))
-
-#define BNA_Q_FREE_COUNT(_q_ptr)					\
-	(BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
-
-#define BNA_Q_IN_USE_COUNT(_q_ptr)					\
-	(BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
-
 #define BNA_LARGE_PKT_SIZE		1000
 
 #define BNA_UPDATE_PKT_CNT(_pkt, _len)					\
@@ -222,21 +119,6 @@
 	}								\
 } while (0)
 
-#define	call_rxf_pause_cbfn(rxf)					\
-do {									\
-	if ((rxf)->oper_state_cbfn) {					\
-		void (*cbfn)(struct bnad *, struct bna_rx *);	\
-		struct bnad *cbarg;					\
-		cbfn = (rxf)->oper_state_cbfn;				\
-		cbarg = (rxf)->oper_state_cbarg;			\
-		(rxf)->oper_state_cbfn = NULL;				\
-		(rxf)->oper_state_cbarg = NULL;				\
-		cbfn(cbarg, rxf->rx);					\
-	}								\
-} while (0)
-
-#define	call_rxf_resume_cbfn(rxf) call_rxf_pause_cbfn(rxf)
-
 #define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))
 
 #define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx))
@@ -326,28 +208,24 @@
 #define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask)
 
 #define bna_tx_from_rid(_bna, _rid, _tx)				\
-do {								    \
-	struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod;	  \
-	struct bna_tx *__tx;					    \
-	struct list_head *qe;					   \
-	_tx = NULL;						     \
-	list_for_each(qe, &__tx_mod->tx_active_q) {		     \
-		__tx = (struct bna_tx *)qe;			     \
-		if (__tx->rid == (_rid)) {			      \
-			(_tx) = __tx;				   \
-			break;					  \
-		}						       \
-	}							       \
+do {									\
+	struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod;			\
+	struct bna_tx *__tx;						\
+	_tx = NULL;							\
+	list_for_each_entry(__tx, &__tx_mod->tx_active_q, qe) {		\
+		if (__tx->rid == (_rid)) {				\
+			(_tx) = __tx;					\
+			break;						\
+		}							\
+	}								\
 } while (0)
 
 #define bna_rx_from_rid(_bna, _rid, _rx)				\
 do {									\
 	struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod;			\
 	struct bna_rx *__rx;						\
-	struct list_head *qe;						\
 	_rx = NULL;							\
-	list_for_each(qe, &__rx_mod->rx_active_q) {			\
-		__rx = (struct bna_rx *)qe;				\
+	list_for_each_entry(__rx, &__rx_mod->rx_active_q, qe) {		\
 		if (__rx->rid == (_rid)) {				\
 			(_rx) = __rx;					\
 			break;						\
@@ -365,17 +243,14 @@
 
 /*  Inline functions  */
 
-static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
+static inline struct bna_mac *bna_mac_find(struct list_head *q, const u8 *addr)
 {
-	struct bna_mac *mac = NULL;
-	struct list_head *qe;
-	list_for_each(qe, q) {
-		if (BNA_MAC_IS_EQUAL(((struct bna_mac *)qe)->addr, addr)) {
-			mac = (struct bna_mac *)qe;
-			break;
-		}
-	}
-	return mac;
+	struct bna_mac *mac;
+
+	list_for_each_entry(mac, q, qe)
+		if (ether_addr_equal(mac->addr, addr))
+			return mac;
+	return NULL;
 }
 
 #define bna_attr(_bna) (&(_bna)->ioceth.attr)
@@ -401,7 +276,6 @@
 
 /* APIs for RxF */
 struct bna_mac *bna_cam_mod_mac_get(struct list_head *head);
-void bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac);
 struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
 void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
 			  struct bna_mcam_handle *handle);
@@ -488,31 +362,19 @@
 void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
 void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
 void bna_rx_dim_update(struct bna_ccb *ccb);
-enum bna_cb_status
-bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
-		 void (*cbfn)(struct bnad *, struct bna_rx *));
-enum bna_cb_status
-bna_rx_ucast_add(struct bna_rx *rx, u8* ucmac,
-		 void (*cbfn)(struct bnad *, struct bna_rx *));
-enum bna_cb_status
-bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
-		 void (*cbfn)(struct bnad *, struct bna_rx *));
-enum bna_cb_status
-bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
-		     void (*cbfn)(struct bnad *, struct bna_rx *));
-enum bna_cb_status
-bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
-		 void (*cbfn)(struct bnad *, struct bna_rx *));
-enum bna_cb_status
-bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
-		     void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status bna_rx_ucast_set(struct bna_rx *rx, const u8 *ucmac);
+enum bna_cb_status bna_rx_ucast_listset(struct bna_rx *rx, int count,
+					const u8 *uclist);
+enum bna_cb_status bna_rx_mcast_add(struct bna_rx *rx, const u8 *mcmac,
+				    void (*cbfn)(struct bnad *,
+						 struct bna_rx *));
+enum bna_cb_status bna_rx_mcast_listset(struct bna_rx *rx, int count,
+					const u8 *mcmac);
 void
-bna_rx_mcast_delall(struct bna_rx *rx,
-		    void (*cbfn)(struct bnad *, struct bna_rx *));
+bna_rx_mcast_delall(struct bna_rx *rx);
 enum bna_cb_status
 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
-		enum bna_rxmode bitmask,
-		void (*cbfn)(struct bnad *, struct bna_rx *));
+		enum bna_rxmode bitmask);
 void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
 void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
 void bna_rx_vlanfilter_enable(struct bna_rx *rx);
@@ -532,11 +394,10 @@
 void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
 		      void (*cbfn)(void *));
 void bna_enet_pause_config(struct bna_enet *enet,
-			   struct bna_pause_config *pause_config,
-			   void (*cbfn)(struct bnad *));
+			   struct bna_pause_config *pause_config);
 void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
 		      void (*cbfn)(struct bnad *));
-void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);
+void bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac);
 
 /* IOCETH */
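
bna_mac_find() above now walks its queue with list_for_each_entry() and compares addresses with ether_addr_equal() in place of the removed BNA_MAC_IS_EQUAL()/memcmp() pair; that is also what lets the address parameters become const u8 * in the prototypes. A sketch of the pattern under hypothetical names (demo_node, demo_find):

    #include <linux/etherdevice.h>
    #include <linux/list.h>

    struct demo_node {                      /* stand-in for struct bna_mac */
            u8                      addr[ETH_ALEN];
            struct list_head        qe;
    };

    static struct demo_node *demo_find(struct list_head *q, const u8 *addr)
    {
            struct demo_node *n;

            /* typed iteration: no (struct demo_node *)qe casts needed */
            list_for_each_entry(n, q, qe)
                    if (ether_addr_equal(n->addr, addr)) /* 6-byte compare */
                            return n;
            return NULL;
    }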
 
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index deb8da6..4e5c387 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -207,7 +207,7 @@
 	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
 		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
 		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
-		if (rx_enet_mask & ((u32)(1 << i))) {
+		if (rx_enet_mask & BIT(i)) {
 			int k;
 			count = sizeof(struct bfi_enet_stats_rxf) /
 				sizeof(u64);
@@ -222,7 +222,7 @@
 	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
 		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
 		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
-		if (tx_enet_mask & ((u32)(1 << i))) {
+		if (tx_enet_mask & BIT(i)) {
 			int k;
 			count = sizeof(struct bfi_enet_stats_txf) /
 				sizeof(u64);
@@ -884,16 +884,6 @@
 	}								\
 } while (0)
 
-#define call_enet_pause_cbfn(enet)					\
-do {									\
-	if ((enet)->pause_cbfn) {					\
-		void (*cbfn)(struct bnad *);				\
-		cbfn = (enet)->pause_cbfn;				\
-		(enet)->pause_cbfn = NULL;				\
-		cbfn((enet)->bna->bnad);				\
-	}								\
-} while (0)
-
 #define call_enet_mtu_cbfn(enet)					\
 do {									\
 	if ((enet)->mtu_cbfn) {						\
@@ -925,7 +915,6 @@
 static void
 bna_enet_sm_stopped_entry(struct bna_enet *enet)
 {
-	call_enet_pause_cbfn(enet);
 	call_enet_mtu_cbfn(enet);
 	call_enet_stop_cbfn(enet);
 }
@@ -947,7 +936,6 @@
 		break;
 
 	case ENET_E_PAUSE_CFG:
-		call_enet_pause_cbfn(enet);
 		break;
 
 	case ENET_E_MTU_CFG:
@@ -1039,7 +1027,6 @@
 	 * NOTE: Do not call bna_enet_chld_start() here, since it will be
 	 * inadvertently called during cfg_wait->started transition as well
 	 */
-	call_enet_pause_cbfn(enet);
 	call_enet_mtu_cbfn(enet);
 }
 
@@ -1211,8 +1198,6 @@
 	enet->stop_cbfn = NULL;
 	enet->stop_cbarg = NULL;
 
-	enet->pause_cbfn = NULL;
-
 	enet->mtu_cbfn = NULL;
 
 	bfa_fsm_set_state(enet, bna_enet_sm_stopped);
@@ -1308,13 +1293,10 @@
 
 void
 bna_enet_pause_config(struct bna_enet *enet,
-		      struct bna_pause_config *pause_config,
-		      void (*cbfn)(struct bnad *))
+		      struct bna_pause_config *pause_config)
 {
 	enet->pause_config = *pause_config;
 
-	enet->pause_cbfn = cbfn;
-
 	bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
 }
 
@@ -1330,9 +1312,9 @@
 }
 
 void
-bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
+bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac)
 {
-	*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
+	bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc, mac);
 }
 
 /* IOCETH */
@@ -1810,17 +1792,13 @@
 	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
 
 	INIT_LIST_HEAD(&ucam_mod->free_q);
-	for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
-		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+	for (i = 0; i < bna->ioceth.attr.num_ucmac; i++)
 		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
-	}
 
 	/* A separate queue to allow synchronous setting of a list of MACs */
 	INIT_LIST_HEAD(&ucam_mod->del_q);
-	for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) {
-		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+	for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++)
 		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
-	}
 
 	ucam_mod->bna = bna;
 }
@@ -1828,17 +1806,6 @@
 static void
 bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
 {
-	struct list_head *qe;
-	int i;
-
-	i = 0;
-	list_for_each(qe, &ucam_mod->free_q)
-		i++;
-
-	i = 0;
-	list_for_each(qe, &ucam_mod->del_q)
-		i++;
-
 	ucam_mod->bna = NULL;
 }
 
@@ -1852,27 +1819,21 @@
 	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
 
 	INIT_LIST_HEAD(&mcam_mod->free_q);
-	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
-		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
 		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
-	}
 
 	mcam_mod->mchandle = (struct bna_mcam_handle *)
 	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
 
 	INIT_LIST_HEAD(&mcam_mod->free_handle_q);
-	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
-		bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
+	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
 		list_add_tail(&mcam_mod->mchandle[i].qe,
-				&mcam_mod->free_handle_q);
-	}
+			      &mcam_mod->free_handle_q);
 
 	/* A separate queue to allow synchronous setting of a list of MACs */
 	INIT_LIST_HEAD(&mcam_mod->del_q);
-	for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) {
-		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+	for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++)
 		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
-	}
 
 	mcam_mod->bna = bna;
 }
@@ -1880,18 +1841,6 @@
 static void
 bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
 {
-	struct list_head *qe;
-	int i;
-
-	i = 0;
-	list_for_each(qe, &mcam_mod->free_q) i++;
-
-	i = 0;
-	list_for_each(qe, &mcam_mod->del_q) i++;
-
-	i = 0;
-	list_for_each(qe, &mcam_mod->free_handle_q) i++;
-
 	mcam_mod->bna = NULL;
 }
 
@@ -2108,32 +2057,26 @@
 struct bna_mac *
 bna_cam_mod_mac_get(struct list_head *head)
 {
-	struct list_head *qe;
+	struct bna_mac *mac;
 
-	if (list_empty(head))
-		return NULL;
+	mac = list_first_entry_or_null(head, struct bna_mac, qe);
+	if (mac)
+		list_del(&mac->qe);
 
-	bfa_q_deq(head, &qe);
-	return (struct bna_mac *)qe;
-}
-
-void
-bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac)
-{
-	list_add_tail(&mac->qe, tail);
+	return mac;
 }
 
 struct bna_mcam_handle *
 bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
 {
-	struct list_head *qe;
+	struct bna_mcam_handle *handle;
 
-	if (list_empty(&mcam_mod->free_handle_q))
-		return NULL;
+	handle = list_first_entry_or_null(&mcam_mod->free_handle_q,
+					  struct bna_mcam_handle, qe);
+	if (handle)
+		list_del(&handle->qe);
 
-	bfa_q_deq(&mcam_mod->free_handle_q, &qe);
-
-	return (struct bna_mcam_handle *)qe;
+	return handle;
 }
 
 void
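
bna_cam_mod_mac_get() and bna_mcam_mod_handle_get() above replace the bfa_q_deq() cast-and-pop with list_first_entry_or_null() followed by list_del(). A sketch of the idiom, again with a hypothetical demo_node:

    #include <linux/list.h>

    struct demo_node {
            struct list_head qe;
    };

    static struct demo_node *demo_pop(struct list_head *head)
    {
            struct demo_node *n;

            /* NULL when the list is empty, the first entry otherwise */
            n = list_first_entry_or_null(head, struct demo_node, qe);
            if (n)
                    list_del(&n->qe);       /* detach before handing it out */
            return n;
    }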
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index 174af0e..52b45c9 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -213,7 +213,7 @@
  * 15 bits (32K) should be large enough to accumulate anyway, and the maximum
  * acked events to h/w can be (32K + max poll weight) (currently 64).
  */
-#define	BNA_IB_MAX_ACK_EVENTS		(1 << 15)
+#define BNA_IB_MAX_ACK_EVENTS		BIT(15)
 
 /* These macros build the data portion of the TxQ/RxQ doorbell */
 #define BNA_DOORBELL_Q_PRD_IDX(_pi)	(0x80000000 | (_pi))
@@ -282,13 +282,13 @@
 #define BNA_TXQ_WI_EXTENSION		(0x104)	/* Extension WI */
 
 /* TxQ Entry Control Flags */
-#define BNA_TXQ_WI_CF_FCOE_CRC		(1 << 8)
-#define BNA_TXQ_WI_CF_IPID_MODE		(1 << 5)
-#define BNA_TXQ_WI_CF_INS_PRIO		(1 << 4)
-#define BNA_TXQ_WI_CF_INS_VLAN		(1 << 3)
-#define BNA_TXQ_WI_CF_UDP_CKSUM		(1 << 2)
-#define BNA_TXQ_WI_CF_TCP_CKSUM		(1 << 1)
-#define BNA_TXQ_WI_CF_IP_CKSUM		(1 << 0)
+#define BNA_TXQ_WI_CF_FCOE_CRC		BIT(8)
+#define BNA_TXQ_WI_CF_IPID_MODE		BIT(5)
+#define BNA_TXQ_WI_CF_INS_PRIO		BIT(4)
+#define BNA_TXQ_WI_CF_INS_VLAN		BIT(3)
+#define BNA_TXQ_WI_CF_UDP_CKSUM		BIT(2)
+#define BNA_TXQ_WI_CF_TCP_CKSUM		BIT(1)
+#define BNA_TXQ_WI_CF_IP_CKSUM		BIT(0)
 
 #define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
 		(((_hdr_size) << 10) | ((_offset) & 0x3FF))
@@ -297,36 +297,36 @@
  * Completion Q defines
  */
 /* CQ Entry Flags */
-#define	BNA_CQ_EF_MAC_ERROR	(1 <<  0)
-#define	BNA_CQ_EF_FCS_ERROR	(1 <<  1)
-#define	BNA_CQ_EF_TOO_LONG	(1 <<  2)
-#define	BNA_CQ_EF_FC_CRC_OK	(1 <<  3)
+#define BNA_CQ_EF_MAC_ERROR	BIT(0)
+#define BNA_CQ_EF_FCS_ERROR	BIT(1)
+#define BNA_CQ_EF_TOO_LONG	BIT(2)
+#define BNA_CQ_EF_FC_CRC_OK	BIT(3)
 
-#define	BNA_CQ_EF_RSVD1		(1 <<  4)
-#define	BNA_CQ_EF_L4_CKSUM_OK	(1 <<  5)
-#define	BNA_CQ_EF_L3_CKSUM_OK	(1 <<  6)
-#define	BNA_CQ_EF_HDS_HEADER	(1 <<  7)
+#define BNA_CQ_EF_RSVD1		BIT(4)
+#define BNA_CQ_EF_L4_CKSUM_OK	BIT(5)
+#define BNA_CQ_EF_L3_CKSUM_OK	BIT(6)
+#define BNA_CQ_EF_HDS_HEADER	BIT(7)
 
-#define	BNA_CQ_EF_UDP		(1 <<  8)
-#define	BNA_CQ_EF_TCP		(1 <<  9)
-#define	BNA_CQ_EF_IP_OPTIONS	(1 << 10)
-#define	BNA_CQ_EF_IPV6		(1 << 11)
+#define BNA_CQ_EF_UDP		BIT(8)
+#define BNA_CQ_EF_TCP		BIT(9)
+#define BNA_CQ_EF_IP_OPTIONS	BIT(10)
+#define BNA_CQ_EF_IPV6		BIT(11)
 
-#define	BNA_CQ_EF_IPV4		(1 << 12)
-#define	BNA_CQ_EF_VLAN		(1 << 13)
-#define	BNA_CQ_EF_RSS		(1 << 14)
-#define	BNA_CQ_EF_RSVD2		(1 << 15)
+#define BNA_CQ_EF_IPV4		BIT(12)
+#define BNA_CQ_EF_VLAN		BIT(13)
+#define BNA_CQ_EF_RSS		BIT(14)
+#define BNA_CQ_EF_RSVD2		BIT(15)
 
-#define	BNA_CQ_EF_MCAST_MATCH   (1 << 16)
-#define	BNA_CQ_EF_MCAST		(1 << 17)
-#define BNA_CQ_EF_BCAST		(1 << 18)
-#define	BNA_CQ_EF_REMOTE	(1 << 19)
+#define BNA_CQ_EF_MCAST_MATCH   BIT(16)
+#define BNA_CQ_EF_MCAST		BIT(17)
+#define BNA_CQ_EF_BCAST		BIT(18)
+#define BNA_CQ_EF_REMOTE	BIT(19)
 
-#define	BNA_CQ_EF_LOCAL		(1 << 20)
+#define BNA_CQ_EF_LOCAL		BIT(20)
 /* CAT2 ASIC does not use bit 21 as per the SPEC.
  * Bit 31 is set in every end of frame completion
  */
-#define BNA_CQ_EF_EOP		(1 << 31)
+#define BNA_CQ_EF_EOP		BIT(31)
 
 /* Data structures */
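
The flag conversions in this header swap open-coded shifts for the BIT() macro (linux/bitops.h in this era). BIT(n) expands to 1UL << (n), so BIT(31) for BNA_CQ_EF_EOP also sidesteps the shift into the sign bit that (1 << 31) performs on a 32-bit int. A sketch under a hypothetical flag name:

    #include <linux/bitops.h>
    #include <linux/types.h>

    #define DEMO_FLAG_EOP   BIT(31)         /* 1UL << 31: stays unsigned */

    static inline bool demo_is_eop(u32 flags)
    {
            return flags & DEMO_FLAG_EOP;   /* usual flag test */
    }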
 
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 8ab3a5f..5d0753c 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -46,7 +46,6 @@
 
 static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
 static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
-static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
 static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
 static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
 static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
@@ -60,14 +59,10 @@
 
 bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
 			enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
-			enum bna_rxf_event);
 bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
 			enum bna_rxf_event);
 bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
 			enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
-			enum bna_rxf_event);
 bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
 			enum bna_rxf_event);
 
@@ -82,11 +77,7 @@
 {
 	switch (event) {
 	case RXF_E_START:
-		if (rxf->flags & BNA_RXF_F_PAUSED) {
-			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
-			call_rxf_start_cbfn(rxf);
-		} else
-			bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
+		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
 		break;
 
 	case RXF_E_STOP:
@@ -101,45 +92,6 @@
 		call_rxf_cam_fltr_cbfn(rxf);
 		break;
 
-	case RXF_E_PAUSE:
-		rxf->flags |= BNA_RXF_F_PAUSED;
-		call_rxf_pause_cbfn(rxf);
-		break;
-
-	case RXF_E_RESUME:
-		rxf->flags &= ~BNA_RXF_F_PAUSED;
-		call_rxf_resume_cbfn(rxf);
-		break;
-
-	default:
-		bfa_sm_fault(event);
-	}
-}
-
-static void
-bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
-{
-	call_rxf_pause_cbfn(rxf);
-}
-
-static void
-bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
-	switch (event) {
-	case RXF_E_STOP:
-	case RXF_E_FAIL:
-		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
-		break;
-
-	case RXF_E_CONFIG:
-		call_rxf_cam_fltr_cbfn(rxf);
-		break;
-
-	case RXF_E_RESUME:
-		rxf->flags &= ~BNA_RXF_F_PAUSED;
-		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
-		break;
-
 	default:
 		bfa_sm_fault(event);
 	}
@@ -166,7 +118,6 @@
 		bna_rxf_cfg_reset(rxf);
 		call_rxf_start_cbfn(rxf);
 		call_rxf_cam_fltr_cbfn(rxf);
-		call_rxf_resume_cbfn(rxf);
 		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
 		break;
 
@@ -174,12 +125,6 @@
 		/* No-op */
 		break;
 
-	case RXF_E_PAUSE:
-		rxf->flags |= BNA_RXF_F_PAUSED;
-		call_rxf_start_cbfn(rxf);
-		bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
-		break;
-
 	case RXF_E_FW_RESP:
 		if (!bna_rxf_cfg_apply(rxf)) {
 			/* No more pending config updates */
@@ -197,7 +142,6 @@
 {
 	call_rxf_start_cbfn(rxf);
 	call_rxf_cam_fltr_cbfn(rxf);
-	call_rxf_resume_cbfn(rxf);
 }
 
 static void
@@ -214,41 +158,6 @@
 		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
 		break;
 
-	case RXF_E_PAUSE:
-		rxf->flags |= BNA_RXF_F_PAUSED;
-		if (!bna_rxf_fltr_clear(rxf))
-			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
-		else
-			bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
-		break;
-
-	default:
-		bfa_sm_fault(event);
-	}
-}
-
-static void
-bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
-{
-}
-
-static void
-bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
-	switch (event) {
-	case RXF_E_FAIL:
-		bna_rxf_cfg_reset(rxf);
-		call_rxf_pause_cbfn(rxf);
-		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
-		break;
-
-	case RXF_E_FW_RESP:
-		if (!bna_rxf_fltr_clear(rxf)) {
-			/* No more pending CAM entries to clear */
-			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
-		}
-		break;
-
 	default:
 		bfa_sm_fault(event);
 	}
@@ -283,7 +192,7 @@
 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
 	req->mh.num_entries = htons(
 	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
-	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
+	ether_addr_copy(req->mac_addr, mac->addr);
 	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
 		sizeof(struct bfi_enet_ucast_req), &req->mh);
 	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
@@ -299,7 +208,7 @@
 		0, rxf->rx->rid);
 	req->mh.num_entries = htons(
 	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
-	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
+	ether_addr_copy(req->mac_addr, mac->addr);
 	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
 		sizeof(struct bfi_enet_mcast_add_req), &req->mh);
 	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
@@ -444,22 +353,17 @@
 
 /* This function gets the multicast MAC that has already been added to CAM */
 static struct bna_mac *
-bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
+bna_rxf_mcmac_get(struct bna_rxf *rxf, const u8 *mac_addr)
 {
 	struct bna_mac *mac;
-	struct list_head *qe;
 
-	list_for_each(qe, &rxf->mcast_active_q) {
-		mac = (struct bna_mac *)qe;
-		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
+	list_for_each_entry(mac, &rxf->mcast_active_q, qe)
+		if (ether_addr_equal(mac->addr, mac_addr))
 			return mac;
-	}
 
-	list_for_each(qe, &rxf->mcast_pending_del_q) {
-		mac = (struct bna_mac *)qe;
-		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
+	list_for_each_entry(mac, &rxf->mcast_pending_del_q, qe)
+		if (ether_addr_equal(mac->addr, mac_addr))
 			return mac;
-	}
 
 	return NULL;
 }
@@ -468,13 +372,10 @@
 bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
 {
 	struct bna_mcam_handle *mchandle;
-	struct list_head *qe;
 
-	list_for_each(qe, &rxf->mcast_handle_q) {
-		mchandle = (struct bna_mcam_handle *)qe;
+	list_for_each_entry(mchandle, &rxf->mcast_handle_q, qe)
 		if (mchandle->handle == handle)
 			return mchandle;
-	}
 
 	return NULL;
 }
@@ -515,7 +416,6 @@
 			ret = 1;
 		}
 		list_del(&mchandle->qe);
-		bfa_q_qe_init(&mchandle->qe);
 		bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
 	}
 	mac->handle = NULL;
@@ -527,26 +427,23 @@
 bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
 {
 	struct bna_mac *mac = NULL;
-	struct list_head *qe;
 	int ret;
 
 	/* First delete multicast entries to maintain the count */
 	while (!list_empty(&rxf->mcast_pending_del_q)) {
-		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
-		bfa_q_qe_init(qe);
-		mac = (struct bna_mac *)qe;
+		mac = list_first_entry(&rxf->mcast_pending_del_q,
+				       struct bna_mac, qe);
 		ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
-		bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
+		list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
 		if (ret)
 			return ret;
 	}
 
 	/* Add multicast entries */
 	if (!list_empty(&rxf->mcast_pending_add_q)) {
-		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
-		bfa_q_qe_init(qe);
-		mac = (struct bna_mac *)qe;
-		list_add_tail(&mac->qe, &rxf->mcast_active_q);
+		mac = list_first_entry(&rxf->mcast_pending_add_q,
+				       struct bna_mac, qe);
+		list_move_tail(&mac->qe, &rxf->mcast_active_q);
 		bna_bfi_mcast_add_req(rxf, mac);
 		return 1;
 	}
@@ -566,7 +463,7 @@
 			block_idx++;
 			vlan_pending_bitmask >>= 1;
 		}
-		rxf->vlan_pending_bitmask &= ~(1 << block_idx);
+		rxf->vlan_pending_bitmask &= ~BIT(block_idx);
 		bna_bfi_rx_vlan_filter_set(rxf, block_idx);
 		return 1;
 	}
@@ -577,27 +474,24 @@
 static int
 bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
 {
-	struct list_head *qe;
 	struct bna_mac *mac;
 	int ret;
 
 	/* Throw away delete-pending mcast entries */
 	while (!list_empty(&rxf->mcast_pending_del_q)) {
-		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
-		bfa_q_qe_init(qe);
-		mac = (struct bna_mac *)qe;
+		mac = list_first_entry(&rxf->mcast_pending_del_q,
+				       struct bna_mac, qe);
 		ret = bna_rxf_mcast_del(rxf, mac, cleanup);
-		bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
+		list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
 		if (ret)
 			return ret;
 	}
 
 	/* Move active mcast entries to pending_add_q */
 	while (!list_empty(&rxf->mcast_active_q)) {
-		bfa_q_deq(&rxf->mcast_active_q, &qe);
-		bfa_q_qe_init(qe);
-		list_add_tail(qe, &rxf->mcast_pending_add_q);
-		mac = (struct bna_mac *)qe;
+		mac = list_first_entry(&rxf->mcast_active_q,
+				       struct bna_mac, qe);
+		list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
 		if (bna_rxf_mcast_del(rxf, mac, cleanup))
 			return 1;
 	}
@@ -658,25 +552,6 @@
 	return 0;
 }
 
-/* Only software reset */
-static int
-bna_rxf_fltr_clear(struct bna_rxf *rxf)
-{
-	if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
-		return 1;
-
-	if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
-		return 1;
-
-	if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
-		return 1;
-
-	if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
-		return 1;
-
-	return 0;
-}
-
 static void
 bna_rxf_cfg_reset(struct bna_rxf *rxf)
 {
@@ -693,16 +568,13 @@
 {
 	struct bna_rx *rx = rxf->rx;
 	struct bna_rxp *rxp;
-	struct list_head *qe;
 	int offset = 0;
 
 	rxf->rit_size = rit_size;
-	list_for_each(qe, &rx->rxp_q) {
-		rxp = (struct bna_rxp *)qe;
+	list_for_each_entry(rxp, &rx->rxp_q, qe) {
 		rxf->rit[offset] = rxp->cq.ccb->id;
 		offset++;
 	}
-
 }
 
 void
@@ -760,9 +632,6 @@
 	INIT_LIST_HEAD(&rxf->mcast_active_q);
 	INIT_LIST_HEAD(&rxf->mcast_handle_q);
 
-	if (q_config->paused)
-		rxf->flags |= BNA_RXF_F_PAUSED;
-
 	rxf->rit = (u8 *)
 		res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
 	bna_rit_init(rxf, q_config->num_paths);
@@ -795,22 +664,21 @@
 	rxf->ucast_active_set = 0;
 
 	while (!list_empty(&rxf->ucast_pending_add_q)) {
-		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
-		bfa_q_qe_init(&mac->qe);
-		bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
+		mac = list_first_entry(&rxf->ucast_pending_add_q,
+				       struct bna_mac, qe);
+		list_move_tail(&mac->qe, bna_ucam_mod_free_q(rxf->rx->bna));
 	}
 
 	if (rxf->ucast_pending_mac) {
-		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
-		bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
-				    rxf->ucast_pending_mac);
+		list_add_tail(&rxf->ucast_pending_mac->qe,
+			      bna_ucam_mod_free_q(rxf->rx->bna));
 		rxf->ucast_pending_mac = NULL;
 	}
 
 	while (!list_empty(&rxf->mcast_pending_add_q)) {
-		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
-		bfa_q_qe_init(&mac->qe);
-		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+		mac = list_first_entry(&rxf->mcast_pending_add_q,
+				       struct bna_mac, qe);
+		list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
 	}
 
 	rxf->rxmode_pending = 0;
@@ -823,8 +691,6 @@
 	rxf->rss_pending = 0;
 	rxf->vlan_strip_pending = false;
 
-	rxf->flags = 0;
-
 	rxf->rx = NULL;
 }
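
The cleanup paths above collapse the three-step bfa_q_deq()/bfa_q_qe_init()/bna_cam_mod_mac_put() sequence into list_first_entry() plus list_move_tail(). A sketch of draining one queue into another, with hypothetical names:

    #include <linux/list.h>

    struct demo_node {
            struct list_head qe;
    };

    static void demo_drain(struct list_head *pending_q, struct list_head *free_q)
    {
            struct demo_node *n;

            while (!list_empty(pending_q)) {
                    n = list_first_entry(pending_q, struct demo_node, qe);
                    /* unlink from pending_q, append to free_q, in one call */
                    list_move_tail(&n->qe, free_q);
            }
    }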
 
@@ -863,8 +729,7 @@
 }
 
 enum bna_cb_status
-bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
-		 void (*cbfn)(struct bnad *, struct bna_rx *))
+bna_rx_ucast_set(struct bna_rx *rx, const u8 *ucmac)
 {
 	struct bna_rxf *rxf = &rx->rxf;
 
@@ -873,12 +738,11 @@
 			bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
 		if (rxf->ucast_pending_mac == NULL)
 			return BNA_CB_UCAST_CAM_FULL;
-		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
 	}
 
-	memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
+	ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
 	rxf->ucast_pending_set = 1;
-	rxf->cam_fltr_cbfn = cbfn;
+	rxf->cam_fltr_cbfn = NULL;
 	rxf->cam_fltr_cbarg = rx->bna->bnad;
 
 	bfa_fsm_send_event(rxf, RXF_E_CONFIG);
@@ -887,7 +751,7 @@
 }
 
 enum bna_cb_status
-bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
+bna_rx_mcast_add(struct bna_rx *rx, const u8 *addr,
 		 void (*cbfn)(struct bnad *, struct bna_rx *))
 {
 	struct bna_rxf *rxf = &rx->rxf;
@@ -904,8 +768,7 @@
 	mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
 	if (mac == NULL)
 		return BNA_CB_MCAST_LIST_FULL;
-	bfa_q_qe_init(&mac->qe);
-	memcpy(mac->addr, addr, ETH_ALEN);
+	ether_addr_copy(mac->addr, addr);
 	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
 
 	rxf->cam_fltr_cbfn = cbfn;
@@ -917,35 +780,31 @@
 }
 
 enum bna_cb_status
-bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
-		     void (*cbfn)(struct bnad *, struct bna_rx *))
+bna_rx_ucast_listset(struct bna_rx *rx, int count, const u8 *uclist)
 {
 	struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
 	struct bna_rxf *rxf = &rx->rxf;
 	struct list_head list_head;
-	struct list_head *qe;
-	u8 *mcaddr;
+	const u8 *mcaddr;
 	struct bna_mac *mac, *del_mac;
 	int i;
 
 	/* Purge the pending_add_q */
 	while (!list_empty(&rxf->ucast_pending_add_q)) {
-		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
-		bfa_q_qe_init(qe);
-		mac = (struct bna_mac *)qe;
-		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+		mac = list_first_entry(&rxf->ucast_pending_add_q,
+				       struct bna_mac, qe);
+		list_move_tail(&mac->qe, &ucam_mod->free_q);
 	}
 
 	/* Schedule active_q entries for deletion */
 	while (!list_empty(&rxf->ucast_active_q)) {
-		bfa_q_deq(&rxf->ucast_active_q, &qe);
-		mac = (struct bna_mac *)qe;
-		bfa_q_qe_init(&mac->qe);
-
+		mac = list_first_entry(&rxf->ucast_active_q,
+				       struct bna_mac, qe);
 		del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
-		memcpy(del_mac, mac, sizeof(*del_mac));
+		ether_addr_copy(del_mac->addr, mac->addr);
+		del_mac->handle = mac->handle;
 		list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
-		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+		list_move_tail(&mac->qe, &ucam_mod->free_q);
 	}
 
 	/* Allocate nodes */
@@ -954,69 +813,57 @@
 		mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
 		if (mac == NULL)
 			goto err_return;
-		bfa_q_qe_init(&mac->qe);
-		memcpy(mac->addr, mcaddr, ETH_ALEN);
+		ether_addr_copy(mac->addr, mcaddr);
 		list_add_tail(&mac->qe, &list_head);
 		mcaddr += ETH_ALEN;
 	}
 
 	/* Add the new entries */
 	while (!list_empty(&list_head)) {
-		bfa_q_deq(&list_head, &qe);
-		mac = (struct bna_mac *)qe;
-		bfa_q_qe_init(&mac->qe);
-		list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
+		mac = list_first_entry(&list_head, struct bna_mac, qe);
+		list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
 	}
 
-	rxf->cam_fltr_cbfn = cbfn;
-	rxf->cam_fltr_cbarg = rx->bna->bnad;
 	bfa_fsm_send_event(rxf, RXF_E_CONFIG);
 
 	return BNA_CB_SUCCESS;
 
 err_return:
 	while (!list_empty(&list_head)) {
-		bfa_q_deq(&list_head, &qe);
-		mac = (struct bna_mac *)qe;
-		bfa_q_qe_init(&mac->qe);
-		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+		mac = list_first_entry(&list_head, struct bna_mac, qe);
+		list_move_tail(&mac->qe, &ucam_mod->free_q);
 	}
 
 	return BNA_CB_UCAST_CAM_FULL;
 }
 
 enum bna_cb_status
-bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
-		     void (*cbfn)(struct bnad *, struct bna_rx *))
+bna_rx_mcast_listset(struct bna_rx *rx, int count, const u8 *mclist)
 {
 	struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
 	struct bna_rxf *rxf = &rx->rxf;
 	struct list_head list_head;
-	struct list_head *qe;
-	u8 *mcaddr;
+	const u8 *mcaddr;
 	struct bna_mac *mac, *del_mac;
 	int i;
 
 	/* Purge the pending_add_q */
 	while (!list_empty(&rxf->mcast_pending_add_q)) {
-		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
-		bfa_q_qe_init(qe);
-		mac = (struct bna_mac *)qe;
-		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+		mac = list_first_entry(&rxf->mcast_pending_add_q,
+				       struct bna_mac, qe);
+		list_move_tail(&mac->qe, &mcam_mod->free_q);
 	}
 
 	/* Schedule active_q entries for deletion */
 	while (!list_empty(&rxf->mcast_active_q)) {
-		bfa_q_deq(&rxf->mcast_active_q, &qe);
-		mac = (struct bna_mac *)qe;
-		bfa_q_qe_init(&mac->qe);
-
+		mac = list_first_entry(&rxf->mcast_active_q,
+				       struct bna_mac, qe);
 		del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
-
-		memcpy(del_mac, mac, sizeof(*del_mac));
+		ether_addr_copy(del_mac->addr, mac->addr);
+		del_mac->handle = mac->handle;
 		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
 		mac->handle = NULL;
-		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+		list_move_tail(&mac->qe, &mcam_mod->free_q);
 	}
 
 	/* Allocate nodes */
@@ -1025,8 +872,7 @@
 		mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
 		if (mac == NULL)
 			goto err_return;
-		bfa_q_qe_init(&mac->qe);
-		memcpy(mac->addr, mcaddr, ETH_ALEN);
+		ether_addr_copy(mac->addr, mcaddr);
 		list_add_tail(&mac->qe, &list_head);
 
 		mcaddr += ETH_ALEN;
@@ -1034,70 +880,52 @@
 
 	/* Add the new entries */
 	while (!list_empty(&list_head)) {
-		bfa_q_deq(&list_head, &qe);
-		mac = (struct bna_mac *)qe;
-		bfa_q_qe_init(&mac->qe);
-		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+		mac = list_first_entry(&list_head, struct bna_mac, qe);
+		list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
 	}
 
-	rxf->cam_fltr_cbfn = cbfn;
-	rxf->cam_fltr_cbarg = rx->bna->bnad;
 	bfa_fsm_send_event(rxf, RXF_E_CONFIG);
 
 	return BNA_CB_SUCCESS;
 
 err_return:
 	while (!list_empty(&list_head)) {
-		bfa_q_deq(&list_head, &qe);
-		mac = (struct bna_mac *)qe;
-		bfa_q_qe_init(&mac->qe);
-		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+		mac = list_first_entry(&list_head, struct bna_mac, qe);
+		list_move_tail(&mac->qe, &mcam_mod->free_q);
 	}
 
 	return BNA_CB_MCAST_LIST_FULL;
 }
 
 void
-bna_rx_mcast_delall(struct bna_rx *rx,
-		    void (*cbfn)(struct bnad *, struct bna_rx *))
+bna_rx_mcast_delall(struct bna_rx *rx)
 {
 	struct bna_rxf *rxf = &rx->rxf;
-	struct list_head *qe;
 	struct bna_mac *mac, *del_mac;
 	int need_hw_config = 0;
 
 	/* Purge all entries from pending_add_q */
 	while (!list_empty(&rxf->mcast_pending_add_q)) {
-		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
-		mac = (struct bna_mac *)qe;
-		bfa_q_qe_init(&mac->qe);
-		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+		mac = list_first_entry(&rxf->mcast_pending_add_q,
+				       struct bna_mac, qe);
+		list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
 	}
 
 	/* Schedule all entries in active_q for deletion */
 	while (!list_empty(&rxf->mcast_active_q)) {
-		bfa_q_deq(&rxf->mcast_active_q, &qe);
-		mac = (struct bna_mac *)qe;
-		bfa_q_qe_init(&mac->qe);
-
+		mac = list_first_entry(&rxf->mcast_active_q,
+				       struct bna_mac, qe);
+		list_del(&mac->qe);
 		del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
-
 		memcpy(del_mac, mac, sizeof(*del_mac));
 		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
 		mac->handle = NULL;
-		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+		list_add_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
 		need_hw_config = 1;
 	}
 
-	if (need_hw_config) {
-		rxf->cam_fltr_cbfn = cbfn;
-		rxf->cam_fltr_cbarg = rx->bna->bnad;
+	if (need_hw_config)
 		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
-		return;
-	}
-
-	if (cbfn)
-		(*cbfn)(rx->bna->bnad, rx);
 }
 
 void
@@ -1105,12 +933,12 @@
 {
 	struct bna_rxf *rxf = &rx->rxf;
 	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
-	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
+	int bit = BIT(vlan_id & BFI_VLAN_WORD_MASK);
 	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
 
 	rxf->vlan_filter_table[index] |= bit;
 	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
-		rxf->vlan_pending_bitmask |= (1 << group_id);
+		rxf->vlan_pending_bitmask |= BIT(group_id);
 		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
 	}
 }
@@ -1120,12 +948,12 @@
 {
 	struct bna_rxf *rxf = &rx->rxf;
 	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
-	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
+	int bit = BIT(vlan_id & BFI_VLAN_WORD_MASK);
 	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
 
 	rxf->vlan_filter_table[index] &= ~bit;
 	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
-		rxf->vlan_pending_bitmask |= (1 << group_id);
+		rxf->vlan_pending_bitmask |= BIT(group_id);
 		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
 	}
 }
@@ -1134,23 +962,21 @@
 bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
 {
 	struct bna_mac *mac = NULL;
-	struct list_head *qe;
 
 	/* Delete MAC addresses previously added */
 	if (!list_empty(&rxf->ucast_pending_del_q)) {
-		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
-		bfa_q_qe_init(qe);
-		mac = (struct bna_mac *)qe;
+		mac = list_first_entry(&rxf->ucast_pending_del_q,
+				       struct bna_mac, qe);
 		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
-		bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
+		list_move_tail(&mac->qe, bna_ucam_mod_del_q(rxf->rx->bna));
 		return 1;
 	}
 
 	/* Set default unicast MAC */
 	if (rxf->ucast_pending_set) {
 		rxf->ucast_pending_set = 0;
-		memcpy(rxf->ucast_active_mac.addr,
-			rxf->ucast_pending_mac->addr, ETH_ALEN);
+		ether_addr_copy(rxf->ucast_active_mac.addr,
+				rxf->ucast_pending_mac->addr);
 		rxf->ucast_active_set = 1;
 		bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
 			BFI_ENET_H2I_MAC_UCAST_SET_REQ);
@@ -1159,9 +985,8 @@
 
 	/* Add additional MAC entries */
 	if (!list_empty(&rxf->ucast_pending_add_q)) {
-		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
-		bfa_q_qe_init(qe);
-		mac = (struct bna_mac *)qe;
+		mac = list_first_entry(&rxf->ucast_pending_add_q,
+				       struct bna_mac, qe);
 		list_add_tail(&mac->qe, &rxf->ucast_active_q);
 		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
 		return 1;
@@ -1173,33 +998,30 @@
 static int
 bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
 {
-	struct list_head *qe;
 	struct bna_mac *mac;
 
 	/* Throw away delete-pending ucast entries */
 	while (!list_empty(&rxf->ucast_pending_del_q)) {
-		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
-		bfa_q_qe_init(qe);
-		mac = (struct bna_mac *)qe;
+		mac = list_first_entry(&rxf->ucast_pending_del_q,
+				       struct bna_mac, qe);
 		if (cleanup == BNA_SOFT_CLEANUP)
-			bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
-					    mac);
+			list_move_tail(&mac->qe,
+				       bna_ucam_mod_del_q(rxf->rx->bna));
 		else {
 			bna_bfi_ucast_req(rxf, mac,
-				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
-			bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
-					    mac);
+					  BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
+			list_move_tail(&mac->qe,
+				       bna_ucam_mod_del_q(rxf->rx->bna));
 			return 1;
 		}
 	}
 
 	/* Move active ucast entries to pending_add_q */
 	while (!list_empty(&rxf->ucast_active_q)) {
-		bfa_q_deq(&rxf->ucast_active_q, &qe);
-		bfa_q_qe_init(qe);
-		list_add_tail(qe, &rxf->ucast_pending_add_q);
+		mac = list_first_entry(&rxf->ucast_active_q,
+				       struct bna_mac, qe);
+		list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
 		if (cleanup == BNA_HARD_CLEANUP) {
-			mac = (struct bna_mac *)qe;
 			bna_bfi_ucast_req(rxf, mac,
 				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
 			return 1;
@@ -1654,14 +1476,11 @@
 bna_rx_sm_started_entry(struct bna_rx *rx)
 {
 	struct bna_rxp *rxp;
-	struct list_head *qe_rxp;
 	int is_regular = (rx->type == BNA_RX_T_REGULAR);
 
 	/* Start IB */
-	list_for_each(qe_rxp, &rx->rxp_q) {
-		rxp = (struct bna_rxp *)qe_rxp;
+	list_for_each_entry(rxp, &rx->rxp_q, qe)
 		bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
-	}
 
 	bna_ethport_cb_rx_started(&rx->bna->ethport);
 }
@@ -1804,7 +1623,6 @@
 	struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
 	struct bna_rxp *rxp = NULL;
 	struct bna_rxq *q0 = NULL, *q1 = NULL;
-	struct list_head *rxp_qe;
 	int i;
 
 	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
@@ -1814,11 +1632,9 @@
 
 	cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
 	cfg_req->num_queue_sets = rx->num_paths;
-	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
-		i < rx->num_paths;
-		i++, rxp_qe = bfa_q_next(rxp_qe)) {
-		rxp = (struct bna_rxp *)rxp_qe;
-
+	for (i = 0; i < rx->num_paths; i++) {
+		rxp = rxp ? list_next_entry(rxp, qe)
+			: list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
 		GET_RXQS(rxp, q0, q1);
 		switch (rxp->type) {
 		case BNA_RXP_SLR:
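
The queue-set fill loop above keeps a typed cursor and advances it with list_next_entry() on each pass, picking up list_first_entry() on the first iteration; the Tx config loop later in this file adopts the same shape. A sketch with hypothetical names:

    #include <linux/list.h>

    struct demo_node {
            int                     key;
            struct list_head        qe;
    };

    static int demo_sum_first(struct list_head *q, int count)
    {
            struct demo_node *n = NULL;
            int i, sum = 0;

            for (i = 0; i < count; i++) {
                    /* first pass takes the head entry, later ones step on */
                    n = n ? list_next_entry(n, qe)
                          : list_first_entry(q, struct demo_node, qe);
                    sum += n->key;
            }
            return sum;
    }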
@@ -1921,13 +1737,10 @@
 bna_rx_enet_stop(struct bna_rx *rx)
 {
 	struct bna_rxp *rxp;
-	struct list_head		 *qe_rxp;
 
 	/* Stop IB */
-	list_for_each(qe_rxp, &rx->rxp_q) {
-		rxp = (struct bna_rxp *)qe_rxp;
+	list_for_each_entry(rxp, &rx->rxp_q, qe)
 		bna_ib_stop(rx->bna, &rxp->cq.ib);
-	}
 
 	bna_bfi_rx_enet_stop(rx);
 }
@@ -1957,12 +1770,10 @@
 bna_rxq_get(struct bna_rx_mod *rx_mod)
 {
 	struct bna_rxq *rxq = NULL;
-	struct list_head	*qe = NULL;
 
-	bfa_q_deq(&rx_mod->rxq_free_q, &qe);
+	rxq = list_first_entry(&rx_mod->rxq_free_q, struct bna_rxq, qe);
+	list_del(&rxq->qe);
 	rx_mod->rxq_free_count--;
-	rxq = (struct bna_rxq *)qe;
-	bfa_q_qe_init(&rxq->qe);
 
 	return rxq;
 }
@@ -1970,7 +1781,6 @@
 static void
 bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
 {
-	bfa_q_qe_init(&rxq->qe);
 	list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
 	rx_mod->rxq_free_count++;
 }
@@ -1978,13 +1788,11 @@
 static struct bna_rxp *
 bna_rxp_get(struct bna_rx_mod *rx_mod)
 {
-	struct list_head	*qe = NULL;
 	struct bna_rxp *rxp = NULL;
 
-	bfa_q_deq(&rx_mod->rxp_free_q, &qe);
+	rxp = list_first_entry(&rx_mod->rxp_free_q, struct bna_rxp, qe);
+	list_del(&rxp->qe);
 	rx_mod->rxp_free_count--;
-	rxp = (struct bna_rxp *)qe;
-	bfa_q_qe_init(&rxp->qe);
 
 	return rxp;
 }
@@ -1992,7 +1800,6 @@
 static void
 bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
 {
-	bfa_q_qe_init(&rxp->qe);
 	list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
 	rx_mod->rxp_free_count++;
 }
@@ -2000,18 +1807,16 @@
 static struct bna_rx *
 bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
 {
-	struct list_head	*qe = NULL;
 	struct bna_rx *rx = NULL;
 
-	if (type == BNA_RX_T_REGULAR) {
-		bfa_q_deq(&rx_mod->rx_free_q, &qe);
-	} else
-		bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
+	BUG_ON(list_empty(&rx_mod->rx_free_q));
+	if (type == BNA_RX_T_REGULAR)
+		rx = list_first_entry(&rx_mod->rx_free_q, struct bna_rx, qe);
+	else
+		rx = list_last_entry(&rx_mod->rx_free_q, struct bna_rx, qe);
 
 	rx_mod->rx_free_count--;
-	rx = (struct bna_rx *)qe;
-	bfa_q_qe_init(&rx->qe);
-	list_add_tail(&rx->qe, &rx_mod->rx_active_q);
+	list_move_tail(&rx->qe, &rx_mod->rx_active_q);
 	rx->type = type;
 
 	return rx;
@@ -2020,32 +1825,13 @@
 static void
 bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
 {
-	struct list_head *prev_qe = NULL;
 	struct list_head *qe;
 
-	bfa_q_qe_init(&rx->qe);
-
-	list_for_each(qe, &rx_mod->rx_free_q) {
+	list_for_each_prev(qe, &rx_mod->rx_free_q)
 		if (((struct bna_rx *)qe)->rid < rx->rid)
-			prev_qe = qe;
-		else
 			break;
-	}
 
-	if (prev_qe == NULL) {
-		/* This is the first entry */
-		bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
-	} else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
-		/* This is the last entry */
-		list_add_tail(&rx->qe, &rx_mod->rx_free_q);
-	} else {
-		/* Somewhere in the middle */
-		bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
-		bfa_q_prev(&rx->qe) = prev_qe;
-		bfa_q_next(prev_qe) = &rx->qe;
-		bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
-	}
-
+	list_add(&rx->qe, qe);
 	rx_mod->rx_free_count++;
 }
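
bna_rx_put() above replaces the open-coded prev_qe splice with a backwards scan: list_for_each_prev() stops at the last entry whose rid is smaller, and list_add() then inserts right behind it, or at the front when the scan reaches the head, keeping the free list sorted by rid. A sketch with hypothetical names:

    #include <linux/list.h>

    struct demo_node {
            int                     key;
            struct list_head        qe;
    };

    static void demo_insert_sorted(struct list_head *head, struct demo_node *node)
    {
            struct list_head *pos;

            /* scan from the tail; stop at the last entry with a smaller key */
            list_for_each_prev(pos, head)
                    if (list_entry(pos, struct demo_node, qe)->key < node->key)
                            break;
            /* if the loop finishes, pos == head and this inserts up front */
            list_add(&node->qe, pos);
    }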
 
@@ -2199,24 +1985,20 @@
 bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
 {
 	struct bna_rx *rx;
-	struct list_head *qe;
 
 	rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
 	if (type == BNA_RX_T_LOOPBACK)
 		rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
 
-	list_for_each(qe, &rx_mod->rx_active_q) {
-		rx = (struct bna_rx *)qe;
+	list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
 		if (rx->type == type)
 			bna_rx_start(rx);
-	}
 }
 
 void
 bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
 {
 	struct bna_rx *rx;
-	struct list_head *qe;
 
 	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
 	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
@@ -2225,13 +2007,11 @@
 
 	bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
 
-	list_for_each(qe, &rx_mod->rx_active_q) {
-		rx = (struct bna_rx *)qe;
+	list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
 		if (rx->type == type) {
 			bfa_wc_up(&rx_mod->rx_stop_wc);
 			bna_rx_stop(rx);
 		}
-	}
 
 	bfa_wc_wait(&rx_mod->rx_stop_wc);
 }
@@ -2240,15 +2020,12 @@
 bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
 {
 	struct bna_rx *rx;
-	struct list_head *qe;
 
 	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
 	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
 
-	list_for_each(qe, &rx_mod->rx_active_q) {
-		rx = (struct bna_rx *)qe;
+	list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
 		bna_rx_fail(rx);
-	}
 }
 
 void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
@@ -2282,7 +2059,6 @@
 	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
 		rx_ptr = &rx_mod->rx[index];
 
-		bfa_q_qe_init(&rx_ptr->qe);
 		INIT_LIST_HEAD(&rx_ptr->rxp_q);
 		rx_ptr->bna = NULL;
 		rx_ptr->rid = index;
@@ -2296,7 +2072,6 @@
 	/* build RX-path queue */
 	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
 		rxp_ptr = &rx_mod->rxp[index];
-		bfa_q_qe_init(&rxp_ptr->qe);
 		list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
 		rx_mod->rxp_free_count++;
 	}
@@ -2304,7 +2079,6 @@
 	/* build RXQ queue */
 	for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
 		rxq_ptr = &rx_mod->rxq[index];
-		bfa_q_qe_init(&rxq_ptr->qe);
 		list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
 		rx_mod->rxq_free_count++;
 	}
@@ -2313,21 +2087,6 @@
 void
 bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
 {
-	struct list_head		*qe;
-	int i;
-
-	i = 0;
-	list_for_each(qe, &rx_mod->rx_free_q)
-		i++;
-
-	i = 0;
-	list_for_each(qe, &rx_mod->rxp_free_q)
-		i++;
-
-	i = 0;
-	list_for_each(qe, &rx_mod->rxq_free_q)
-		i++;
-
 	rx_mod->bna = NULL;
 }
 
@@ -2337,7 +2096,6 @@
 	struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
 	struct bna_rxp *rxp = NULL;
 	struct bna_rxq *q0 = NULL, *q1 = NULL;
-	struct list_head *rxp_qe;
 	int i;
 
 	bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
@@ -2345,10 +2103,8 @@
 
 	rx->hw_id = cfg_rsp->hw_id;
 
-	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
-		i < rx->num_paths;
-		i++, rxp_qe = bfa_q_next(rxp_qe)) {
-		rxp = (struct bna_rxp *)rxp_qe;
+	for (i = 0, rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
+	     i < rx->num_paths; i++, rxp = list_next_entry(rxp, qe)) {
 		GET_RXQS(rxp, q0, q1);
 
 		/* Setup doorbells */
@@ -2396,20 +2152,19 @@
 
 	dq_depth = q_cfg->q0_depth;
 	hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
-	cq_depth = dq_depth + hq_depth;
+	cq_depth = roundup_pow_of_two(dq_depth + hq_depth);
 
-	BNA_TO_POWER_OF_2_HIGH(cq_depth);
 	cq_size = cq_depth * BFI_CQ_WI_SIZE;
 	cq_size = ALIGN(cq_size, PAGE_SIZE);
 	cpage_count = SIZE_TO_PAGES(cq_size);
 
-	BNA_TO_POWER_OF_2_HIGH(dq_depth);
+	dq_depth = roundup_pow_of_two(dq_depth);
 	dq_size = dq_depth * BFI_RXQ_WI_SIZE;
 	dq_size = ALIGN(dq_size, PAGE_SIZE);
 	dpage_count = SIZE_TO_PAGES(dq_size);
 
 	if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
-		BNA_TO_POWER_OF_2_HIGH(hq_depth);
+		hq_depth = roundup_pow_of_two(hq_depth);
 		hq_size = hq_depth * BFI_RXQ_WI_SIZE;
 		hq_size = ALIGN(hq_size, PAGE_SIZE);
 		hpage_count = SIZE_TO_PAGES(hq_size);
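
The ring sizing above retires the driver-private BNA_TO_POWER_OF_2_HIGH() statement macro in favor of roundup_pow_of_two() from linux/log2.h, which returns its result instead of mutating the argument and folds to a constant for constant inputs. A sketch with a hypothetical helper:

    #include <linux/log2.h>

    static unsigned long demo_ring_depth(unsigned int dq_depth,
                                         unsigned int hq_depth)
    {
            /* rings must be power-of-2 sized; round the sum up.
             * Assumes dq_depth + hq_depth is nonzero.
             */
            return roundup_pow_of_two(dq_depth + hq_depth);
    }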
@@ -2620,7 +2375,7 @@
 		if (intr_info->intr_type == BNA_INTR_T_MSIX)
 			rxp->cq.ib.intr_vector = rxp->vector;
 		else
-			rxp->cq.ib.intr_vector = (1 << rxp->vector);
+			rxp->cq.ib.intr_vector = BIT(rxp->vector);
 		rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
 		rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
 		rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
@@ -2691,7 +2446,7 @@
 		/* if multi-buffer is enabled, the sum of q0_depth
 		 * and q1_depth need not be a power of 2
 		 */
-		BNA_TO_POWER_OF_2_HIGH(cq_depth);
+		cq_depth = roundup_pow_of_two(cq_depth);
 		rxp->cq.ccb->q_depth = cq_depth;
 		rxp->cq.ccb->cq = &rxp->cq;
 		rxp->cq.ccb->rcb[0] = q0->rcb;
@@ -2725,7 +2480,7 @@
 
 	bfa_fsm_set_state(rx, bna_rx_sm_stopped);
 
-	rx_mod->rid_mask |= (1 << rx->rid);
+	rx_mod->rid_mask |= BIT(rx->rid);
 
 	return rx;
 }
@@ -2742,7 +2497,8 @@
 	bna_rxf_uninit(&rx->rxf);
 
 	while (!list_empty(&rx->rxp_q)) {
-		bfa_q_deq(&rx->rxp_q, &rxp);
+		rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
+		list_del(&rxp->qe);
 		GET_RXQS(rxp, q0, q1);
 		if (rx->rcb_destroy_cbfn)
 			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
@@ -2769,15 +2525,13 @@
 		bna_rxp_put(rx_mod, rxp);
 	}
 
-	list_for_each(qe, &rx_mod->rx_active_q) {
+	list_for_each(qe, &rx_mod->rx_active_q)
 		if (qe == &rx->qe) {
 			list_del(&rx->qe);
-			bfa_q_qe_init(&rx->qe);
 			break;
 		}
-	}
 
-	rx_mod->rid_mask &= ~(1 << rx->rid);
+	rx_mod->rid_mask &= ~BIT(rx->rid);
 
 	rx->bna = NULL;
 	rx->priv = NULL;
@@ -2844,8 +2598,7 @@
 
 enum bna_cb_status
 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
-		enum bna_rxmode bitmask,
-		void (*cbfn)(struct bnad *, struct bna_rx *))
+		enum bna_rxmode bitmask)
 {
 	struct bna_rxf *rxf = &rx->rxf;
 	int need_hw_config = 0;
@@ -2900,11 +2653,10 @@
 	/* Trigger h/w if needed */
 
 	if (need_hw_config) {
-		rxf->cam_fltr_cbfn = cbfn;
+		rxf->cam_fltr_cbfn = NULL;
 		rxf->cam_fltr_cbarg = rx->bna->bnad;
 		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
-	} else if (cbfn)
-		(*cbfn)(rx->bna->bnad, rx);
+	}
 
 	return BNA_CB_SUCCESS;
 
@@ -2928,10 +2680,8 @@
 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
 {
 	struct bna_rxp *rxp;
-	struct list_head *qe;
 
-	list_for_each(qe, &rx->rxp_q) {
-		rxp = (struct bna_rxp *)qe;
+	list_for_each_entry(rxp, &rx->rxp_q, qe) {
 		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
 		bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
 	}
@@ -3024,16 +2774,6 @@
 	}								\
 } while (0)
 
-#define call_tx_prio_change_cbfn(tx)					\
-do {									\
-	if ((tx)->prio_change_cbfn) {					\
-		void (*cbfn)(struct bnad *, struct bna_tx *);	\
-		cbfn = (tx)->prio_change_cbfn;				\
-		(tx)->prio_change_cbfn = NULL;				\
-		cbfn((tx)->bna->bnad, (tx));				\
-	}								\
-} while (0)
-
 static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
 static void bna_bfi_tx_enet_start(struct bna_tx *tx);
 static void bna_tx_enet_stop(struct bna_tx *tx);
@@ -3044,7 +2784,6 @@
 	TX_E_FAIL			= 3,
 	TX_E_STARTED			= 4,
 	TX_E_STOPPED			= 5,
-	TX_E_PRIO_CHANGE		= 6,
 	TX_E_CLEANUP_DONE		= 7,
 	TX_E_BW_UPDATE			= 8,
 };
@@ -3085,10 +2824,6 @@
 		/* No-op */
 		break;
 
-	case TX_E_PRIO_CHANGE:
-		call_tx_prio_change_cbfn(tx);
-		break;
-
 	case TX_E_BW_UPDATE:
 		/* No-op */
 		break;
@@ -3109,28 +2844,23 @@
 {
 	switch (event) {
 	case TX_E_STOP:
-		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
+		tx->flags &= ~BNA_TX_F_BW_UPDATED;
 		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
 		break;
 
 	case TX_E_FAIL:
-		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
+		tx->flags &= ~BNA_TX_F_BW_UPDATED;
 		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
 		break;
 
 	case TX_E_STARTED:
-		if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
-			tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
-				BNA_TX_F_BW_UPDATED);
+		if (tx->flags & BNA_TX_F_BW_UPDATED) {
+			tx->flags &= ~BNA_TX_F_BW_UPDATED;
 			bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
 		} else
 			bfa_fsm_set_state(tx, bna_tx_sm_started);
 		break;
 
-	case TX_E_PRIO_CHANGE:
-		tx->flags |=  BNA_TX_F_PRIO_CHANGED;
-		break;
-
 	case TX_E_BW_UPDATE:
 		tx->flags |= BNA_TX_F_BW_UPDATED;
 		break;
@@ -3144,11 +2874,9 @@
 bna_tx_sm_started_entry(struct bna_tx *tx)
 {
 	struct bna_txq *txq;
-	struct list_head		 *qe;
 	int is_regular = (tx->type == BNA_TX_T_REGULAR);
 
-	list_for_each(qe, &tx->txq_q) {
-		txq = (struct bna_txq *)qe;
+	list_for_each_entry(txq, &tx->txq_q, qe) {
 		txq->tcb->priority = txq->priority;
 		/* Start IB */
 		bna_ib_start(tx->bna, &txq->ib, is_regular);
@@ -3172,7 +2900,6 @@
 		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
 		break;
 
-	case TX_E_PRIO_CHANGE:
 	case TX_E_BW_UPDATE:
 		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
 		break;
@@ -3205,7 +2932,6 @@
 		bna_tx_enet_stop(tx);
 		break;
 
-	case TX_E_PRIO_CHANGE:
 	case TX_E_BW_UPDATE:
 		/* No-op */
 		break;
@@ -3225,7 +2951,6 @@
 {
 	switch (event) {
 	case TX_E_FAIL:
-	case TX_E_PRIO_CHANGE:
 	case TX_E_BW_UPDATE:
 		/* No-op */
 		break;
@@ -3256,7 +2981,6 @@
 
 	case TX_E_FAIL:
 		bfa_fsm_set_state(tx, bna_tx_sm_failed);
-		call_tx_prio_change_cbfn(tx);
 		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
 		break;
 
@@ -3264,7 +2988,6 @@
 		bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
 		break;
 
-	case TX_E_PRIO_CHANGE:
 	case TX_E_BW_UPDATE:
 		/* No-op */
 		break;
@@ -3277,7 +3000,6 @@
 static void
 bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
 {
-	call_tx_prio_change_cbfn(tx);
 	tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
 }
 
@@ -3293,7 +3015,6 @@
 		bfa_fsm_set_state(tx, bna_tx_sm_failed);
 		break;
 
-	case TX_E_PRIO_CHANGE:
 	case TX_E_BW_UPDATE:
 		/* No-op */
 		break;
@@ -3372,7 +3093,6 @@
 {
 	struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
 	struct bna_txq *txq = NULL;
-	struct list_head *qe;
 	int i;
 
 	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
@@ -3381,11 +3101,9 @@
 		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
 
 	cfg_req->num_queues = tx->num_txq;
-	for (i = 0, qe = bfa_q_first(&tx->txq_q);
-		i < tx->num_txq;
-		i++, qe = bfa_q_next(qe)) {
-		txq = (struct bna_txq *)qe;
-
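+	/* walk txq_q in step with i; the first pass picks the list head */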
+	for (i = 0; i < tx->num_txq; i++) {
+		txq = txq ? list_next_entry(txq, qe)
+			: list_first_entry(&tx->txq_q, struct bna_txq, qe);
 		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
 		cfg_req->q_cfg[i].q.priority = txq->priority;
 
@@ -3437,13 +3155,10 @@
 bna_tx_enet_stop(struct bna_tx *tx)
 {
 	struct bna_txq *txq;
-	struct list_head		 *qe;
 
 	/* Stop IB */
-	list_for_each(qe, &tx->txq_q) {
-		txq = (struct bna_txq *)qe;
+	list_for_each_entry(txq, &tx->txq_q, qe)
 		bna_ib_stop(tx->bna, &txq->ib);
-	}
 
 	bna_bfi_tx_enet_stop(tx);
 }
@@ -3487,18 +3202,15 @@
 static struct bna_tx *
 bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
 {
-	struct list_head	*qe = NULL;
 	struct bna_tx *tx = NULL;
 
 	if (list_empty(&tx_mod->tx_free_q))
 		return NULL;
-	if (type == BNA_TX_T_REGULAR) {
-		bfa_q_deq(&tx_mod->tx_free_q, &qe);
-	} else {
-		bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
-	}
-	tx = (struct bna_tx *)qe;
-	bfa_q_qe_init(&tx->qe);
+	if (type == BNA_TX_T_REGULAR)
+		tx = list_first_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
+	else
+		tx = list_last_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
+	list_del(&tx->qe);
 	tx->type = type;
 
 	return tx;
@@ -3509,21 +3221,18 @@
 {
 	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
 	struct bna_txq *txq;
-	struct list_head *prev_qe;
 	struct list_head *qe;
 
 	while (!list_empty(&tx->txq_q)) {
-		bfa_q_deq(&tx->txq_q, &txq);
-		bfa_q_qe_init(&txq->qe);
+		txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
 		txq->tcb = NULL;
 		txq->tx = NULL;
-		list_add_tail(&txq->qe, &tx_mod->txq_free_q);
+		list_move_tail(&txq->qe, &tx_mod->txq_free_q);
 	}
 
 	list_for_each(qe, &tx_mod->tx_active_q) {
 		if (qe == &tx->qe) {
 			list_del(&tx->qe);
-			bfa_q_qe_init(&tx->qe);
 			break;
 		}
 	}
@@ -3531,28 +3240,11 @@
 	tx->bna = NULL;
 	tx->priv = NULL;
 
-	prev_qe = NULL;
-	list_for_each(qe, &tx_mod->tx_free_q) {
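+	/* tx_free_q stays sorted by rid; scan backwards for the insert point */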
+	list_for_each_prev(qe, &tx_mod->tx_free_q)
 		if (((struct bna_tx *)qe)->rid < tx->rid)
-			prev_qe = qe;
-		else {
 			break;
-		}
-	}
 
-	if (prev_qe == NULL) {
-		/* This is the first entry */
-		bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
-	} else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
-		/* This is the last entry */
-		list_add_tail(&tx->qe, &tx_mod->tx_free_q);
-	} else {
-		/* Somewhere in the middle */
-		bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
-		bfa_q_prev(&tx->qe) = prev_qe;
-		bfa_q_next(prev_qe) = &tx->qe;
-		bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
-	}
+	list_add(&tx->qe, qe);
 }
 
 static void
@@ -3585,7 +3277,6 @@
 {
 	struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
 	struct bna_txq *txq = NULL;
-	struct list_head *qe;
 	int i;
 
 	bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
@@ -3593,10 +3284,8 @@
 
 	tx->hw_id = cfg_rsp->hw_id;
 
-	for (i = 0, qe = bfa_q_first(&tx->txq_q);
-		i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
-		txq = (struct bna_txq *)qe;
-
+	for (i = 0, txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
+	     i < tx->num_txq; i++, txq = list_next_entry(txq, qe)) {
 		/* Setup doorbells */
 		txq->tcb->i_dbell->doorbell_addr =
 			tx->bna->pcidev.pci_bar_kva
@@ -3624,12 +3313,9 @@
 bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
 {
 	struct bna_tx *tx;
-	struct list_head		*qe;
 
-	list_for_each(qe, &tx_mod->tx_active_q) {
-		tx = (struct bna_tx *)qe;
+	list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
 		bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
-	}
 }
 
 void
@@ -3689,7 +3375,6 @@
 	struct bna_tx_mod *tx_mod = &bna->tx_mod;
 	struct bna_tx *tx;
 	struct bna_txq *txq;
-	struct list_head *qe;
 	int page_count;
 	int i;
 
@@ -3719,9 +3404,8 @@
 		if (list_empty(&tx_mod->txq_free_q))
 			goto err_return;
 
-		bfa_q_deq(&tx_mod->txq_free_q, &txq);
-		bfa_q_qe_init(&txq->qe);
-		list_add_tail(&txq->qe, &tx->txq_q);
+		txq = list_first_entry(&tx_mod->txq_free_q, struct bna_txq, qe);
+		list_move_tail(&txq->qe, &tx->txq_q);
 		txq->tx = tx;
 	}
 
@@ -3760,8 +3444,7 @@
 	/* TxQ */
 
 	i = 0;
-	list_for_each(qe, &tx->txq_q) {
-		txq = (struct bna_txq *)qe;
+	list_for_each_entry(txq, &tx->txq_q, qe) {
 		txq->tcb = (struct bna_tcb *)
 		res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
 		txq->tx_packets = 0;
@@ -3779,7 +3462,7 @@
 					intr_info->idl[0].vector :
 					intr_info->idl[i].vector;
 		if (intr_info->intr_type == BNA_INTR_T_INTX)
-			txq->ib.intr_vector = (1 <<  txq->ib.intr_vector);
+			txq->ib.intr_vector = BIT(txq->ib.intr_vector);
 		txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
 		txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
 		txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
@@ -3821,7 +3504,7 @@
 
 	bfa_fsm_set_state(tx, bna_tx_sm_stopped);
 
-	tx_mod->rid_mask |= (1 << tx->rid);
+	tx_mod->rid_mask |= BIT(tx->rid);
 
 	return tx;
 
@@ -3834,15 +3517,12 @@
 bna_tx_destroy(struct bna_tx *tx)
 {
 	struct bna_txq *txq;
-	struct list_head *qe;
 
-	list_for_each(qe, &tx->txq_q) {
-		txq = (struct bna_txq *)qe;
+	list_for_each_entry(txq, &tx->txq_q, qe)
 		if (tx->tcb_destroy_cbfn)
 			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
-	}
 
-	tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
+	tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid);
 	bna_tx_free(tx);
 }
 
@@ -3920,9 +3600,7 @@
 
 	for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
 		tx_mod->tx[i].rid = i;
-		bfa_q_qe_init(&tx_mod->tx[i].qe);
 		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
-		bfa_q_qe_init(&tx_mod->txq[i].qe);
 		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
 	}
 
@@ -3935,17 +3613,6 @@
 void
 bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
 {
-	struct list_head		*qe;
-	int i;
-
-	i = 0;
-	list_for_each(qe, &tx_mod->tx_free_q)
-		i++;
-
-	i = 0;
-	list_for_each(qe, &tx_mod->txq_free_q)
-		i++;
-
 	tx_mod->bna = NULL;
 }
 
@@ -3953,24 +3620,20 @@
 bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
 {
 	struct bna_tx *tx;
-	struct list_head		*qe;
 
 	tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
 	if (type == BNA_TX_T_LOOPBACK)
 		tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
 
-	list_for_each(qe, &tx_mod->tx_active_q) {
-		tx = (struct bna_tx *)qe;
+	list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
 		if (tx->type == type)
 			bna_tx_start(tx);
-	}
 }
 
 void
 bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
 {
 	struct bna_tx *tx;
-	struct list_head		*qe;
 
 	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
 	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
@@ -3979,13 +3642,11 @@
 
 	bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
 
-	list_for_each(qe, &tx_mod->tx_active_q) {
-		tx = (struct bna_tx *)qe;
+	list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
 		if (tx->type == type) {
 			bfa_wc_up(&tx_mod->tx_stop_wc);
 			bna_tx_stop(tx);
 		}
-	}
 
 	bfa_wc_wait(&tx_mod->tx_stop_wc);
 }
@@ -3994,25 +3655,19 @@
 bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
 {
 	struct bna_tx *tx;
-	struct list_head		*qe;
 
 	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
 	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
 
-	list_for_each(qe, &tx_mod->tx_active_q) {
-		tx = (struct bna_tx *)qe;
+	list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
 		bna_tx_fail(tx);
-	}
 }
 
 void
 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
 {
 	struct bna_txq *txq;
-	struct list_head *qe;
 
-	list_for_each(qe, &tx->txq_q) {
-		txq = (struct bna_txq *)qe;
+	list_for_each_entry(txq, &tx->txq_q, qe)
 		bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
-	}
 }
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index d0a7a56..e0e797f 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -135,7 +135,6 @@
 enum bna_tx_flags {
 	BNA_TX_F_ENET_STARTED	= 1,
 	BNA_TX_F_ENABLED	= 2,
-	BNA_TX_F_PRIO_CHANGED	= 4,
 	BNA_TX_F_BW_UPDATED	= 8,
 };
 
@@ -182,17 +181,11 @@
 	BNA_RX_MOD_F_ENET_LOOPBACK	= 2,
 };
 
-enum bna_rxf_flags {
-	BNA_RXF_F_PAUSED		= 1,
-};
-
 enum bna_rxf_event {
 	RXF_E_START			= 1,
 	RXF_E_STOP			= 2,
 	RXF_E_FAIL			= 3,
 	RXF_E_CONFIG			= 4,
-	RXF_E_PAUSE			= 5,
-	RXF_E_RESUME			= 6,
 	RXF_E_FW_RESP			= 7,
 };
 
@@ -362,9 +355,6 @@
 	void (*stop_cbfn)(void *);
 	void			*stop_cbarg;
 
-	/* Callback for bna_enet_pause_config() */
-	void (*pause_cbfn)(struct bnad *);
-
 	/* Callback for bna_enet_mtu_set() */
 	void (*mtu_cbfn)(struct bnad *);
 
@@ -498,9 +488,6 @@
 	void (*stop_cbfn)(void *arg, struct bna_tx *tx);
 	void			*stop_cbarg;
 
-	/* callback for bna_tx_prio_set() */
-	void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx);
-
 	struct bfa_msgq_cmd_entry msgq_cmd;
 	union {
 		struct bfi_enet_tx_cfg_req	cfg_req;
@@ -676,7 +663,6 @@
 	enum bna_rx_type rx_type;
 	int			num_paths;
 	enum bna_rxp_type rxp_type;
-	int			paused;
 	int			coalescing_timeo;
 	/*
 	 * Small/Large (or Header/Data) buffer size to be configured
@@ -721,7 +707,6 @@
 /* RxF structure (hardware Rx Function) */
 struct bna_rxf {
 	bfa_fsm_t		fsm;
-	enum bna_rxf_flags flags;
 
 	struct bfa_msgq_cmd_entry msgq_cmd;
 	union {
@@ -742,10 +727,6 @@
 	void (*stop_cbfn) (struct bna_rx *rx);
 	struct bna_rx *stop_cbarg;
 
-	/* callback for bna_rx_receive_pause() / bna_rx_receive_resume() */
-	void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx);
-	struct bnad *oper_state_cbarg;
-
 	/**
 	 * callback for:
 	 *	bna_rxf_ucast_set()
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index caae6cb..0612b19 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -57,7 +57,8 @@
 static u32 bna_id;
 static struct mutex bnad_list_mutex;
 static LIST_HEAD(bnad_list);
-static const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
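+/* ether_addr_copy() needs u16-aligned addresses, hence __aligned(2) */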
+static const u8 bnad_bcast_addr[] __aligned(2) =
+	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 
 /*
  * Local MACROS
@@ -308,7 +309,7 @@
 		}
 	}
 
-	BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
+	BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
 
 	return 0;
 }
@@ -724,7 +725,6 @@
 			cmpl->valid = 0;
 			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
 		}
-		cmpl = &cq[ccb->producer_index];
 	}
 
 	napi_gro_flush(&rx_ctrl->napi, false);
@@ -757,7 +757,7 @@
 	struct bna_ccb *ccb = (struct bna_ccb *)data;
 
 	if (ccb) {
-		((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
+		((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
 		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
 	}
 
@@ -875,9 +875,9 @@
 {
 	struct net_device *netdev = bnad->netdev;
 
-	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
+	ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
 	if (is_zero_ether_addr(netdev->dev_addr))
-		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
+		ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
 }
 
 /* Control Path Handlers */
@@ -946,8 +946,7 @@
 	if (link_up) {
 		if (!netif_carrier_ok(bnad->netdev)) {
 			uint tx_id, tcb_id;
-			printk(KERN_WARNING "bna: %s link up\n",
-				bnad->netdev->name);
+			netdev_info(bnad->netdev, "link up\n");
 			netif_carrier_on(bnad->netdev);
 			BNAD_UPDATE_CTR(bnad, link_toggle);
 			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
@@ -966,10 +965,6 @@
 						/*
 						 * Force an immediate
 						 * Transmit Schedule */
-						printk(KERN_INFO "bna: %s %d "
-						      "TXQ_STARTED\n",
-						       bnad->netdev->name,
-						       txq_id);
 						netif_wake_subqueue(
 								bnad->netdev,
 								txq_id);
@@ -987,8 +982,7 @@
 		}
 	} else {
 		if (netif_carrier_ok(bnad->netdev)) {
-			printk(KERN_WARNING "bna: %s link down\n",
-				bnad->netdev->name);
+			netdev_info(bnad->netdev, "link down\n");
 			netif_carrier_off(bnad->netdev);
 			BNAD_UPDATE_CTR(bnad, link_toggle);
 		}
@@ -1058,8 +1052,6 @@
 		txq_id = tcb->id;
 		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
 		netif_stop_subqueue(bnad->netdev, txq_id);
-		printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
-			bnad->netdev->name, txq_id);
 	}
 }
 
@@ -1082,8 +1074,6 @@
 		BUG_ON(*(tcb->hw_consumer_index) != 0);
 
 		if (netif_carrier_ok(bnad->netdev)) {
-			printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
-				bnad->netdev->name, txq_id);
 			netif_wake_subqueue(bnad->netdev, txq_id);
 			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
 		}
@@ -1094,8 +1084,8 @@
 	 * get a 0 MAC address. We try to get the MAC address
 	 * again here.
 	 */
-	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
-		bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
+	if (is_zero_ether_addr(bnad->perm_addr)) {
+		bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
 		bnad_set_netdev_perm_addr(bnad);
 	}
 }
@@ -1703,7 +1693,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
+	bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
@@ -1714,7 +1704,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
+	bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
@@ -1725,7 +1715,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
+	bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
@@ -1736,7 +1726,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
+	bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
@@ -1862,8 +1852,7 @@
 	struct netdev_hw_addr *mc_addr;
 
 	netdev_for_each_mc_addr(mc_addr, netdev) {
-		memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
-							ETH_ALEN);
+		ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
 		i++;
 	}
 }
@@ -2137,7 +2126,7 @@
 		current_err = bnad_setup_rx(bnad, rx_id);
 		if (current_err && !err) {
 			err = current_err;
-			pr_err("RXQ:%u setup failed\n", rx_id);
+			netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
 		}
 	}
 
@@ -2338,7 +2327,7 @@
  * Called with bnad->bna_lock held
  */
 int
-bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
+bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
 {
 	int ret;
 
@@ -2349,7 +2338,7 @@
 	if (!bnad->rx_info[0].rx)
 		return 0;
 
-	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
+	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
 	if (ret != BNA_CB_SUCCESS)
 		return -EADDRNOTAVAIL;
 
@@ -2367,8 +2356,8 @@
 	init_completion(&bnad->bnad_completions.mcast_comp);
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
-				bnad_cb_rx_mcast_add);
+	ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
+			       bnad_cb_rx_mcast_add);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	if (ret == BNA_CB_SUCCESS)
@@ -2673,8 +2662,9 @@
 	if (ret < 0) {
 		goto intx_mode;
 	} else if (ret < bnad->msix_num) {
-		pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
-			ret, bnad->msix_num);
+		dev_warn(&bnad->pcidev->dev,
+			 "%d MSI-X vectors allocated < %d requested\n",
+			 ret, bnad->msix_num);
 
 		spin_lock_irqsave(&bnad->bna_lock, flags);
 		/* ret = #of vectors that we got */
@@ -2696,7 +2686,8 @@
 	return;
 
 intx_mode:
-	pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
+	dev_warn(&bnad->pcidev->dev,
+		 "MSI-X enable failed - operating in INTx mode\n");
 
 	kfree(bnad->msix_table);
 	bnad->msix_table = NULL;
@@ -2754,7 +2745,7 @@
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 	bna_enet_mtu_set(&bnad->bna.enet,
 			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
-	bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
+	bna_enet_pause_config(&bnad->bna.enet, &pause_config);
 	bna_enet_enable(&bnad->bna.enet);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
@@ -3128,7 +3119,7 @@
 	int entry;
 
 	if (netdev_uc_empty(bnad->netdev)) {
-		bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+		bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
 		return;
 	}
 
@@ -3141,13 +3132,11 @@
 
 	entry = 0;
 	netdev_for_each_uc_addr(ha, netdev) {
-		memcpy(&mac_list[entry * ETH_ALEN],
-		       &ha->addr[0], ETH_ALEN);
+		ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
 		entry++;
 	}
 
-	ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
-			mac_list, NULL);
+	ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
 	kfree(mac_list);
 
 	if (ret != BNA_CB_SUCCESS)
@@ -3158,7 +3147,7 @@
 	/* ucast packets not in UCAM are routed to default function */
 mode_default:
 	bnad->cfg_flags |= BNAD_CF_DEFAULT;
-	bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+	bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
 }
 
 static void
@@ -3183,12 +3172,11 @@
 	if (mac_list == NULL)
 		goto mode_allmulti;
 
-	memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+	ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
 
 	/* copy rest of the MCAST addresses */
 	bnad_netdev_mc_list_get(netdev, mac_list);
-	ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
-			mac_list, NULL);
+	ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
 	kfree(mac_list);
 
 	if (ret != BNA_CB_SUCCESS)
@@ -3198,7 +3186,7 @@
 
 mode_allmulti:
 	bnad->cfg_flags |= BNAD_CF_ALLMULTI;
-	bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
+	bna_rx_mcast_delall(bnad->rx_info[0].rx);
 }
 
 void
@@ -3237,7 +3225,7 @@
 
 	mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
 			BNA_RXMODE_ALLMULTI;
-	bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
+	bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
 
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
@@ -3248,19 +3236,18 @@
  * in a non-blocking context.
  */
 static int
-bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
+bnad_set_mac_address(struct net_device *netdev, void *addr)
 {
 	int err;
 	struct bnad *bnad = netdev_priv(netdev);
-	struct sockaddr *sa = (struct sockaddr *)mac_addr;
+	struct sockaddr *sa = (struct sockaddr *)addr;
 	unsigned long flags;
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 
 	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
-
 	if (!err)
-		memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+		ether_addr_copy(netdev->dev_addr, sa->sa_data);
 
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
@@ -3487,8 +3474,8 @@
 		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
 		return -ENOMEM;
 	}
-	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
-	       (unsigned long long) bnad->mmio_len);
+	dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
+		 (unsigned long long) bnad->mmio_len);
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 	if (!bnad_msix_disable)
@@ -3609,13 +3596,10 @@
 	struct bfa_pcidev pcidev_info;
 	unsigned long flags;
 
-	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
-	       pdev, pcidev_id, PCI_FUNC(pdev->devfn));
-
 	mutex_lock(&bnad_fwimg_mutex);
 	if (!cna_get_firmware_buf(pdev)) {
 		mutex_unlock(&bnad_fwimg_mutex);
-		pr_warn("Failed to load Firmware Image!\n");
+		dev_err(&pdev->dev, "failed to load firmware image!\n");
 		return -ENODEV;
 	}
 	mutex_unlock(&bnad_fwimg_mutex);
@@ -3693,13 +3677,13 @@
 
 	/* Set up timers */
 	setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
-				((unsigned long)bnad));
+		    (unsigned long)bnad);
 	setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
-				((unsigned long)bnad));
+		    (unsigned long)bnad);
 	setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
-				((unsigned long)bnad));
+		    (unsigned long)bnad);
 	setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
-				((unsigned long)bnad));
+		    (unsigned long)bnad);
 
 	/*
 	 * Start the chip
@@ -3708,8 +3692,7 @@
 	 */
 	err = bnad_ioceth_enable(bnad);
 	if (err) {
-		pr_err("BNA: Initialization failed err=%d\n",
-		       err);
+		dev_err(&pdev->dev, "initialization failed err=%d\n", err);
 		goto probe_success;
 	}
 
@@ -3742,7 +3725,7 @@
 
 	/* Get the burnt-in mac */
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
+	bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
 	bnad_set_netdev_perm_addr(bnad);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
@@ -3751,7 +3734,7 @@
 	/* Finally, register with net_device layer */
 	err = register_netdev(netdev);
 	if (err) {
-		pr_err("BNA : Registering with netdev failed\n");
+		dev_err(&pdev->dev, "registering net device failed\n");
 		goto probe_uninit;
 	}
 	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
@@ -3803,7 +3786,6 @@
 	if (!netdev)
 		return;
 
-	pr_info("%s bnad_pci_remove\n", netdev->name);
 	bnad = netdev_priv(netdev);
 	bna = &bnad->bna;
 
@@ -3864,15 +3846,14 @@
 {
 	int err;
 
-	pr_info("QLogic BR-series 10G Ethernet driver - version: %s\n",
-			BNAD_VERSION);
+	pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
+		BNAD_VERSION);
 
 	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
 
 	err = pci_register_driver(&bnad_pci_driver);
 	if (err < 0) {
-		pr_err("bna : PCI registration failed in module init "
-		       "(%d)\n", err);
+		pr_err("bna: PCI driver registration failed err=%d\n", err);
 		return err;
 	}
 
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 7ead6c2..faedbf2 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -344,7 +344,7 @@
 	struct bnad_completion bnad_completions;
 
 	/* Burnt in MAC address */
-	mac_t			perm_addr;
+	u8			perm_addr[ETH_ALEN];
 
 	struct workqueue_struct *work_q;
 
@@ -385,7 +385,7 @@
 /* Netdev entry point prototypes */
 void bnad_set_rx_mode(struct net_device *netdev);
 struct net_device_stats *bnad_get_netdev_stats(struct net_device *netdev);
-int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
+int bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr);
 int bnad_enable_default_bcast(struct bnad *bnad);
 void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
 void bnad_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 72c8955..8fc246e 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -76,8 +76,7 @@
 		fw_debug->debug_buffer = NULL;
 		kfree(fw_debug);
 		fw_debug = NULL;
-		pr_warn("bnad %s: Failed to collect fwtrc\n",
-			pci_name(bnad->pcidev));
+		netdev_warn(bnad->netdev, "failed to collect fwtrc\n");
 		return -ENOMEM;
 	}
 
@@ -117,8 +116,7 @@
 		fw_debug->debug_buffer = NULL;
 		kfree(fw_debug);
 		fw_debug = NULL;
-		pr_warn("bna %s: Failed to collect fwsave\n",
-			pci_name(bnad->pcidev));
+		netdev_warn(bnad->netdev, "failed to collect fwsave\n");
 		return -ENOMEM;
 	}
 
@@ -217,8 +215,7 @@
 		drv_info->debug_buffer = NULL;
 		kfree(drv_info);
 		drv_info = NULL;
-		pr_warn("bna %s: Failed to collect drvinfo\n",
-			pci_name(bnad->pcidev));
+		netdev_warn(bnad->netdev, "failed to collect drvinfo\n");
 		return -ENOMEM;
 	}
 
@@ -271,15 +268,15 @@
 	area = (offset >> 15) & 0x7;
 	if (area == 0) {
 		/* PCIe core register */
-		if ((offset + (len<<2)) > 0x8000)	/* 8k dwords or 32KB */
+		if (offset + (len << 2) > 0x8000)	/* 8k dwords or 32KB */
 			return BFA_STATUS_EINVAL;
 	} else if (area == 0x1) {
 		/* CB 32 KB memory page */
-		if ((offset + (len<<2)) > 0x10000)	/* 8k dwords or 32KB */
+		if (offset + (len << 2) > 0x10000)	/* 8k dwords or 32KB */
 			return BFA_STATUS_EINVAL;
 	} else {
 		/* CB register space 64KB */
-		if ((offset + (len<<2)) > BFA_REG_ADDRMSK(ioc))
+		if (offset + (len << 2) > BFA_REG_ADDRMSK(ioc))
 			return BFA_STATUS_EINVAL;
 	}
 	return BFA_STATUS_OK;
@@ -321,27 +318,20 @@
 	unsigned long flags;
 	void *kern_buf;
 
-	/* Allocate memory to store the user space buf */
-	kern_buf = kzalloc(nbytes, GFP_KERNEL);
-	if (!kern_buf)
-		return -ENOMEM;
-
-	if (copy_from_user(kern_buf, (void  __user *)buf, nbytes)) {
-		kfree(kern_buf);
-		return -ENOMEM;
-	}
+	/* Copy the user space buf */
+	kern_buf = memdup_user(buf, nbytes);
+	if (IS_ERR(kern_buf))
+		return PTR_ERR(kern_buf);
 
 	rc = sscanf(kern_buf, "%x:%x", &addr, &len);
 	if (rc < 2) {
-		pr_warn("bna %s: Failed to read user buffer\n",
-			pci_name(bnad->pcidev));
+		netdev_warn(bnad->netdev, "failed to read user buffer\n");
 		kfree(kern_buf);
 		return -EINVAL;
 	}
 
 	kfree(kern_buf);
 	kfree(bnad->regdata);
-	bnad->regdata = NULL;
 	bnad->reglen = 0;
 
 	bnad->regdata = kzalloc(len << 2, GFP_KERNEL);
@@ -355,8 +345,7 @@
 	/* offset and len sanity check */
 	rc = bna_reg_offset_check(ioc, addr, len);
 	if (rc) {
-		pr_warn("bna %s: Failed reg offset check\n",
-			pci_name(bnad->pcidev));
+		netdev_warn(bnad->netdev, "failed reg offset check\n");
 		kfree(bnad->regdata);
 		bnad->regdata = NULL;
 		bnad->reglen = 0;
@@ -388,20 +377,14 @@
 	unsigned long flags;
 	void *kern_buf;
 
-	/* Allocate memory to store the user space buf */
-	kern_buf = kzalloc(nbytes, GFP_KERNEL);
-	if (!kern_buf)
-		return -ENOMEM;
-
-	if (copy_from_user(kern_buf, (void  __user *)buf, nbytes)) {
-		kfree(kern_buf);
-		return -ENOMEM;
-	}
+	/* Copy the user space buf */
+	kern_buf = memdup_user(buf, nbytes);
+	if (IS_ERR(kern_buf))
+		return PTR_ERR(kern_buf);
 
 	rc = sscanf(kern_buf, "%x:%x", &addr, &val);
 	if (rc < 2) {
-		pr_warn("bna %s: Failed to read user buffer\n",
-			pci_name(bnad->pcidev));
+		netdev_warn(bnad->netdev, "failed to read user buffer\n");
 		kfree(kern_buf);
 		return -EINVAL;
 	}
@@ -412,8 +395,7 @@
 	/* offset and len sanity check */
 	rc = bna_reg_offset_check(ioc, addr, 1);
 	if (rc) {
-		pr_warn("bna %s: Failed reg offset check\n",
-			pci_name(bnad->pcidev));
+		netdev_warn(bnad->netdev, "failed reg offset check\n");
 		return -EINVAL;
 	}
 
@@ -525,7 +507,8 @@
 		bna_debugfs_root = debugfs_create_dir("bna", NULL);
 		atomic_set(&bna_debugfs_port_count, 0);
 		if (!bna_debugfs_root) {
-			pr_warn("BNA: debugfs root dir creation failed\n");
+			netdev_warn(bnad->netdev,
+				    "debugfs root dir creation failed\n");
 			return;
 		}
 	}
@@ -536,8 +519,8 @@
 		bnad->port_debugfs_root =
 			debugfs_create_dir(name, bna_debugfs_root);
 		if (!bnad->port_debugfs_root) {
-			pr_warn("bna pci_dev %s: root dir creation failed\n",
-				pci_name(bnad->pcidev));
+			netdev_warn(bnad->netdev,
+				    "debugfs root dir creation failed\n");
 			return;
 		}
 
@@ -552,9 +535,9 @@
 							bnad,
 							file->fops);
 			if (!bnad->bnad_dentry_files[i]) {
-				pr_warn(
-				     "BNA pci_dev:%s: create %s entry failed\n",
-				     pci_name(bnad->pcidev), file->name);
+				netdev_warn(bnad->netdev,
+					    "create %s entry failed\n",
+					    file->name);
 				return;
 			}
 		}
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 12f344d..2bdfc5d 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -445,13 +445,13 @@
 
 	if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
 	    ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
-	    !BNA_POWER_OF_2(ringparam->rx_pending)) {
+	    !is_power_of_2(ringparam->rx_pending)) {
 		mutex_unlock(&bnad->conf_mutex);
 		return -EINVAL;
 	}
 	if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
 	    ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
-	    !BNA_POWER_OF_2(ringparam->tx_pending)) {
+	    !is_power_of_2(ringparam->tx_pending)) {
 		mutex_unlock(&bnad->conf_mutex);
 		return -EINVAL;
 	}
@@ -533,7 +533,7 @@
 		pause_config.rx_pause = pauseparam->rx_pause;
 		pause_config.tx_pause = pauseparam->tx_pause;
 		spin_lock_irqsave(&bnad->bna_lock, flags);
-		bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
+		bna_enet_pause_config(&bnad->bna.enet, &pause_config);
 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
 	}
 	mutex_unlock(&bnad->conf_mutex);
@@ -1080,7 +1080,7 @@
 
 	ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev);
 	if (ret) {
-		pr_err("BNA: Can't locate firmware %s\n", eflash->data);
+		netdev_err(netdev, "can't load firmware %s\n", eflash->data);
 		goto out;
 	}
 
@@ -1093,7 +1093,7 @@
 				bnad->id, (u8 *)fw->data, fw->size, 0,
 				bnad_cb_completion, &fcomp);
 	if (ret != BFA_STATUS_OK) {
-		pr_warn("BNA: Flash update failed with err: %d\n", ret);
+		netdev_warn(netdev, "flash update failed with err=%d\n", ret);
 		ret = -EIO;
 		spin_unlock_irq(&bnad->bna_lock);
 		goto out;
@@ -1103,8 +1103,9 @@
 	wait_for_completion(&fcomp.comp);
 	if (fcomp.comp_status != BFA_STATUS_OK) {
 		ret = -EIO;
-		pr_warn("BNA: Firmware image update to flash failed with: %d\n",
-			fcomp.comp_status);
+		netdev_warn(netdev,
+			    "firmware image update failed with err=%d\n",
+			    fcomp.comp_status);
 	}
 out:
 	release_firmware(fw);
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 28e7d0f..75f8f1a 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -42,66 +42,4 @@
 #define CNA_FW_FILE_CT2	"ct2fw-3.2.5.1.bin"
 #define FC_SYMNAME_MAX	256	/*!< max name server symbolic name size */
 
-#pragma pack(1)
-
-typedef struct mac { u8 mac[ETH_ALEN]; } mac_t;
-
-#pragma pack()
-
-#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
-#define bfa_q_next(_qe)	(((struct list_head *) (_qe))->next)
-#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
-
-/*
- * bfa_q_qe_init - to initialize a queue element
- */
-#define bfa_q_qe_init(_qe) {						\
-	bfa_q_next(_qe) = (struct list_head *) NULL;			\
-	bfa_q_prev(_qe) = (struct list_head *) NULL;			\
-}
-
-/*
- * bfa_q_deq - dequeue an element from head of the queue
- */
-#define bfa_q_deq(_q, _qe) {						\
-	if (!list_empty(_q)) {						\
-		(*((struct list_head **) (_qe))) = bfa_q_next(_q);	\
-		bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) =	\
-						(struct list_head *) (_q); \
-		bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
-		bfa_q_qe_init(*((struct list_head **) _qe));		\
-	} else {							\
-		*((struct list_head **)(_qe)) = NULL;			\
-	}								\
-}
-
-/*
- * bfa_q_deq_tail - dequeue an element from tail of the queue
- */
-#define bfa_q_deq_tail(_q, _qe) {					\
-	if (!list_empty(_q)) {						\
-		*((struct list_head **) (_qe)) = bfa_q_prev(_q);	\
-		bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) =  \
-						(struct list_head *) (_q); \
-		bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
-		bfa_q_qe_init(*((struct list_head **) _qe));		\
-	} else {							\
-		*((struct list_head **) (_qe)) = (struct list_head *) NULL; \
-	}								\
-}
-
-/*
- * bfa_add_tail_head - enqueue an element at the head of queue
- */
-#define bfa_q_enq_head(_q, _qe) {					\
-	if (!(bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL))	\
-		pr_err("Assertion failure: %s:%d: %d",			\
-			__FILE__, __LINE__,				\
-		(bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL));\
-	bfa_q_next(_qe) = bfa_q_next(_q);				\
-	bfa_q_prev(_qe) = (struct list_head *) (_q);			\
-	bfa_q_prev(bfa_q_next(_q)) = (struct list_head *) (_qe);	\
-	bfa_q_next(_q) = (struct list_head *) (_qe);			\
-}
-
 #endif /* __CNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index badea36..2e7fb97 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -33,7 +33,7 @@
 	u32 n;
 
 	if (request_firmware(&fw, fw_name, &pdev->dev)) {
-		pr_alert("Can't locate firmware %s\n", fw_name);
+		dev_alert(&pdev->dev, "can't load firmware %s\n", fw_name);
 		goto error;
 	}
 
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 1ba3e3a..f0bcb15 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -8,8 +8,6 @@
 	default y
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y.
-	  Make sure you know the name of your card. Read the Ethernet-HOWTO,
-	  available from <http://www.tldp.org/docs.html#howto>.
 
 	  If unsure, say Y.
 
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index fc646a4..caeb395 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -54,6 +54,8 @@
 #define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 #define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 
+#define GEM_MTU_MIN_SIZE	68
+
 /*
  * Graceful stop timeouts in us. We should allow up to
  * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
@@ -785,7 +787,7 @@
 		}
 		/* now everything is ready for receiving packet */
 		bp->rx_skbuff[entry] = NULL;
-		len = MACB_BFEXT(RX_FRMLEN, ctrl);
+		len = ctrl & bp->rx_frm_len_mask;
 
 		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
 
@@ -831,7 +833,7 @@
 	struct macb_dma_desc *desc;
 
 	desc = macb_rx_desc(bp, last_frag);
-	len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
+	len = desc->ctrl & bp->rx_frm_len_mask;
 
 	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
 		macb_rx_ring_wrap(first_frag),
@@ -1651,7 +1653,10 @@
 	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
 	config |= MACB_BIT(PAE);		/* PAuse Enable */
 	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
-	config |= MACB_BIT(BIG);		/* Receive oversized frames */
+	if (bp->caps & MACB_CAPS_JUMBO)
+		config |= MACB_BIT(JFRAME);	/* Enable jumbo frames */
+	else
+		config |= MACB_BIT(BIG);	/* Receive oversized frames */
 	if (bp->dev->flags & IFF_PROMISC)
 		config |= MACB_BIT(CAF);	/* Copy All Frames */
 	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
@@ -1660,8 +1665,13 @@
 		config |= MACB_BIT(NBC);	/* No BroadCast */
 	config |= macb_dbw(bp);
 	macb_writel(bp, NCFGR, config);
+	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
+		gem_writel(bp, JML, bp->jumbo_max_len);
 	bp->speed = SPEED_10;
 	bp->duplex = DUPLEX_HALF;
+	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
+	if (bp->caps & MACB_CAPS_JUMBO)
+		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
 
 	macb_configure_dma(bp);
 
@@ -1865,6 +1875,26 @@
 	return 0;
 }
 
+static int macb_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct macb *bp = netdev_priv(dev);
+	u32 max_mtu;
+
+	if (netif_running(dev))
+		return -EBUSY;
+
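+	/* Jumbo-capable GEMs report the max frame length via the JML register */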
+	max_mtu = ETH_DATA_LEN;
+	if (bp->caps & MACB_CAPS_JUMBO)
+		max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
+
+	if (new_mtu > max_mtu || new_mtu < GEM_MTU_MIN_SIZE)
+		return -EINVAL;
+
+	dev->mtu = new_mtu;
+
+	return 0;
+}
+
 static void gem_update_stats(struct macb *bp)
 {
 	int i;
@@ -2141,7 +2171,7 @@
 	.ndo_get_stats		= macb_get_stats,
 	.ndo_do_ioctl		= macb_ioctl,
 	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_change_mtu		= macb_change_mtu,
 	.ndo_set_mac_address	= eth_mac_addr,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= macb_poll_controller,
@@ -2683,6 +2713,13 @@
 	.init = macb_init,
 };
 
+static const struct macb_config sama5d2_config = {
+	.caps = 0,
+	.dma_burst_length = 16,
+	.clk_init = macb_clk_init,
+	.init = macb_init,
+};
+
 static const struct macb_config sama5d3_config = {
 	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
 	.dma_burst_length = 16,
@@ -2702,6 +2739,16 @@
 	.init = at91ether_init,
 };
 
+static const struct macb_config zynqmp_config = {
+	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+		MACB_CAPS_JUMBO,
+	.dma_burst_length = 16,
+	.clk_init = macb_clk_init,
+	.init = macb_init,
+	.jumbo_max_len = 10240,
+};
+
 static const struct macb_config zynq_config = {
 	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
 		MACB_CAPS_NO_GIGABIT_HALF,
@@ -2716,10 +2763,12 @@
 	{ .compatible = "cdns,macb" },
 	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
 	{ .compatible = "cdns,gem", .data = &pc302gem_config },
+	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
 	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
 	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
 	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
 	{ .compatible = "cdns,emac", .data = &emac_config },
+	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
 	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
 	{ /* sentinel */ }
 };
@@ -2789,6 +2838,10 @@
 	bp->pclk = pclk;
 	bp->hclk = hclk;
 	bp->tx_clk = tx_clk;
+	if (macb_config->jumbo_max_len)
+		bp->jumbo_max_len = macb_config->jumbo_max_len;
+
 	spin_lock_init(&bp->lock);
 
 	/* setup capabilities */
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 24b1d9b..d746559 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -71,6 +71,7 @@
 #define GEM_NCFGR		0x0004 /* Network Config */
 #define GEM_USRIO		0x000c /* User IO */
 #define GEM_DMACFG		0x0010 /* DMA Configuration */
+#define GEM_JML			0x0048 /* Jumbo Max Length */
 #define GEM_HRB			0x0080 /* Hash Bottom */
 #define GEM_HRT			0x0084 /* Hash Top */
 #define GEM_SA1B		0x0088 /* Specific1 Bottom */
@@ -398,6 +399,7 @@
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE	0x20000000
 #define MACB_CAPS_SG_DISABLED			0x40000000
 #define MACB_CAPS_MACB_IS_GEM			0x80000000
+#define MACB_CAPS_JUMBO				0x00000008
 
 /* Bit manipulation macros */
 #define MACB_BIT(name)					\
@@ -515,6 +517,9 @@
 #define MACB_RX_BROADCAST_OFFSET		31
 #define MACB_RX_BROADCAST_SIZE			1
 
+#define MACB_RX_FRMLEN_MASK			0xFFF
+#define MACB_RX_JFRMLEN_MASK			0x3FFF
+
 /* RX checksum offload disabled: bit 24 clear in NCFGR */
 #define GEM_RX_TYPEID_MATCH_OFFSET		22
 #define GEM_RX_TYPEID_MATCH_SIZE		2
@@ -758,6 +763,7 @@
 	int	(*clk_init)(struct platform_device *pdev, struct clk **pclk,
 			    struct clk **hclk, struct clk **tx_clk);
 	int	(*init)(struct platform_device *pdev);
+	int	jumbo_max_len;
 };
 
 struct macb_queue {
@@ -827,6 +833,9 @@
 	unsigned int		max_tx_length;
 
 	u64			ethtool_stats[GEM_STATS_LEN];
+
+	unsigned int		rx_frm_len_mask;
+	unsigned int		jumbo_max_len;
 };
 
 static inline bool macb_is_gem(struct macb *bp)
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
new file mode 100644
index 0000000..c4d6bbe
--- /dev/null
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -0,0 +1,57 @@
+#
+# Cavium ethernet device configuration
+#
+
+config NET_VENDOR_CAVIUM
+	tristate "Cavium ethernet drivers"
+	depends on PCI
+	default y
+	---help---
+	  Select this option if you want to enable Cavium network support.
+
+	  If you have a Cavium SoC or network adapter, say Y.
+
+if NET_VENDOR_CAVIUM
+
+config THUNDER_NIC_PF
+	tristate "Thunder Physical function driver"
+	depends on 64BIT
+	default ARCH_THUNDER
+	select THUNDER_NIC_BGX
+	---help---
+	  This driver supports Thunder's NIC physical function.
+	  The NIC provides the controller and DMA engines to
+	  move network traffic to/from memory. The NIC
+	  works closely with TNS, BGX and SerDes to implement the
+	  functions replacing and virtualizing those of a typical
+	  standalone PCIe NIC chip.
+
+config THUNDER_NIC_VF
+	tristate "Thunder Virtual function driver"
+	depends on 64BIT
+	default ARCH_THUNDER
+	---help---
+	  This driver supports Thunder's NIC virtual function.
+
+config THUNDER_NIC_BGX
+	tristate "Thunder MAC interface driver (BGX)"
+	depends on 64BIT
+	default ARCH_THUNDER
+	---help---
+	  This driver supports programming and controlling the MAC
+	  interfaces from the NIC physical function driver.
+
+config LIQUIDIO
+	tristate "Cavium LiquidIO support"
+	depends on 64BIT
+	select PTP_1588_CLOCK
+	select FW_LOADER
+	select LIBCRC32C
+	---help---
+	  This driver supports Cavium LiquidIO Intelligent Server Adapters
+	  based on CN66XX and CN68XX chips.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called liquidio.  This is recommended.
+
+endif # NET_VENDOR_CAVIUM
diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile
new file mode 100644
index 0000000..d22f886
--- /dev/null
+++ b/drivers/net/ethernet/cavium/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Cavium ethernet device drivers.
+#
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += liquidio/
diff --git a/drivers/net/ethernet/cavium/liquidio/Makefile b/drivers/net/ethernet/cavium/liquidio/Makefile
new file mode 100644
index 0000000..2f36680
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/Makefile
@@ -0,0 +1,16 @@
+#
+# Cavium Liquidio ethernet device driver
+#
+obj-$(CONFIG_LIQUIDIO) += liquidio.o
+
+liquidio-objs := lio_main.o  \
+	      lio_ethtool.o      \
+	      request_manager.o  \
+	      response_manager.o \
+	      octeon_device.o    \
+	      cn66xx_device.o    \
+	      cn68xx_device.o    \
+	      octeon_mem_ops.o   \
+	      octeon_droq.o      \
+	      octeon_console.o   \
+	      octeon_nic.o
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
new file mode 100644
index 0000000..8ad7425
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -0,0 +1,796 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+int lio_cn6xxx_soft_reset(struct octeon_device *oct)
+{
+	octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);
+
+	dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n");
+
+	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST);
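+	/* Post a sentinel; a successful reset clears SCRATCH1 (checked below) */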
+	octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL);
+
+	lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
+	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);
+
+	/* make sure that the reset is written before starting timer */
+	mmiowb();
+
+	/* Wait 100ms for Octeon to come out of reset. */
+	mdelay(100);
+
+	if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) {
+		dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
+		return 1;
+	}
+
+	dev_dbg(&oct->pci_dev->dev, "Reset completed\n");
+	octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);
+
+	return 0;
+}
+
+void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
+{
+	u32 val;
+
+	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
+	if (val & 0x000f0000) {
+		dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
+			val & 0x000f0000);
+	}
+
+	val |= 0xf;          /* Enable Link error reporting */
+
+	dev_dbg(&oct->pci_dev->dev, "Enabling PCI-E error reporting..\n");
+	pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
+}
+
+void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
+			       enum octeon_pcie_mps mps)
+{
+	u32 val;
+	u64 r64;
+
+	/* Read config register for MPS */
+	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
+
+	if (mps == PCIE_MPS_DEFAULT) {
+		mps = ((val & (0x7 << 5)) >> 5);
+	} else {
+		val &= ~(0x7 << 5);  /* Turn off any MPS bits */
+		val |= (mps << 5);   /* Set MPS */
+		pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
+	}
+
+	/* Set MPS in DPI_SLI_PRT0_CFG to the same value. */
+	r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+	r64 |= (mps << 4);
+	lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+}
+
+void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
+				enum octeon_pcie_mrrs mrrs)
+{
+	u32 val;
+	u64 r64;
+
+	/* Read config register for MRRS */
+	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
+
+	if (mrrs == PCIE_MRRS_DEFAULT) {
+		mrrs = ((val & (0x7 << 12)) >> 12);
+	} else {
+		val &= ~(0x7 << 12); /* Turn off any MRRS bits */
+		val |= (mrrs << 12); /* Set MRRS */
+		pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
+	}
+
+	/* Set MRRS in SLI_S2M_PORT0_CTL to the same value. */
+	r64 = octeon_read_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port));
+	r64 |= mrrs;
+	octeon_write_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port), r64);
+
+	/* Set MRRS in DPI_SLI_PRT0_CFG to the same value. */
+	r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+	r64 |= mrrs;
+	lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+}
+
+u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct)
+{
+	/* Bits 29:24 of MIO_RST_BOOT hold the ref. clock multiplier
+	 * for SLI.
+	 */
+	return ((lio_pci_readq(oct, CN6XXX_MIO_RST_BOOT) >> 24) & 0x3f) * 50;
+}
+
+u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct,
+			    u32 time_intr_in_us)
+{
+	/* This gives the SLI clock per microsec */
+	u32 oqticks_per_us = lio_cn6xxx_coprocessor_clock(oct);
+
+	/* core clock per us / oq ticks would be fractional. To avoid that,
+	 * we use the method below.
+	 */
+
+	/* This gives the clock cycles per millisecond */
+	oqticks_per_us *= 1000;
+
+	/* This gives the oq ticks (1024 core clock cycles) per millisecond */
+	oqticks_per_us /= 1024;
+
+	/* time_intr is in microseconds. The next 2 steps give the oq ticks
+	 * corresponding to time_intr.
+	 */
+	oqticks_per_us *= time_intr_in_us;
+	oqticks_per_us /= 1000;
+
+	return oqticks_per_us;
+}
+
+void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct)
+{
+	/* Select Round-Robin Arb, ES, RO, NS for Input Queues */
+	octeon_write_csr(oct, CN6XXX_SLI_PKT_INPUT_CONTROL,
+			 CN6XXX_INPUT_CTL_MASK);
+
+	/* Instruction Read Size - Max 4 instructions per PCIE Read */
+	octeon_write_csr64(oct, CN6XXX_SLI_PKT_INSTR_RD_SIZE,
+			   0xFFFFFFFFFFFFFFFFULL);
+
+	/* Select PCIE Port for all Input rings. */
+	octeon_write_csr64(oct, CN6XXX_SLI_IN_PCIE_PORT,
+			   (oct->pcie_port * 0x5555555555555555ULL));
+}
+
+static void lio_cn66xx_setup_pkt_ctl_regs(struct octeon_device *oct)
+{
+	u64 pktctl;
+
+	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+
+	pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);
+
+	/* 66XX SPECIFIC */
+	if (CFG_GET_OQ_MAX_Q(cn6xxx->conf) <= 4)
+		/* Disable RING_EN if only up to 4 rings are used. */
+		pktctl &= ~(1 << 4);
+	else
+		pktctl |= (1 << 4);
+
+	if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf))
+		pktctl |= 0xF;
+	else
+		/* Disable per-port backpressure. */
+		pktctl &= ~0xF;
+	octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
+}
+
+void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
+{
+	u32 time_threshold;
+	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+
+	/* Select PCI-E Port for all Output queues */
+	octeon_write_csr64(oct, CN6XXX_SLI_PKT_PCIE_PORT64,
+			   (oct->pcie_port * 0x5555555555555555ULL));
+
+	if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) {
+		octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 32);
+	} else {
+		/* Set Output queue watermark to 0 to disable backpressure */
+		octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0);
+	}
+
+	/* Select Info Ptr for length & data */
+	octeon_write_csr(oct, CN6XXX_SLI_PKT_IPTR, 0xFFFFFFFF);
+
+	/* Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
+	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);
+
+	/* Select ES,RO,NS setting from register for Output Queue Packet
+	 * Address
+	 */
+	octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);
+
+	/* No Relaxed Ordering, No Snoop, 64-bit swap for Output
+	 * Queue ScatterList
+	 */
+	octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_ROR, 0);
+	octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_NS, 0);
+
+	/* ENDIAN_SPECIFIC CHANGES - 0 works for LE. */
+#ifdef __BIG_ENDIAN_BITFIELD
+	octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64,
+			   0x5555555555555555ULL);
+#else
+	octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, 0ULL);
+#endif
+
+	/* No Relaxed Ordering, No Snoop, 64-bit swap for Output Queue Data */
+	octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_ROR, 0);
+	octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_NS, 0);
+	octeon_write_csr64(oct, CN6XXX_SLI_PKT_DATA_OUT_ES64,
+			   0x5555555555555555ULL);
+
+	/* Set up interrupt packet and time threshold */
+	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
+			 (u32)CFG_GET_OQ_INTR_PKT(cn6xxx->conf));
+	time_threshold =
+		lio_cn6xxx_get_oq_ticks(oct, (u32)
+					CFG_GET_OQ_INTR_TIME(cn6xxx->conf));
+
+	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
+}
+
+static int lio_cn6xxx_setup_device_regs(struct octeon_device *oct)
+{
+	lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
+	lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_512B);
+	lio_cn6xxx_enable_error_reporting(oct);
+
+	lio_cn6xxx_setup_global_input_regs(oct);
+	lio_cn66xx_setup_pkt_ctl_regs(oct);
+	lio_cn6xxx_setup_global_output_regs(oct);
+
+	/* Default error timeout value should be 0x200000 to avoid host hang
+	 * when reads invalid register
+	 */
+	octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);
+	return 0;
+}
+
+void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
+{
+	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+
+	/* Disable Packet-by-Packet mode; No Parse Mode or Skip length */
+	octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0);
+
+	/* Write the start of the input queue's ring and its size  */
+	octeon_write_csr64(oct, CN6XXX_SLI_IQ_BASE_ADDR64(iq_no),
+			   iq->base_addr_dma);
+	octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count);
+
+	/* Remember the doorbell & instruction count register addr for this
+	 * queue
+	 */
+	iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no);
+	iq->inst_cnt_reg = oct->mmio[0].hw_addr
+			   + CN6XXX_SLI_IQ_INSTR_COUNT(iq_no);
+	dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
+		iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+
+	/* Store the current instruction counter
+	 * (used in flush_iq calculation)
+	 */
+	iq->reset_instr_cnt = readl(iq->inst_cnt_reg);
+}
+
+static void lio_cn66xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
+{
+	lio_cn6xxx_setup_iq_regs(oct, iq_no);
+
+	/* Backpressure for this queue - WMARK set to all F's. This effectively
+	 * disables the backpressure mechanism.
+	 */
+	octeon_write_csr64(oct, CN66XX_SLI_IQ_BP64(iq_no),
+			   (0xFFFFFFFFULL << 32));
+}
+
+void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
+{
+	u32 intr;
+	struct octeon_droq *droq = oct->droq[oq_no];
+
+	octeon_write_csr64(oct, CN6XXX_SLI_OQ_BASE_ADDR64(oq_no),
+			   droq->desc_ring_dma);
+	octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count);
+
+	octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
+			 (droq->buffer_size | (OCT_RH_SIZE << 16)));
+
+	/* Get the mapped address of the pkt_sent and pkts_credit regs */
+	droq->pkts_sent_reg =
+		oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_SENT(oq_no);
+	droq->pkts_credit_reg =
+		oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_CREDIT(oq_no);
+
+	/* Enable this output queue to generate Packet Timer Interrupt */
+	intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
+	intr |= (1 << oq_no);
+	octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, intr);
+
+	/* Enable this output queue to generate Packet Count Interrupt */
+	intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
+	intr |= (1 << oq_no);
+	octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
+}
+
+void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
+{
+	u32 mask;
+
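+	/* Flag 64B-instruction IQs, then set the enable bits for IQs and OQs */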
+	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE);
+	mask |= oct->io_qmask.iq64B;
+	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE, mask);
+
+	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
+	mask |= oct->io_qmask.iq;
+	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);
+
+	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+	mask |= oct->io_qmask.oq;
+	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);
+}
+
+void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
+{
+	u32 mask, i, loop = HZ;
+	u32 d32;
+
+	/* Reset the Enable bits for Input Queues. */
+	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
+	mask ^= oct->io_qmask.iq;
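+	/* XOR clears the enable bits for queues in io_qmask.iq (assumed set) */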
+	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);
+
+	/* Wait until hardware indicates that the queues are out of reset. */
+	mask = oct->io_qmask.iq;
+	d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
+	while (((d32 & mask) != mask) && loop--) {
+		d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
+		schedule_timeout_uninterruptible(1);
+	}
+
+	/* Reset the doorbell register for each Input queue. */
+	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+		if (!(oct->io_qmask.iq & (1UL << i)))
+			continue;
+		octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
+		d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
+	}
+
+	/* Reset the Enable bits for Output Queues. */
+	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+	mask ^= oct->io_qmask.oq;
+	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);
+
+	/* Wait until hardware indicates that the queues are out of reset. */
+	loop = HZ;
+	mask = oct->io_qmask.oq;
+	d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
+	while (((d32 & mask) != mask) && loop--) {
+		d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
+		schedule_timeout_uninterruptible(1);
+	}
+
+	/* Reset the doorbell register for each Output queue. */
+	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+		if (!(oct->io_qmask.oq & (1UL << i)))
+			continue;
+		octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
+		d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));
+
+		d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i));
+		octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i), d32);
+	}
+
+	d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
+	if (d32)
+		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, d32);
+
+	d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
+	if (d32)
+		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32);
+}
+
+void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
+{
+	u32 i;
+
+	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+		if (!(oct->io_qmask.iq & (1UL << i)))
+			continue;
+		oct->fn_list.setup_iq_regs(oct, i);
+	}
+
+	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+		if (!(oct->io_qmask.oq & (1UL << i)))
+			continue;
+		oct->fn_list.setup_oq_regs(oct, i);
+	}
+
+	oct->fn_list.setup_device_regs(oct);
+
+	oct->fn_list.enable_interrupt(oct->chip);
+
+	oct->fn_list.enable_io_queues(oct);
+
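+	/* Re-credit each output ring with its full descriptor count */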
+	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+		if (!(oct->io_qmask.oq & (1UL << i)))
+			continue;
+		writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg);
+	}
+}
+
+void
+lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
+			  u64 core_addr,
+			  u32 idx,
+			  int valid)
+{
+	u64 bar1;
+
+	if (valid == 0) {
+		bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+		lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL),
+			       CN6XXX_BAR1_REG(idx, oct->pcie_port));
+		bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+		return;
+	}
+
+	/* Bits 17:4 of the PCI_BAR1_INDEXx register store bits 35:22 of
+	 * the Core Addr
+	 */
+	lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
+		       CN6XXX_BAR1_REG(idx, oct->pcie_port));
+
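+	/* Read back to ensure the BAR1 index write has posted. */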
+	bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+}
+
+void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct,
+			       u32 idx,
+			       u32 mask)
+{
+	lio_pci_writeq(oct, mask, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+}
+
+u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
+{
+	return (u32)lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+}
+
+u32
+lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
+			     struct octeon_instr_queue *iq)
+{
+	u32 new_idx = readl(iq->inst_cnt_reg);
+
+	/* The instr cnt reg is a 32-bit counter that can roll over; its
+	 * value at init time was saved in reset_instr_cnt, so the
+	 * arithmetic below yields the number of instructions read since
+	 * init regardless of wrap-around.
+	 */
+	if (iq->reset_instr_cnt < new_idx)
+		new_idx -= iq->reset_instr_cnt;
+	else
+		new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;
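+	/* e.g. a reset_instr_cnt of 0xFFFFFFF0 and a raw count of 0x10
+	 * means 0x20 instructions have been read since init.
+	 */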
+
+	/* Reduce that count modulo the IQ size to get the ring index. */
+	new_idx %= iq->max_count;
+
+	return new_idx;
+}
+
+void lio_cn6xxx_enable_interrupt(void *chip)
+{
+	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+	u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;
+
+	/* Enable Interrupt */
+	writeq(mask, cn6xxx->intr_enb_reg64);
+}
+
+void lio_cn6xxx_disable_interrupt(void *chip)
+{
+	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+
+	/* Disable Interrupts */
+	writeq(0, cn6xxx->intr_enb_reg64);
+
+	/* make sure interrupts are really disabled */
+	mmiowb();
+}
+
+static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
+{
+	/* CN63xx Pass2 and newer parts implement the SLI_MAC_NUMBER
+	 * register to report the PCIE port #
+	 */
+	oct->pcie_port = octeon_read_csr(oct, CN6XXX_SLI_MAC_NUMBER) & 0xff;
+
+	dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
+}
+
+void
+lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
+{
+	dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
+		CVM_CAST64(intr64));
+}
+
+int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
+{
+	struct octeon_droq *droq;
+	u32 oq_no, pkt_count, droq_time_mask, droq_mask, droq_int_enb;
+	u32 droq_cnt_enb, droq_cnt_mask;
+
+	droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
+	droq_cnt_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
+	droq_mask = droq_cnt_mask & droq_cnt_enb;
+
+	droq_time_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
+	droq_int_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
+	droq_mask |= (droq_time_mask & droq_int_enb);
+
+	droq_mask &= oct->io_qmask.oq;
+
+	oct->droq_intr = 0;
+
+	for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) {
+		if (!(droq_mask & (1 << oq_no)))
+			continue;
+
+		droq = oct->droq[oq_no];
+		pkt_count = octeon_droq_check_hw_for_pkts(oct, droq);
+		if (pkt_count) {
+			oct->droq_intr |= (1ULL << oq_no);
+			if (droq->ops.poll_mode) {
+				u32 value;
+				u32 reg;
+
+				struct octeon_cn6xxx *cn6xxx =
+					(struct octeon_cn6xxx *)oct->chip;
+
+				/* disable interrupts for this droq */
+				spin_lock(&cn6xxx->lock_for_droq_int_enb_reg);
+				reg = CN6XXX_SLI_PKT_TIME_INT_ENB;
+				value = octeon_read_csr(oct, reg);
+				value &= ~(1 << oq_no);
+				octeon_write_csr(oct, reg, value);
+				reg = CN6XXX_SLI_PKT_CNT_INT_ENB;
+				value = octeon_read_csr(oct, reg);
+				value &= ~(1 << oq_no);
+				octeon_write_csr(oct, reg, value);
+
+				/* Ensure that the enable register is written.
+				 */
+				mmiowb();
+
+				spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
+			}
+		}
+	}
+
+	droq_time_mask &= oct->io_qmask.oq;
+	droq_cnt_mask &= oct->io_qmask.oq;
+
+	/* Reset the PKT_CNT/TIME_INT registers. */
+	if (droq_time_mask)
+		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, droq_time_mask);
+
+	if (droq_cnt_mask)      /* reset PKT_CNT register:66xx */
+		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, droq_cnt_mask);
+
+	return 0;
+}
+
+irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev)
+{
+	struct octeon_device *oct = (struct octeon_device *)dev;
+	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+	u64 intr64;
+
+	intr64 = readq(cn6xxx->intr_sum_reg64);
+
+	/* Proceed only if our device raised this interrupt. A value of all
+	 * f's means the PCI read itself failed, e.g. the interrupt was
+	 * triggered on an error and the device is no longer accessible.
+	 */
+	if (!intr64 || (intr64 == 0xFFFFFFFFFFFFFFFFULL))
+		return IRQ_NONE;
+
+	oct->int_status = 0;
+
+	if (intr64 & CN6XXX_INTR_ERR)
+		lio_cn6xxx_process_pcie_error_intr(oct, intr64);
+
+	if (intr64 & CN6XXX_INTR_PKT_DATA) {
+		lio_cn6xxx_process_droq_intr_regs(oct);
+		oct->int_status |= OCT_DEV_INTR_PKT_DATA;
+	}
+
+	if (intr64 & CN6XXX_INTR_DMA0_FORCE)
+		oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;
+
+	if (intr64 & CN6XXX_INTR_DMA1_FORCE)
+		oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;
+
+	/* Clear the current interrupts */
+	writeq(intr64, cn6xxx->intr_sum_reg64);
+
+	return IRQ_HANDLED;
+}
+
+void lio_cn6xxx_setup_reg_address(struct octeon_device *oct,
+				  void *chip,
+				  struct octeon_reg_list *reg_list)
+{
+	u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+
+	reg_list->pci_win_wr_addr_hi =
+		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_HI);
+	reg_list->pci_win_wr_addr_lo =
+		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_LO);
+	reg_list->pci_win_wr_addr =
+		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR64);
+
+	reg_list->pci_win_rd_addr_hi =
+		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_HI);
+	reg_list->pci_win_rd_addr_lo =
+		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_LO);
+	reg_list->pci_win_rd_addr =
+		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR64);
+
+	reg_list->pci_win_wr_data_hi =
+		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_HI);
+	reg_list->pci_win_wr_data_lo =
+		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_LO);
+	reg_list->pci_win_wr_data =
+		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA64);
+
+	reg_list->pci_win_rd_data_hi =
+		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_HI);
+	reg_list->pci_win_rd_data_lo =
+		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_LO);
+	reg_list->pci_win_rd_data =
+		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA64);
+
+	lio_cn6xxx_get_pcie_qlmport(oct);
+
+	cn6xxx->intr_sum_reg64 = bar0_pciaddr + CN6XXX_SLI_INT_SUM64;
+	cn6xxx->intr_mask64 = CN6XXX_INTR_MASK;
+	cn6xxx->intr_enb_reg64 =
+		bar0_pciaddr + CN6XXX_SLI_INT_ENB64(oct->pcie_port);
+}
+
+int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
+{
+	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+
+	if (octeon_map_pci_barx(oct, 0, 0))
+		return 1;
+
+	if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
+		dev_err(&oct->pci_dev->dev, "%s CN66XX BAR1 map failed\n",
+			__func__);
+		octeon_unmap_pci_barx(oct, 0);
+		return 1;
+	}
+
+	spin_lock_init(&cn6xxx->lock_for_droq_int_enb_reg);
+
+	oct->fn_list.setup_iq_regs = lio_cn66xx_setup_iq_regs;
+	oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;
+
+	oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
+	oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
+	oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
+	oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;
+
+	oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
+	oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
+	oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;
+
+	oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
+	oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
+	oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;
+
+	oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
+	oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;
+
+	lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);
+
+	cn6xxx->conf = (struct octeon_config *)
+		       oct_get_config_info(oct, LIO_210SV);
+	if (!cn6xxx->conf) {
+		dev_err(&oct->pci_dev->dev, "%s No Config found for CN66XX\n",
+			__func__);
+		octeon_unmap_pci_barx(oct, 0);
+		octeon_unmap_pci_barx(oct, 1);
+		return 1;
+	}
+
+	oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);
+
+	return 0;
+}
+
+int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
+				    struct octeon_config *conf6xxx)
+{
+	if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) {
+		dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
+			__func__, CFG_GET_IQ_MAX_Q(conf6xxx),
+			CN6XXX_MAX_INPUT_QUEUES);
+		return 1;
+	}
+
+	if (CFG_GET_OQ_MAX_Q(conf6xxx) > CN6XXX_MAX_OUTPUT_QUEUES) {
+		dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
+			__func__, CFG_GET_OQ_MAX_Q(conf6xxx),
+			CN6XXX_MAX_OUTPUT_QUEUES);
+		return 1;
+	}
+
+	if (CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_32BYTE_INSTR &&
+	    CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_64BYTE_INSTR) {
+		dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
+			__func__);
+		return 1;
+	}
+	if (!(CFG_GET_OQ_INFO_PTR(conf6xxx)) ||
+	    !(CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx))) {
+		dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
+			__func__);
+		return 1;
+	}
+
+	if (!(CFG_GET_OQ_INTR_TIME(conf6xxx))) {
+		dev_err(&oct->pci_dev->dev, "%s: No Time Interrupt for OQ\n",
+			__func__);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
new file mode 100644
index 0000000..f779187
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -0,0 +1,107 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file  cn66xx_device.h
+ *  \brief Host Driver: Routines that perform CN66XX specific operations.
+ */
+
+#ifndef __CN66XX_DEVICE_H__
+#define  __CN66XX_DEVICE_H__
+
+/* Register addresses and configuration for CN6XXX devices.
+ * If device-specific changes are needed, add a struct with the
+ * device-specific fields, as shown in the commented section below.
+ */
+struct octeon_cn6xxx {
+	/** PCI interrupt summary register */
+	u8 __iomem *intr_sum_reg64;
+
+	/** PCI interrupt enable register */
+	u8 __iomem *intr_enb_reg64;
+
+	/** The PCI interrupt mask used by interrupt handler */
+	u64 intr_mask64;
+
+	struct octeon_config *conf;
+
+	/* Example additional fields - not used currently
+	 *  struct {
+	 *  }cn6xyz;
+	 */
+
+	/* For the purpose of atomic access to interrupt enable reg */
+	spinlock_t lock_for_droq_int_enb_reg;
+};
+
+enum octeon_pcie_mps {
+	PCIE_MPS_DEFAULT = -1,	/* Use the default setup by BIOS */
+	PCIE_MPS_128B = 0,
+	PCIE_MPS_256B = 1
+};
+
+enum octeon_pcie_mrrs {
+	PCIE_MRRS_DEFAULT = -1,	/* Use the default setup by BIOS */
+	PCIE_MRRS_128B = 0,
+	PCIE_MRRS_256B = 1,
+	PCIE_MRRS_512B = 2,
+	PCIE_MRRS_1024B = 3,
+	PCIE_MRRS_2048B = 4,
+	PCIE_MRRS_4096B = 5
+};
+
+/* Common functions for 66xx and 68xx */
+int lio_cn6xxx_soft_reset(struct octeon_device *oct);
+void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct);
+void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
+			       enum octeon_pcie_mps mps);
+void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
+				enum octeon_pcie_mrrs mrrs);
+void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct);
+void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct);
+void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no);
+void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no);
+void lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
+void lio_cn6xxx_disable_io_queues(struct octeon_device *oct);
+void lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64);
+int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct);
+irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev);
+void lio_cn6xxx_reinit_regs(struct octeon_device *oct);
+void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
+			       u32 idx, int valid);
+void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask);
+u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx);
+u32
+lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
+			     struct octeon_instr_queue *iq);
+void lio_cn6xxx_enable_interrupt(void *chip);
+void lio_cn6xxx_disable_interrupt(void *chip);
+void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip,
+				  struct octeon_reg_list *reg_list);
+u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct);
+u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
+int lio_setup_cn66xx_octeon_device(struct octeon_device *);
+int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
+				    struct octeon_config *);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
new file mode 100644
index 0000000..5e3aff2
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
@@ -0,0 +1,535 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn66xx_regs.h
+ *  \brief Host Driver: Register Address and Register Mask values for
+ *  Octeon CN66XX devices.
+ */
+
+#ifndef __CN66XX_REGS_H__
+#define __CN66XX_REGS_H__
+
+#define     CN6XXX_XPANSION_BAR             0x30
+
+#define     CN6XXX_MSI_CAP                  0x50
+#define     CN6XXX_MSI_ADDR_LO              0x54
+#define     CN6XXX_MSI_ADDR_HI              0x58
+#define     CN6XXX_MSI_DATA                 0x5C
+
+#define     CN6XXX_PCIE_CAP                 0x70
+#define     CN6XXX_PCIE_DEVCAP              0x74
+#define     CN6XXX_PCIE_DEVCTL              0x78
+#define     CN6XXX_PCIE_LINKCAP             0x7C
+#define     CN6XXX_PCIE_LINKCTL             0x80
+#define     CN6XXX_PCIE_SLOTCAP             0x84
+#define     CN6XXX_PCIE_SLOTCTL             0x88
+
+#define     CN6XXX_PCIE_ENH_CAP             0x100
+#define     CN6XXX_PCIE_UNCORR_ERR_STATUS   0x104
+#define     CN6XXX_PCIE_UNCORR_ERR_MASK     0x108
+#define     CN6XXX_PCIE_UNCORR_ERR          0x10C
+#define     CN6XXX_PCIE_CORR_ERR_STATUS     0x110
+#define     CN6XXX_PCIE_CORR_ERR_MASK       0x114
+#define     CN6XXX_PCIE_ADV_ERR_CAP         0x118
+
+#define     CN6XXX_PCIE_ACK_REPLAY_TIMER    0x700
+#define     CN6XXX_PCIE_OTHER_MSG           0x704
+#define     CN6XXX_PCIE_PORT_FORCE_LINK     0x708
+#define     CN6XXX_PCIE_ACK_FREQ            0x70C
+#define     CN6XXX_PCIE_PORT_LINK_CTL       0x710
+#define     CN6XXX_PCIE_LANE_SKEW           0x714
+#define     CN6XXX_PCIE_SYM_NUM             0x718
+#define     CN6XXX_PCIE_FLTMSK              0x720
+
+/* ##############  BAR0 Registers ################  */
+
+#define    CN6XXX_SLI_CTL_PORT0                    0x0050
+#define    CN6XXX_SLI_CTL_PORT1                    0x0060
+
+#define    CN6XXX_SLI_WINDOW_CTL                   0x02E0
+#define    CN6XXX_SLI_DBG_DATA                     0x0310
+#define    CN6XXX_SLI_SCRATCH1                     0x03C0
+#define    CN6XXX_SLI_SCRATCH2                     0x03D0
+#define    CN6XXX_SLI_CTL_STATUS                   0x0570
+
+#define    CN6XXX_WIN_WR_ADDR_LO                   0x0000
+#define    CN6XXX_WIN_WR_ADDR_HI                   0x0004
+#define    CN6XXX_WIN_WR_ADDR64                    CN6XXX_WIN_WR_ADDR_LO
+
+#define    CN6XXX_WIN_RD_ADDR_LO                   0x0010
+#define    CN6XXX_WIN_RD_ADDR_HI                   0x0014
+#define    CN6XXX_WIN_RD_ADDR64                    CN6XXX_WIN_RD_ADDR_LO
+
+#define    CN6XXX_WIN_WR_DATA_LO                   0x0020
+#define    CN6XXX_WIN_WR_DATA_HI                   0x0024
+#define    CN6XXX_WIN_WR_DATA64                    CN6XXX_WIN_WR_DATA_LO
+
+#define    CN6XXX_WIN_RD_DATA_LO                   0x0040
+#define    CN6XXX_WIN_RD_DATA_HI                   0x0044
+#define    CN6XXX_WIN_RD_DATA64                    CN6XXX_WIN_RD_DATA_LO
+
+#define    CN6XXX_WIN_WR_MASK_LO                   0x0030
+#define    CN6XXX_WIN_WR_MASK_HI                   0x0034
+#define    CN6XXX_WIN_WR_MASK_REG                  CN6XXX_WIN_WR_MASK_LO
+
+/* 1 register (32-bit) to enable Input queues */
+#define    CN6XXX_SLI_PKT_INSTR_ENB               0x1000
+
+/* 1 register (32-bit) to enable Output queues */
+#define    CN6XXX_SLI_PKT_OUT_ENB                 0x1010
+
+/* 1 register (32-bit) to determine whether Output queues are in reset. */
+#define    CN6XXX_SLI_PORT_IN_RST_OQ              0x11F0
+
+/* 1 register (32-bit) to determine whether Input queues are in reset. */
+#define    CN6XXX_SLI_PORT_IN_RST_IQ              0x11F4
+
+/*###################### REQUEST QUEUE #########################*/
+
+/* 1 register (32-bit) - instr. size of each input queue. */
+#define    CN6XXX_SLI_PKT_INSTR_SIZE             0x1020
+
+/* 32 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */
+#define    CN6XXX_SLI_IQ_INSTR_COUNT_START       0x2000
+
+/* 32 registers for Input Queue Start Addr - SLI_PKT0_INSTR_BADDR */
+#define    CN6XXX_SLI_IQ_BASE_ADDR_START64       0x2800
+
+/* 32 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */
+#define    CN6XXX_SLI_IQ_DOORBELL_START          0x2C00
+
+/* 32 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */
+#define    CN6XXX_SLI_IQ_SIZE_START              0x3000
+
+/* 32 registers for Instruction Header Options - SLI_PKT0_INSTR_HEADER */
+#define    CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64   0x3400
+
+/* 1 register (64-bit) - Back Pressure for each input queue - SLI_PKT0_IN_BP */
+#define    CN66XX_SLI_INPUT_BP_START64           0x3800
+
+/* Each Input Queue register is at a 16-byte Offset in BAR0 */
+#define    CN6XXX_IQ_OFFSET                      0x10
+
+/* 1 register (32-bit) - ES, RO, NS, Arbitration for Input Queue Data &
+ * gather list fetches. SLI_PKT_INPUT_CONTROL.
+ */
+#define    CN6XXX_SLI_PKT_INPUT_CONTROL          0x1170
+
+/* 1 register (64-bit) - Number of instructions to read at one time
+ * - 2 bits for each input ring. SLI_PKT_INSTR_RD_SIZE.
+ */
+#define    CN6XXX_SLI_PKT_INSTR_RD_SIZE          0x11A0
+
+/* 1 register (64-bit) - Assign Input ring to MAC port
+ * - 2 bits for each input ring. SLI_PKT_IN_PCIE_PORT.
+ */
+#define    CN6XXX_SLI_IN_PCIE_PORT               0x11B0
+
+/*------- Request Queue Macros ---------*/
+#define    CN6XXX_SLI_IQ_BASE_ADDR64(iq)          \
+	(CN6XXX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define    CN6XXX_SLI_IQ_SIZE(iq)                 \
+	(CN6XXX_SLI_IQ_SIZE_START + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define    CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq)      \
+	(CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define    CN6XXX_SLI_IQ_DOORBELL(iq)             \
+	(CN6XXX_SLI_IQ_DOORBELL_START + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define    CN6XXX_SLI_IQ_INSTR_COUNT(iq)          \
+	(CN6XXX_SLI_IQ_INSTR_COUNT_START + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define    CN66XX_SLI_IQ_BP64(iq)                 \
+	(CN66XX_SLI_INPUT_BP_START64 + ((iq) * CN6XXX_IQ_OFFSET))
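+
+/* For example, the doorbell CSR for input queue 2 is at
+ * CN6XXX_SLI_IQ_DOORBELL(2) = 0x2C00 + (2 * 0x10) = 0x2C20.
+ */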
+
+/*------------------ Masks ----------------*/
+#define    CN6XXX_INPUT_CTL_ROUND_ROBIN_ARB         BIT(22)
+#define    CN6XXX_INPUT_CTL_DATA_NS                 BIT(8)
+#define    CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP        BIT(6)
+#define    CN6XXX_INPUT_CTL_DATA_RO                 BIT(5)
+#define    CN6XXX_INPUT_CTL_USE_CSR                 BIT(4)
+#define    CN6XXX_INPUT_CTL_GATHER_NS               BIT(3)
+#define    CN6XXX_INPUT_CTL_GATHER_ES_64B_SWAP      BIT(2)
+#define    CN6XXX_INPUT_CTL_GATHER_RO               BIT(1)
+
+#ifdef __BIG_ENDIAN_BITFIELD
+#define    CN6XXX_INPUT_CTL_MASK                    \
+	(CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP      \
+	  | CN6XXX_INPUT_CTL_USE_CSR              \
+	  | CN6XXX_INPUT_CTL_GATHER_ES_64B_SWAP)
+#else
+#define    CN6XXX_INPUT_CTL_MASK                    \
+	(CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP     \
+	  | CN6XXX_INPUT_CTL_USE_CSR)
+#endif
+
+/*############################ OUTPUT QUEUE #########################*/
+
+/* 32 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */
+#define    CN6XXX_SLI_OQ0_BUFF_INFO_SIZE         0x0C00
+
+/* 32 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */
+#define    CN6XXX_SLI_OQ_BASE_ADDR_START64       0x1400
+
+/* 32 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */
+#define    CN6XXX_SLI_OQ_PKT_CREDITS_START       0x1800
+
+/* 32 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */
+#define    CN6XXX_SLI_OQ_SIZE_START              0x1C00
+
+/* 32 registers for Output Queue Packet Count - SLI_PKT0_CNTS */
+#define    CN6XXX_SLI_OQ_PKT_SENT_START          0x2400
+
+/* Each Output Queue register is at a 16-byte Offset in BAR0 */
+#define    CN6XXX_OQ_OFFSET                      0x10
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - Relaxed Ordering setting for reading Output Queues descriptors
+ * - SLI_PKT_SLIST_ROR
+ */
+#define    CN6XXX_SLI_PKT_SLIST_ROR              0x1030
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - No Snoop mode for reading Output Queues descriptors
+ * - SLI_PKT_SLIST_NS
+ */
+#define    CN6XXX_SLI_PKT_SLIST_NS               0x1040
+
+/* 1 register (64-bit) - 2 bits for each output queue
+ * - Endian-Swap mode for reading Output Queue descriptors
+ * - SLI_PKT_SLIST_ES
+ */
+#define    CN6XXX_SLI_PKT_SLIST_ES64             0x1050
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - InfoPtr mode for Output Queues.
+ * - SLI_PKT_IPTR
+ */
+#define    CN6XXX_SLI_PKT_IPTR                   0x1070
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - DPTR format selector for Output queues.
+ * - SLI_PKT_DPADDR
+ */
+#define    CN6XXX_SLI_PKT_DPADDR                 0x1080
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - Relaxed Ordering setting for reading Output Queues data
+ * - SLI_PKT_DATA_OUT_ROR
+ */
+#define    CN6XXX_SLI_PKT_DATA_OUT_ROR           0x1090
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - No Snoop mode for reading Output Queues data
+ * - SLI_PKT_DATA_OUT_NS
+ */
+#define    CN6XXX_SLI_PKT_DATA_OUT_NS            0x10A0
+
+/* 1 register (64-bit)  - 2 bits for each output queue
+ * - Endian-Swap mode for reading Output Queue data
+ * - SLI_PKT_DATA_OUT_ES
+ */
+#define    CN6XXX_SLI_PKT_DATA_OUT_ES64          0x10B0
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - Controls whether SLI_PKTn_CNTS is incremented for bytes or for packets.
+ * - SLI_PKT_OUT_BMODE
+ */
+#define    CN6XXX_SLI_PKT_OUT_BMODE              0x10D0
+
+/* 1 register (64-bit) - 2 bits for each output queue
+ * - Assign PCIE port for Output queues
+ * - SLI_PKT_PCIE_PORT.
+ */
+#define    CN6XXX_SLI_PKT_PCIE_PORT64            0x10E0
+
+/* 1 register (64-bit) for Output Queue Packet Count Interrupt Threshold
+ * & Time Threshold. The same setting applies to all 32 queues. The
+ * register is defined as a single 64-bit register, but we use 32-bit
+ * offsets to address its two halves separately.
+ */
+#define    CN6XXX_SLI_OQ_INT_LEVEL_PKTS          0x1120
+#define    CN6XXX_SLI_OQ_INT_LEVEL_TIME          0x1124
+
+/* 1 register (64-bit) for Output Queue backpressure across all rings. */
+#define    CN6XXX_SLI_OQ_WMARK                   0x1180
+
+/* 1 register to control output queue global backpressure & ring enable. */
+#define    CN6XXX_SLI_PKT_CTL                    0x1220
+
+/*------- Output Queue Macros ---------*/
+#define    CN6XXX_SLI_OQ_BASE_ADDR64(oq)          \
+	(CN6XXX_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define    CN6XXX_SLI_OQ_SIZE(oq)                 \
+	(CN6XXX_SLI_OQ_SIZE_START + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define    CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq)                 \
+	(CN6XXX_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define    CN6XXX_SLI_OQ_PKTS_SENT(oq)            \
+	(CN6XXX_SLI_OQ_PKT_SENT_START + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define    CN6XXX_SLI_OQ_PKTS_CREDIT(oq)          \
+	(CN6XXX_SLI_OQ_PKT_CREDITS_START + ((oq) * CN6XXX_OQ_OFFSET))
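+
+/* For example, the credit CSR for output queue 1 is at
+ * CN6XXX_SLI_OQ_PKTS_CREDIT(1) = 0x1800 + (1 * 0x10) = 0x1810.
+ */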
+
+/*######################### DMA Counters #########################*/
+
+/* 2 registers (64-bit) - DMA Count - 1 for each DMA counter 0/1. */
+#define    CN6XXX_DMA_CNT_START                   0x0400
+
+/* 2 registers (64-bit) - DMA Timer 0/1, contains DMA timer values
+ * SLI_DMA_0_TIM
+ */
+#define    CN6XXX_DMA_TIM_START                   0x0420
+
+/* 2 registers (64-bit) - DMA count & Time Interrupt threshold -
+ * SLI_DMA_0_INT_LEVEL
+ */
+#define    CN6XXX_DMA_INT_LEVEL_START             0x03E0
+
+/* Each DMA register is at a 16-byte Offset in BAR0 */
+#define    CN6XXX_DMA_OFFSET                      0x10
+
+/*---------- DMA Counter Macros ---------*/
+#define    CN6XXX_DMA_CNT(dq)                      \
+	(CN6XXX_DMA_CNT_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define    CN6XXX_DMA_INT_LEVEL(dq)                \
+	(CN6XXX_DMA_INT_LEVEL_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define    CN6XXX_DMA_PKT_INT_LEVEL(dq)            \
+	(CN6XXX_DMA_INT_LEVEL_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define    CN6XXX_DMA_TIME_INT_LEVEL(dq)           \
+	(CN6XXX_DMA_INT_LEVEL_START + 4 + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define    CN6XXX_DMA_TIM(dq)                      \
+	(CN6XXX_DMA_TIM_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+/*######################## INTERRUPTS #########################*/
+
+/* 1 register (64-bit) for Interrupt Summary */
+#define    CN6XXX_SLI_INT_SUM64                  0x0330
+
+/* 1 register (64-bit) for Interrupt Enable */
+#define    CN6XXX_SLI_INT_ENB64_PORT0            0x0340
+#define    CN6XXX_SLI_INT_ENB64_PORT1            0x0350
+
+/* 1 register (32-bit) to enable Output Queue Packet/Byte Count Interrupt */
+#define    CN6XXX_SLI_PKT_CNT_INT_ENB            0x1150
+
+/* 1 register (32-bit) to enable Output Queue Packet Timer Interrupt */
+#define    CN6XXX_SLI_PKT_TIME_INT_ENB           0x1160
+
+/* 1 register (32-bit) to indicate which Output Queue reached pkt threshold */
+#define    CN6XXX_SLI_PKT_CNT_INT                0x1130
+
+/* 1 register (32-bit) to indicate which Output Queue reached time threshold */
+#define    CN6XXX_SLI_PKT_TIME_INT               0x1140
+
+/*------------------ Interrupt Masks ----------------*/
+
+#define    CN6XXX_INTR_RML_TIMEOUT_ERR           BIT(1)
+#define    CN6XXX_INTR_BAR0_RW_TIMEOUT_ERR       BIT(2)
+#define    CN6XXX_INTR_IO2BIG_ERR                BIT(3)
+#define    CN6XXX_INTR_PKT_COUNT                 BIT(4)
+#define    CN6XXX_INTR_PKT_TIME                  BIT(5)
+#define    CN6XXX_INTR_M0UPB0_ERR                BIT(8)
+#define    CN6XXX_INTR_M0UPWI_ERR                BIT(9)
+#define    CN6XXX_INTR_M0UNB0_ERR                BIT(10)
+#define    CN6XXX_INTR_M0UNWI_ERR                BIT(11)
+#define    CN6XXX_INTR_M1UPB0_ERR                BIT(12)
+#define    CN6XXX_INTR_M1UPWI_ERR                BIT(13)
+#define    CN6XXX_INTR_M1UNB0_ERR                BIT(14)
+#define    CN6XXX_INTR_M1UNWI_ERR                BIT(15)
+#define    CN6XXX_INTR_MIO_INT0                  BIT(16)
+#define    CN6XXX_INTR_MIO_INT1                  BIT(17)
+#define    CN6XXX_INTR_MAC_INT0                  BIT(18)
+#define    CN6XXX_INTR_MAC_INT1                  BIT(19)
+
+#define    CN6XXX_INTR_DMA0_FORCE                BIT_ULL(32)
+#define    CN6XXX_INTR_DMA1_FORCE                BIT_ULL(33)
+#define    CN6XXX_INTR_DMA0_COUNT                BIT_ULL(34)
+#define    CN6XXX_INTR_DMA1_COUNT                BIT_ULL(35)
+#define    CN6XXX_INTR_DMA0_TIME                 BIT_ULL(36)
+#define    CN6XXX_INTR_DMA1_TIME                 BIT_ULL(37)
+#define    CN6XXX_INTR_INSTR_DB_OF_ERR           BIT_ULL(48)
+#define    CN6XXX_INTR_SLIST_DB_OF_ERR           BIT_ULL(49)
+#define    CN6XXX_INTR_POUT_ERR                  BIT_ULL(50)
+#define    CN6XXX_INTR_PIN_BP_ERR                BIT_ULL(51)
+#define    CN6XXX_INTR_PGL_ERR                   BIT_ULL(52)
+#define    CN6XXX_INTR_PDI_ERR                   BIT_ULL(53)
+#define    CN6XXX_INTR_POP_ERR                   BIT_ULL(54)
+#define    CN6XXX_INTR_PINS_ERR                  BIT_ULL(55)
+#define    CN6XXX_INTR_SPRT0_ERR                 BIT_ULL(56)
+#define    CN6XXX_INTR_SPRT1_ERR                 BIT_ULL(57)
+#define    CN6XXX_INTR_ILL_PAD_ERR               BIT_ULL(60)
+
+#define    CN6XXX_INTR_DMA0_DATA                 (CN6XXX_INTR_DMA0_TIME)
+
+#define    CN6XXX_INTR_DMA1_DATA                 (CN6XXX_INTR_DMA1_TIME)
+
+#define    CN6XXX_INTR_DMA_DATA                  \
+	(CN6XXX_INTR_DMA0_DATA | CN6XXX_INTR_DMA1_DATA)
+
+#define    CN6XXX_INTR_PKT_DATA                  (CN6XXX_INTR_PKT_TIME | \
+						  CN6XXX_INTR_PKT_COUNT)
+
+/* Sum of interrupts for all PCI-Express Data Interrupts */
+#define    CN6XXX_INTR_PCIE_DATA                 \
+	(CN6XXX_INTR_DMA_DATA | CN6XXX_INTR_PKT_DATA)
+
+#define    CN6XXX_INTR_MIO                       \
+	(CN6XXX_INTR_MIO_INT0 | CN6XXX_INTR_MIO_INT1)
+
+#define    CN6XXX_INTR_MAC                       \
+	(CN6XXX_INTR_MAC_INT0 | CN6XXX_INTR_MAC_INT1)
+
+/* Sum of interrupts for error events */
+#define    CN6XXX_INTR_ERR                       \
+	(CN6XXX_INTR_BAR0_RW_TIMEOUT_ERR    \
+	   | CN6XXX_INTR_IO2BIG_ERR             \
+	   | CN6XXX_INTR_M0UPB0_ERR             \
+	   | CN6XXX_INTR_M0UPWI_ERR             \
+	   | CN6XXX_INTR_M0UNB0_ERR             \
+	   | CN6XXX_INTR_M0UNWI_ERR             \
+	   | CN6XXX_INTR_M1UPB0_ERR             \
+	   | CN6XXX_INTR_M1UPWI_ERR             \
+	   | CN6XXX_INTR_M1UNB0_ERR             \
+	   | CN6XXX_INTR_M1UNWI_ERR             \
+	   | CN6XXX_INTR_INSTR_DB_OF_ERR        \
+	   | CN6XXX_INTR_SLIST_DB_OF_ERR        \
+	   | CN6XXX_INTR_POUT_ERR               \
+	   | CN6XXX_INTR_PIN_BP_ERR             \
+	   | CN6XXX_INTR_PGL_ERR                \
+	   | CN6XXX_INTR_PDI_ERR                \
+	   | CN6XXX_INTR_POP_ERR                \
+	   | CN6XXX_INTR_PINS_ERR               \
+	   | CN6XXX_INTR_SPRT0_ERR              \
+	   | CN6XXX_INTR_SPRT1_ERR              \
+	   | CN6XXX_INTR_ILL_PAD_ERR)
+
+/* Programmed Mask for Interrupt Sum */
+#define    CN6XXX_INTR_MASK                      \
+	(CN6XXX_INTR_PCIE_DATA              \
+	   | CN6XXX_INTR_DMA0_FORCE             \
+	   | CN6XXX_INTR_DMA1_FORCE             \
+	   | CN6XXX_INTR_MIO                    \
+	   | CN6XXX_INTR_MAC                    \
+	   | CN6XXX_INTR_ERR)
+
+#define    CN6XXX_SLI_S2M_PORT0_CTL              0x3D80
+#define    CN6XXX_SLI_S2M_PORT1_CTL              0x3D90
+#define    CN6XXX_SLI_S2M_PORTX_CTL(port)        \
+	(CN6XXX_SLI_S2M_PORT0_CTL + ((port) * 0x10))
+
+#define    CN6XXX_SLI_INT_ENB64(port)            \
+	(CN6XXX_SLI_INT_ENB64_PORT0 + ((port) * 0x10))
+
+#define    CN6XXX_SLI_MAC_NUMBER                 0x3E00
+
+/* CN6XXX BAR1 Index registers. */
+#define    CN6XXX_PEM_BAR1_INDEX000                0x00011800C00000A8ULL
+#define    CN6XXX_PEM_OFFSET                       0x0000000001000000ULL
+
+#define    CN6XXX_BAR1_INDEX_START                 CN6XXX_PEM_BAR1_INDEX000
+#define    CN6XXX_PCI_BAR1_OFFSET                  0x8
+
+#define    CN6XXX_BAR1_REG(idx, port) \
+		(CN6XXX_BAR1_INDEX_START + ((port) * CN6XXX_PEM_OFFSET) + \
+		(CN6XXX_PCI_BAR1_OFFSET * (idx)))
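+
+/* For example, CN6XXX_BAR1_REG(0, 0) is CN6XXX_PEM_BAR1_INDEX000 itself;
+ * each higher index on the same port lies 8 bytes further on.
+ */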
+
+/*############################ DPI #########################*/
+
+#define    CN6XXX_DPI_CTL                 0x0001df0000000040ULL
+
+#define    CN6XXX_DPI_DMA_CONTROL         0x0001df0000000048ULL
+
+#define    CN6XXX_DPI_REQ_GBL_ENB         0x0001df0000000050ULL
+
+#define    CN6XXX_DPI_REQ_ERR_RSP         0x0001df0000000058ULL
+
+#define    CN6XXX_DPI_REQ_ERR_RST         0x0001df0000000060ULL
+
+#define    CN6XXX_DPI_DMA_ENG0_ENB        0x0001df0000000080ULL
+
+#define    CN6XXX_DPI_DMA_ENG_ENB(q_no)   \
+	(CN6XXX_DPI_DMA_ENG0_ENB + ((q_no) * 8))
+
+#define    CN6XXX_DPI_DMA_ENG0_BUF        0x0001df0000000880ULL
+
+#define    CN6XXX_DPI_DMA_ENG_BUF(q_no)   \
+	(CN6XXX_DPI_DMA_ENG0_BUF + ((q_no) * 8))
+
+#define    CN6XXX_DPI_SLI_PRT0_CFG        0x0001df0000000900ULL
+#define    CN6XXX_DPI_SLI_PRT1_CFG        0x0001df0000000908ULL
+#define    CN6XXX_DPI_SLI_PRTX_CFG(port)        \
+	(CN6XXX_DPI_SLI_PRT0_CFG + ((port) * 0x10))
+
+#define    CN6XXX_DPI_DMA_COMMIT_MODE     BIT_ULL(58)
+#define    CN6XXX_DPI_DMA_PKT_HP          BIT_ULL(57)
+#define    CN6XXX_DPI_DMA_PKT_EN          BIT_ULL(56)
+#define    CN6XXX_DPI_DMA_O_ES            BIT_ULL(15)
+#define    CN6XXX_DPI_DMA_O_MODE          BIT_ULL(14)
+
+#define    CN6XXX_DPI_DMA_CTL_MASK             \
+	(CN6XXX_DPI_DMA_COMMIT_MODE    |    \
+	 CN6XXX_DPI_DMA_PKT_HP         |    \
+	 CN6XXX_DPI_DMA_PKT_EN         |    \
+	 CN6XXX_DPI_DMA_O_ES           |    \
+	 CN6XXX_DPI_DMA_O_MODE)
+
+/*############################ CIU #########################*/
+
+#define    CN6XXX_CIU_SOFT_BIST           0x0001070000000738ULL
+#define    CN6XXX_CIU_SOFT_RST            0x0001070000000740ULL
+
+/*############################ MIO #########################*/
+#define    CN6XXX_MIO_PTP_CLOCK_CFG       0x0001070000000f00ULL
+#define    CN6XXX_MIO_PTP_CLOCK_LO        0x0001070000000f08ULL
+#define    CN6XXX_MIO_PTP_CLOCK_HI        0x0001070000000f10ULL
+#define    CN6XXX_MIO_PTP_CLOCK_COMP      0x0001070000000f18ULL
+#define    CN6XXX_MIO_PTP_TIMESTAMP       0x0001070000000f20ULL
+#define    CN6XXX_MIO_PTP_EVT_CNT         0x0001070000000f28ULL
+#define    CN6XXX_MIO_PTP_CKOUT_THRESH_LO 0x0001070000000f30ULL
+#define    CN6XXX_MIO_PTP_CKOUT_THRESH_HI 0x0001070000000f38ULL
+#define    CN6XXX_MIO_PTP_CKOUT_HI_INCR   0x0001070000000f40ULL
+#define    CN6XXX_MIO_PTP_CKOUT_LO_INCR   0x0001070000000f48ULL
+#define    CN6XXX_MIO_PTP_PPS_THRESH_LO   0x0001070000000f50ULL
+#define    CN6XXX_MIO_PTP_PPS_THRESH_HI   0x0001070000000f58ULL
+#define    CN6XXX_MIO_PTP_PPS_HI_INCR     0x0001070000000f60ULL
+#define    CN6XXX_MIO_PTP_PPS_LO_INCR     0x0001070000000f68ULL
+
+#define    CN6XXX_MIO_QLM4_CFG            0x00011800000015B0ULL
+#define    CN6XXX_MIO_RST_BOOT            0x0001180000001600ULL
+
+#define    CN6XXX_MIO_QLM_CFG_MASK        0x7
+
+/*############################ LMC #########################*/
+
+#define    CN6XXX_LMC0_RESET_CTL               0x0001180088000180ULL
+#define    CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK  0x0000000000000001ULL
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
new file mode 100644
index 0000000..8e830d0
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -0,0 +1,198 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
+{
+	u32 i;
+	u32 fifo_sizes[6] = { 3, 3, 1, 1, 1, 8 };
+
+	lio_pci_writeq(oct, CN6XXX_DPI_DMA_CTL_MASK, CN6XXX_DPI_DMA_CONTROL);
+	dev_dbg(&oct->pci_dev->dev, "DPI_DMA_CONTROL: 0x%016llx\n",
+		lio_pci_readq(oct, CN6XXX_DPI_DMA_CONTROL));
+
+	for (i = 0; i < 6; i++) {
+		/* Prevent service of the instruction queue for all DMA
+		 * engines. Engine 5 will remain 0; engines 0-4 will be set
+		 * up by the core.
+		 */
+		lio_pci_writeq(oct, 0, CN6XXX_DPI_DMA_ENG_ENB(i));
+		lio_pci_writeq(oct, fifo_sizes[i], CN6XXX_DPI_DMA_ENG_BUF(i));
+		dev_dbg(&oct->pci_dev->dev, "DPI_ENG_BUF%d: 0x%016llx\n", i,
+			lio_pci_readq(oct, CN6XXX_DPI_DMA_ENG_BUF(i)));
+	}
+
+	/* DPI_SLI_PRT_CFG has MPS and MRRS settings that will be set
+	 * separately.
+	 */
+
+	lio_pci_writeq(oct, 1, CN6XXX_DPI_CTL);
+	dev_dbg(&oct->pci_dev->dev, "DPI_CTL: 0x%016llx\n",
+		lio_pci_readq(oct, CN6XXX_DPI_CTL));
+}
+
+static int lio_cn68xx_soft_reset(struct octeon_device *oct)
+{
+	lio_cn6xxx_soft_reset(oct);
+	lio_cn68xx_set_dpi_regs(oct);
+
+	return 0;
+}
+
+static void lio_cn68xx_setup_pkt_ctl_regs(struct octeon_device *oct)
+{
+	struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip;
+	u64 pktctl, tx_pipe, max_oqs;
+
+	pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);
+
+	/* 68XX specific */
+	max_oqs = CFG_GET_OQ_MAX_Q(CHIP_FIELD(oct, cn6xxx, conf));
+	tx_pipe = octeon_read_csr64(oct, CN68XX_SLI_TX_PIPE);
+	tx_pipe &= 0xffffffffff00ffffULL; /* clear out NUMP field */
+	tx_pipe |= max_oqs << 16; /* put max_oqs in NUMP field */
+	octeon_write_csr64(oct, CN68XX_SLI_TX_PIPE, tx_pipe);
+
+	if (CFG_GET_IS_SLI_BP_ON(cn68xx->conf))
+		pktctl |= 0xF;
+	else
+		/* Disable per-port backpressure. */
+		pktctl &= ~0xF;
+	octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
+}
+
+static int lio_cn68xx_setup_device_regs(struct octeon_device *oct)
+{
+	lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
+	lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_256B);
+	lio_cn6xxx_enable_error_reporting(oct);
+
+	lio_cn6xxx_setup_global_input_regs(oct);
+	lio_cn68xx_setup_pkt_ctl_regs(oct);
+	lio_cn6xxx_setup_global_output_regs(oct);
+
+	/* The default error timeout value should be 0x200000 to avoid a
+	 * host hang when an invalid register is read.
+	 */
+	octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);
+
+	return 0;
+}
+
+static inline void lio_cn68xx_vendor_message_fix(struct octeon_device *oct)
+{
+	u32 val = 0;
+
+	/* Set M_VEND1_DRP and M_VEND0_DRP bits */
+	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, &val);
+	val |= 0x3;
+	pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val);
+}
+
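+/* A QLM4 configuration of 0 identifies the card as the 210NV variant. */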
+int lio_is_210nv(struct octeon_device *oct)
+{
+	u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG);
+
+	return ((mio_qlm4_cfg & CN6XXX_MIO_QLM_CFG_MASK) == 0);
+}
+
+int lio_setup_cn68xx_octeon_device(struct octeon_device *oct)
+{
+	struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip;
+	u16 card_type = LIO_410NV;
+
+	if (octeon_map_pci_barx(oct, 0, 0))
+		return 1;
+
+	if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
+		dev_err(&oct->pci_dev->dev, "%s CN68XX BAR1 map failed\n",
+			__func__);
+		octeon_unmap_pci_barx(oct, 0);
+		return 1;
+	}
+
+	spin_lock_init(&cn68xx->lock_for_droq_int_enb_reg);
+
+	oct->fn_list.setup_iq_regs = lio_cn6xxx_setup_iq_regs;
+	oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;
+
+	oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
+	oct->fn_list.soft_reset = lio_cn68xx_soft_reset;
+	oct->fn_list.setup_device_regs = lio_cn68xx_setup_device_regs;
+	oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
+	oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;
+
+	oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
+	oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
+	oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;
+
+	oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
+	oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;
+
+	oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
+	oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;
+
+	lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);
+
+	/* Determine variant of card */
+	if (lio_is_210nv(oct))
+		card_type = LIO_210NV;
+
+	cn68xx->conf = (struct octeon_config *)
+		       oct_get_config_info(oct, card_type);
+	if (!cn68xx->conf) {
+		dev_err(&oct->pci_dev->dev, "%s No Config found for CN68XX %s\n",
+			__func__,
+			(card_type == LIO_410NV) ? LIO_410NV_NAME :
+			LIO_210NV_NAME);
+		octeon_unmap_pci_barx(oct, 0);
+		octeon_unmap_pci_barx(oct, 1);
+		return 1;
+	}
+
+	oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);
+
+	lio_cn68xx_vendor_message_fix(oct);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
new file mode 100644
index 0000000..d4e1c9f
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
@@ -0,0 +1,33 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file  cn68xx_device.h
+ *  \brief Host Driver: Routines that perform CN68XX specific operations.
+ */
+
+#ifndef __CN68XX_DEVICE_H__
+#define  __CN68XX_DEVICE_H__
+
+int lio_setup_cn68xx_octeon_device(struct octeon_device *oct);
+int lio_is_210nv(struct octeon_device *oct);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
new file mode 100644
index 0000000..38cddbd
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
@@ -0,0 +1,51 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn68xx_regs.h
+ *  \brief Host Driver: Register Address and Register Mask values for
+ *  Octeon CN68XX devices. The register map for CN66XX is the same
+ *  for most registers. This file has the other registers that are
+ *  68XX-specific.
+ */
+
+#ifndef __CN68XX_REGS_H__
+#define __CN68XX_REGS_H__
+#include "cn66xx_regs.h"
+
+/*###################### REQUEST QUEUE #########################*/
+
+#define    CN68XX_SLI_IQ_PORT0_PKIND             0x0800
+
+#define    CN68XX_SLI_IQ_PORT_PKIND(iq)           \
+	(CN68XX_SLI_IQ_PORT0_PKIND + ((iq) * CN6XXX_IQ_OFFSET))
+
+/*############################ OUTPUT QUEUE #########################*/
+
+/* Starting pipe number and number of pipes used by the SLI packet output. */
+#define    CN68XX_SLI_TX_PIPE                    0x1230
+
+/*######################## INTERRUPTS #########################*/
+
+/*------------------ Interrupt Masks ----------------*/
+#define    CN68XX_INTR_PIPE_ERR                  BIT_ULL(61)
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
new file mode 100644
index 0000000..160f807
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -0,0 +1,1216 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/ethtool.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+
+struct oct_mdio_cmd_context {
+	int octeon_id;
+	wait_queue_head_t wc;
+	int cond;
+};
+
+struct oct_mdio_cmd_resp {
+	u64 rh;
+	struct oct_mdio_cmd resp;
+	u64 status;
+};
+
+#define OCT_MDIO45_RESP_SIZE   (sizeof(struct oct_mdio_cmd_resp))
+
+/* Octeon's interface mode of operation */
+enum {
+	INTERFACE_MODE_DISABLED,
+	INTERFACE_MODE_RGMII,
+	INTERFACE_MODE_GMII,
+	INTERFACE_MODE_SPI,
+	INTERFACE_MODE_PCIE,
+	INTERFACE_MODE_XAUI,
+	INTERFACE_MODE_SGMII,
+	INTERFACE_MODE_PICMG,
+	INTERFACE_MODE_NPI,
+	INTERFACE_MODE_LOOP,
+	INTERFACE_MODE_SRIO,
+	INTERFACE_MODE_ILK,
+	INTERFACE_MODE_RXAUI,
+	INTERFACE_MODE_QSGMII,
+	INTERFACE_MODE_AGL,
+};
+
+#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
+#define OCT_ETHTOOL_REGDUMP_LEN  4096
+#define OCT_ETHTOOL_REGSVER  1
+
+static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
+	"Instr posted",
+	"Instr processed",
+	"Instr dropped",
+	"Bytes Sent",
+	"Sgentry_sent",
+	"Inst cntreg",
+	"Tx done",
+	"Tx Iq busy",
+	"Tx dropped",
+	"Tx bytes",
+};
+
+static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
+	"OQ Pkts Received",
+	"OQ Bytes Received",
+	"Dropped no dispatch",
+	"Dropped nomem",
+	"Dropped toomany",
+	"Stack RX cnt",
+	"Stack RX Bytes",
+	"RX dropped",
+};
+
+#define OCTNIC_NCMD_AUTONEG_ON  0x1
+#define OCTNIC_NCMD_PHY_ON      0x2
+
+static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	struct oct_link_info *linfo;
+
+	linfo = &lio->linfo;
+
+	if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
+	    linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
+		ecmd->port = PORT_FIBRE;
+		ecmd->supported =
+			(SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
+			 SUPPORTED_Pause);
+		ecmd->advertising =
+			(ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
+		ecmd->transceiver = XCVR_EXTERNAL;
+		ecmd->autoneg = AUTONEG_DISABLE;
+
+	} else {
+		dev_err(&oct->pci_dev->dev, "Unknown link interface reported\n");
+	}
+
+	if (linfo->link.s.status) {
+		ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
+		ecmd->duplex = linfo->link.s.duplex;
+	} else {
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
+	}
+
+	return 0;
+}
+
+static void
+lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+	struct lio *lio;
+	struct octeon_device *oct;
+
+	lio = GET_LIO(netdev);
+	oct = lio->oct_dev;
+
+	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
+	strcpy(drvinfo->driver, "liquidio");
+	strcpy(drvinfo->version, LIQUIDIO_VERSION);
+	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
+		ETHTOOL_FWVERS_LEN);
+	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev),
+		sizeof(drvinfo->bus_info));
+	drvinfo->regdump_len = OCT_ETHTOOL_REGDUMP_LEN;
+}
+
+static void
+lio_ethtool_get_channels(struct net_device *dev,
+			 struct ethtool_channels *channel)
+{
+	struct lio *lio = GET_LIO(dev);
+	struct octeon_device *oct = lio->oct_dev;
+	u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
+
+	if (OCTEON_CN6XXX(oct)) {
+		struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
+
+		max_rx = CFG_GET_OQ_MAX_Q(conf6x);
+		max_tx = CFG_GET_IQ_MAX_Q(conf6x);
+		rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
+		tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
+	}
+
+	channel->max_rx = max_rx;
+	channel->max_tx = max_tx;
+	channel->rx_count = rx_count;
+	channel->tx_count = tx_count;
+}
+
+static int lio_get_eeprom_len(struct net_device *netdev)
+{
+	u8 buf[128];
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct_dev = lio->oct_dev;
+	struct octeon_board_info *board_info;
+	int len;
+
+	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
+	len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
+		      board_info->name, board_info->serial_number,
+		      board_info->major, board_info->minor);
+
+	return len;
+}
+
+static int
+lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+	       u8 *bytes)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct_dev = lio->oct_dev;
+	struct octeon_board_info *board_info;
+
+	if (eeprom->offset != 0)
+		return -EINVAL;
+
+	eeprom->magic = oct_dev->pci_dev->vendor;
+	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
+	sprintf((char *)bytes,
+		"boardname:%s serialnum:%s maj:%lld min:%lld\n",
+		board_info->name, board_info->serial_number,
+		board_info->major, board_info->minor);
+
+	return 0;
+}
+
+static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	struct octnic_ctrl_pkt nctrl;
+	struct octnic_ctrl_params nparams;
+	int ret = 0;
+
+	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+	nctrl.ncmd.u64 = 0;
+	nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
+	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+	nctrl.ncmd.s.param2 = addr;
+	nctrl.ncmd.s.param3 = val;
+	nctrl.wait_time = 100;
+	nctrl.netpndev = (u64)netdev;
+	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+	nparams.resp_order = OCTEON_RESP_ORDERED;
+
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+	if (ret < 0) {
+		dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Callback invoked when an MDIO command response arrives. */
+static void octnet_mdio_resp_callback(struct octeon_device *oct,
+				      u32 status,
+				      void *buf)
+{
+	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
+	struct oct_mdio_cmd_context *mdio_cmd_ctx;
+	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+
+	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
+	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
+
+	oct = lio_get_device(mdio_cmd_ctx->octeon_id);
+	if (status) {
+		dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n",
+			CVM_CAST64(status));
+		ACCESS_ONCE(mdio_cmd_ctx->cond) = -1;
+	} else {
+		ACCESS_ONCE(mdio_cmd_ctx->cond) = 1;
+	}
+	wake_up_interruptible(&mdio_cmd_ctx->wc);
+}
+
+/* Provide PHY access via MDIO clause 45: op 1 writes *value to the
+ * register at loc, op 0 reads the register at loc into *value.
+ */
+static int
+octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
+{
+	struct octeon_device *oct_dev = lio->oct_dev;
+	struct octeon_soft_command *sc;
+	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
+	struct oct_mdio_cmd_context *mdio_cmd_ctx;
+	struct oct_mdio_cmd *mdio_cmd;
+	int retval = 0;
+
+	sc = (struct octeon_soft_command *)
+		octeon_alloc_soft_command(oct_dev,
+					  sizeof(struct oct_mdio_cmd),
+					  sizeof(struct oct_mdio_cmd_resp),
+					  sizeof(struct oct_mdio_cmd_context));
+
+	if (!sc)
+		return -ENOMEM;
+
+	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
+	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
+	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
+
+	ACCESS_ONCE(mdio_cmd_ctx->cond) = 0;
+	mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
+	mdio_cmd->op = op;
+	mdio_cmd->mdio_addr = loc;
+	if (op)
+		mdio_cmd->value1 = *value;
+	mdio_cmd->value2 = lio->linfo.ifidx;
+	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
+
+	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
+				    0, 0, 0);
+
+	sc->wait_time = 1000;
+	sc->callback = octnet_mdio_resp_callback;
+	sc->callback_arg = sc;
+
+	init_waitqueue_head(&mdio_cmd_ctx->wc);
+
+	retval = octeon_send_soft_command(oct_dev, sc);
+
+	if (retval) {
+		dev_err(&oct_dev->pci_dev->dev,
+			"octnet_mdio45_access instruction failed status: %x\n",
+			retval);
+		retval =  -EBUSY;
+	} else {
+		/* Sleep on a wait queue till the cond flag indicates that the
+		 * response arrived
+		 */
+		sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
+		retval = mdio_cmd_rsp->status;
+		if (retval) {
+			dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
+			retval = -EBUSY;
+		} else {
+			octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
+					    sizeof(struct oct_mdio_cmd) / 8);
+
+			if (ACCESS_ONCE(mdio_cmd_ctx->cond) == 1) {
+				if (!op)
+					*value = mdio_cmd_rsp->resp.value1;
+			} else {
+				retval = -EINVAL;
+			}
+		}
+	}
+
+	octeon_free_soft_command(oct_dev, sc);
+
+	return retval;
+}
+
+static int lio_set_phys_id(struct net_device *netdev,
+			   enum ethtool_phys_id_state state)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	int value, ret;
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		if (oct->chip_id == OCTEON_CN66XX) {
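+			/* Drive the ID LED via the PHY GPIO; a positive
+			 * return value tells the ethtool core the frequency
+			 * (per second) at which to cycle ID_ON/ID_OFF.
+			 */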
+			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+					   VITESSE_PHY_GPIO_DRIVEON);
+			return 2;
+
+		} else if (oct->chip_id == OCTEON_CN68XX) {
+			/* Save the current LED settings */
+			ret = octnet_mdio45_access(lio, 0,
+						   LIO68XX_LED_BEACON_ADDR,
+						   &lio->phy_beacon_val);
+			if (ret)
+				return ret;
+
+			ret = octnet_mdio45_access(lio, 0,
+						   LIO68XX_LED_CTRL_ADDR,
+						   &lio->led_ctrl_val);
+			if (ret)
+				return ret;
+
+			/* Configure Beacon values */
+			value = LIO68XX_LED_BEACON_CFGON;
+			ret = octnet_mdio45_access(lio, 1,
+						   LIO68XX_LED_BEACON_ADDR,
+						   &value);
+			if (ret)
+				return ret;
+
+			value = LIO68XX_LED_CTRL_CFGON;
+			ret = octnet_mdio45_access(lio, 1,
+						   LIO68XX_LED_CTRL_ADDR,
+						   &value);
+			if (ret)
+				return ret;
+		} else {
+			return -EINVAL;
+		}
+		break;
+
+	case ETHTOOL_ID_ON:
+		if (oct->chip_id == OCTEON_CN66XX)
+			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+					   VITESSE_PHY_GPIO_HIGH);
+		else
+			return -EINVAL;
+		break;
+
+	case ETHTOOL_ID_OFF:
+		if (oct->chip_id == OCTEON_CN66XX)
+			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+					   VITESSE_PHY_GPIO_LOW);
+		else
+			return -EINVAL;
+		break;
+
+	case ETHTOOL_ID_INACTIVE:
+		if (oct->chip_id == OCTEON_CN66XX) {
+			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+					   VITESSE_PHY_GPIO_DRIVEOFF);
+		} else if (oct->chip_id == OCTEON_CN68XX) {
+			/* Restore LED settings */
+			ret = octnet_mdio45_access(lio, 1,
+						   LIO68XX_LED_CTRL_ADDR,
+						   &lio->led_ctrl_val);
+			if (ret)
+				return ret;
+
+			ret = octnet_mdio45_access(lio, 1,
+						   LIO68XX_LED_BEACON_ADDR,
+						   &lio->phy_beacon_val);
+			if (ret)
+				return ret;
+
+		} else {
+			return -EINVAL;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
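+/* ethtool get_ringparam handler. When the MTU is larger than the default
+ * frame size, the RX descriptor counts are reported in the jumbo fields
+ * rather than the normal ones.
+ */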
+static void
+lio_ethtool_get_ringparam(struct net_device *netdev,
+			  struct ethtool_ringparam *ering)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
+	    rx_pending = 0;
+
+	if (OCTEON_CN6XXX(oct)) {
+		struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
+
+		tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
+		rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
+		rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
+		tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
+	}
+
+	if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) {
+		ering->rx_pending = 0;
+		ering->rx_max_pending = 0;
+		ering->rx_mini_pending = 0;
+		ering->rx_jumbo_pending = rx_pending;
+		ering->rx_mini_max_pending = 0;
+		ering->rx_jumbo_max_pending = rx_max_pending;
+	} else {
+		ering->rx_pending = rx_pending;
+		ering->rx_max_pending = rx_max_pending;
+		ering->rx_mini_pending = 0;
+		ering->rx_jumbo_pending = 0;
+		ering->rx_mini_max_pending = 0;
+		ering->rx_jumbo_max_pending = 0;
+	}
+
+	ering->tx_pending = tx_pending;
+	ering->tx_max_pending = tx_max_pending;
+}
+
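+/* msglevel accessors. Toggling NETIF_MSG_HW also switches verbose output
+ * in the firmware on or off through a NIC control command.
+ */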
+static u32 lio_get_msglevel(struct net_device *netdev)
+{
+	struct lio *lio = GET_LIO(netdev);
+
+	return lio->msg_enable;
+}
+
+static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+	struct lio *lio = GET_LIO(netdev);
+
+	if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
+		if (msglvl & NETIF_MSG_HW)
+			liquidio_set_feature(netdev,
+					     OCTNET_CMD_VERBOSE_ENABLE);
+		else
+			liquidio_set_feature(netdev,
+					     OCTNET_CMD_VERBOSE_DISABLE);
+	}
+
+	lio->msg_enable = msglvl;
+}
+
+static void
+lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+	/* Note: these drivers do not support autonegotiation, so just
+	 * report pause frame support.
+	 */
+	pause->tx_pause = 1;
+	pause->rx_pause = 1;    /* TODO: Need to support RX pause frame!!. */
+}
+
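+/* Fill the ethtool stats buffer. The per-IQ and per-OQ values must be
+ * emitted in exactly the order of the corresponding strings produced by
+ * lio_get_strings().
+ */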
+static void
+lio_get_ethtool_stats(struct net_device *netdev,
+		      struct ethtool_stats *stats, u64 *data)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct_dev = lio->oct_dev;
+	int i = 0, j;
+
+	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES; j++) {
+		if (!(oct_dev->io_qmask.iq & (1UL << j)))
+			continue;
+		data[i++] =
+			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
+		data[i++] =
+			CVM_CAST64(
+				oct_dev->instr_queue[j]->stats.instr_processed);
+		data[i++] =
+			CVM_CAST64(
+				oct_dev->instr_queue[j]->stats.instr_dropped);
+		data[i++] =
+			CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
+		data[i++] =
+			CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
+		data[i++] =
+			readl(oct_dev->instr_queue[j]->inst_cnt_reg);
+		data[i++] =
+			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
+		data[i++] =
+			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
+		data[i++] =
+			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
+		data[i++] =
+			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
+	}
+
+	/* for (j = 0; j < oct_dev->num_oqs; j++){ */
+	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES; j++) {
+		if (!(oct_dev->io_qmask.oq & (1UL << j)))
+			continue;
+		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
+		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
+		data[i++] =
+			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
+		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
+		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
+		data[i++] =
+			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
+		data[i++] =
+			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
+		data[i++] =
+			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
+	}
+}
+
+static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct_dev = lio->oct_dev;
+	int num_iq_stats, num_oq_stats, i, j;
+
+	num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
+	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+		if (!(oct_dev->io_qmask.iq & (1UL << i)))
+			continue;
+		for (j = 0; j < num_iq_stats; j++) {
+			sprintf(data, "IQ%d %s", i, oct_iq_stats_strings[j]);
+			data += ETH_GSTRING_LEN;
+		}
+	}
+
+	num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
+	/* for (i = 0; i < oct_dev->num_oqs; i++) { */
+	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+		if (!(oct_dev->io_qmask.oq & (1UL << i)))
+			continue;
+		for (j = 0; j < num_oq_stats; j++) {
+			sprintf(data, "OQ%d %s", i, oct_droq_stats_strings[j]);
+			data += ETH_GSTRING_LEN;
+		}
+	}
+}
+
+static int lio_get_sset_count(struct net_device *netdev, int sset)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct_dev = lio->oct_dev;
+
+	return (ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs) +
+	       (ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
+}
+
+static int lio_get_intr_coalesce(struct net_device *netdev,
+				 struct ethtool_coalesce *intr_coal)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+	struct octeon_instr_queue *iq;
+	struct oct_intrmod_cfg *intrmod_cfg;
+
+	intrmod_cfg = &oct->intrmod;
+
+	switch (oct->chip_id) {
+	/* case OCTEON_CN73XX: Todo */
+	/*      break; */
+	case OCTEON_CN68XX:
+	case OCTEON_CN66XX:
+		if (!intrmod_cfg->intrmod_enable) {
+			intr_coal->rx_coalesce_usecs =
+				CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
+			intr_coal->rx_max_coalesced_frames =
+				CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
+		} else {
+			intr_coal->use_adaptive_rx_coalesce =
+				intrmod_cfg->intrmod_enable;
+			intr_coal->rate_sample_interval =
+				intrmod_cfg->intrmod_check_intrvl;
+			intr_coal->pkt_rate_high =
+				intrmod_cfg->intrmod_maxpkt_ratethr;
+			intr_coal->pkt_rate_low =
+				intrmod_cfg->intrmod_minpkt_ratethr;
+			intr_coal->rx_max_coalesced_frames_high =
+				intrmod_cfg->intrmod_maxcnt_trigger;
+			intr_coal->rx_coalesce_usecs_high =
+				intrmod_cfg->intrmod_maxtmr_trigger;
+			intr_coal->rx_coalesce_usecs_low =
+				intrmod_cfg->intrmod_mintmr_trigger;
+			intr_coal->rx_max_coalesced_frames_low =
+				intrmod_cfg->intrmod_mincnt_trigger;
+		}
+
+		iq = oct->instr_queue[lio->linfo.txpciq[0]];
+		intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
+		break;
+
+	default:
+		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Callback function for intrmod */
+static void octnet_intrmod_callback(struct octeon_device *oct_dev,
+				    u32 status,
+				    void *ptr)
+{
+	struct oct_intrmod_cmd *cmd = ptr;
+	struct octeon_soft_command *sc = cmd->sc;
+
+	oct_dev = cmd->oct_dev;
+
+	if (status)
+		dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
+			CVM_CAST64(status));
+	else
+		dev_info(&oct_dev->pci_dev->dev,
+			 "Rx-Adaptive Interrupt moderation enabled:%llx\n",
+			 oct_dev->intrmod.intrmod_enable);
+
+	octeon_free_soft_command(oct_dev, sc);
+}
+
+/* Configure interrupt moderation parameters */
+static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
+{
+	struct octeon_soft_command *sc;
+	struct oct_intrmod_cmd *cmd;
+	struct oct_intrmod_cfg *cfg;
+	int retval;
+	struct octeon_device *oct_dev = (struct octeon_device *)oct;
+
+	/* Alloc soft command */
+	sc = (struct octeon_soft_command *)
+		octeon_alloc_soft_command(oct_dev,
+					  sizeof(struct oct_intrmod_cfg),
+					  0,
+					  sizeof(struct oct_intrmod_cmd));
+
+	if (!sc)
+		return -ENOMEM;
+
+	cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
+	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
+
+	memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
+	octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
+	cmd->sc = sc;
+	cmd->cfg = cfg;
+	cmd->oct_dev = oct_dev;
+
+	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
+				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
+
+	sc->callback = octnet_intrmod_callback;
+	sc->callback_arg = cmd;
+	sc->wait_time = 1000;
+
+	retval = octeon_send_soft_command(oct_dev, sc);
+	if (retval) {
+		octeon_free_soft_command(oct_dev, sc);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Enable/Disable auto interrupt Moderation */
+static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
+				 *intr_coal, int adaptive)
+{
+	int ret = 0;
+	struct octeon_device *oct = lio->oct_dev;
+	struct oct_intrmod_cfg *intrmod_cfg;
+
+	intrmod_cfg = &oct->intrmod;
+
+	if (adaptive) {
+		if (intr_coal->rate_sample_interval)
+			intrmod_cfg->intrmod_check_intrvl =
+				intr_coal->rate_sample_interval;
+		else
+			intrmod_cfg->intrmod_check_intrvl =
+				LIO_INTRMOD_CHECK_INTERVAL;
+
+		if (intr_coal->pkt_rate_high)
+			intrmod_cfg->intrmod_maxpkt_ratethr =
+				intr_coal->pkt_rate_high;
+		else
+			intrmod_cfg->intrmod_maxpkt_ratethr =
+				LIO_INTRMOD_MAXPKT_RATETHR;
+
+		if (intr_coal->pkt_rate_low)
+			intrmod_cfg->intrmod_minpkt_ratethr =
+				intr_coal->pkt_rate_low;
+		else
+			intrmod_cfg->intrmod_minpkt_ratethr =
+				LIO_INTRMOD_MINPKT_RATETHR;
+
+		if (intr_coal->rx_max_coalesced_frames_high)
+			intrmod_cfg->intrmod_maxcnt_trigger =
+				intr_coal->rx_max_coalesced_frames_high;
+		else
+			intrmod_cfg->intrmod_maxcnt_trigger =
+				LIO_INTRMOD_MAXCNT_TRIGGER;
+
+		if (intr_coal->rx_coalesce_usecs_high)
+			intrmod_cfg->intrmod_maxtmr_trigger =
+				intr_coal->rx_coalesce_usecs_high;
+		else
+			intrmod_cfg->intrmod_maxtmr_trigger =
+				LIO_INTRMOD_MAXTMR_TRIGGER;
+
+		if (intr_coal->rx_coalesce_usecs_low)
+			intrmod_cfg->intrmod_mintmr_trigger =
+				intr_coal->rx_coalesce_usecs_low;
+		else
+			intrmod_cfg->intrmod_mintmr_trigger =
+				LIO_INTRMOD_MINTMR_TRIGGER;
+
+		if (intr_coal->rx_max_coalesced_frames_low)
+			intrmod_cfg->intrmod_mincnt_trigger =
+				intr_coal->rx_max_coalesced_frames_low;
+		else
+			intrmod_cfg->intrmod_mincnt_trigger =
+				LIO_INTRMOD_MINCNT_TRIGGER;
+	}
+
+	intrmod_cfg->intrmod_enable = adaptive;
+	ret = octnet_set_intrmod_cfg(oct, intrmod_cfg);
+
+	return ret;
+}
+
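+/* Static count-based RX coalescing: turn adaptive moderation off, then
+ * program the packet-count interrupt threshold (falling back to the chip
+ * default when the user passes 0).
+ */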
+static int
+oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
+{
+	int ret;
+	struct octeon_device *oct = lio->oct_dev;
+	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+	u32 rx_max_coalesced_frames;
+
+	if (!intr_coal->rx_max_coalesced_frames)
+		rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
+	else
+		rx_max_coalesced_frames = intr_coal->rx_max_coalesced_frames;
+
+	/* Disable adaptive interrupt modulation */
+	ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
+	if (ret)
+		return ret;
+
+	/* Config Cnt based interrupt values */
+	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
+			 rx_max_coalesced_frames);
+	CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
+	return 0;
+}
+
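+/* Static time-based RX coalescing: turn adaptive moderation off, convert
+ * microseconds to OQ ticks and program the time interrupt threshold.
+ */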
+static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
+			       *intr_coal)
+{
+	int ret;
+	struct octeon_device *oct = lio->oct_dev;
+	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+	u32 time_threshold, rx_coalesce_usecs;
+
+	if (!intr_coal->rx_coalesce_usecs)
+		rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
+	else
+		rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
+
+	/* Disable adaptive interrupt modulation */
+	ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
+	if (ret)
+		return ret;
+
+	/* Config Time based interrupt values */
+	time_threshold = lio_cn6xxx_get_oq_ticks(oct, rx_coalesce_usecs);
+	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
+	CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
+
+	return 0;
+}
+
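+/* ethtool set_coalesce handler. tx-frames maps to the IQ fill threshold.
+ * For RX, adaptive moderation takes precedence over the static usecs and
+ * frame-count settings; clearing all three restores the chip defaults.
+ */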
+static int lio_set_intr_coalesce(struct net_device *netdev,
+				 struct ethtool_coalesce *intr_coal)
+{
+	struct lio *lio = GET_LIO(netdev);
+	int ret;
+	struct octeon_device *oct = lio->oct_dev;
+	u32 j, q_no;
+
+	if ((intr_coal->tx_max_coalesced_frames >= CN6XXX_DB_MIN) &&
+	    (intr_coal->tx_max_coalesced_frames <= CN6XXX_DB_MAX)) {
+		for (j = 0; j < lio->linfo.num_txpciq; j++) {
+			q_no = lio->linfo.txpciq[j];
+			oct->instr_queue[q_no]->fill_threshold =
+				intr_coal->tx_max_coalesced_frames;
+		}
+	} else {
+		dev_err(&oct->pci_dev->dev,
+			"LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
+			intr_coal->tx_max_coalesced_frames, CN6XXX_DB_MIN,
+			CN6XXX_DB_MAX);
+		return -EINVAL;
+	}
+
+	/* User requested adaptive-rx on */
+	if (intr_coal->use_adaptive_rx_coalesce) {
+		ret = oct_cfg_adaptive_intr(lio, intr_coal, 1);
+		if (ret)
+			goto ret_intrmod;
+	}
+
+	/* User requested adaptive-rx off and rx coalesce */
+	if ((intr_coal->rx_coalesce_usecs) &&
+	    (!intr_coal->use_adaptive_rx_coalesce)) {
+		ret = oct_cfg_rx_intrtime(lio, intr_coal);
+		if (ret)
+			goto ret_intrmod;
+	}
+
+	/* User requested adaptive-rx off and rx frame count */
+	if ((intr_coal->rx_max_coalesced_frames) &&
+	    (!intr_coal->use_adaptive_rx_coalesce)) {
+		ret = oct_cfg_rx_intrcnt(lio, intr_coal);
+		if (ret)
+			goto ret_intrmod;
+	}
+
+	/* User requested adaptive-rx off, so use default coalesce params */
+	if ((!intr_coal->rx_max_coalesced_frames) &&
+	    (!intr_coal->use_adaptive_rx_coalesce) &&
+	    (!intr_coal->rx_coalesce_usecs)) {
+		dev_info(&oct->pci_dev->dev,
+			 "Turning off adaptive-rx interrupt moderation\n");
+		dev_info(&oct->pci_dev->dev,
+			 "Using RX Coalesce Default values rx_coalesce_usecs:%d rx_max_coalesced_frames:%d\n",
+			 CN6XXX_OQ_INTR_TIME, CN6XXX_OQ_INTR_PKT);
+		ret = oct_cfg_rx_intrtime(lio, intr_coal);
+		if (ret)
+			goto ret_intrmod;
+
+		ret = oct_cfg_rx_intrcnt(lio, intr_coal);
+		if (ret)
+			goto ret_intrmod;
+	}
+
+	return 0;
+ret_intrmod:
+	return ret;
+}
+
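+/* Report timestamping capabilities. A PHC index is exposed only if the
+ * PTP clock was registered successfully.
+ */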
+static int lio_get_ts_info(struct net_device *netdev,
+			   struct ethtool_ts_info *info)
+{
+	struct lio *lio = GET_LIO(netdev);
+
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_TX_SOFTWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RX_SOFTWARE |
+		SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	if (lio->ptp_clock)
+		info->phc_index = ptp_clock_index(lio->ptp_clock);
+	else
+		info->phc_index = -1;
+
+	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+
+	return 0;
+}
+
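+/* ethtool set_settings handler: validate the requested mode and forward it
+ * to the core application as a NIC control command.
+ */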
+static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	struct oct_link_info *linfo;
+	struct octnic_ctrl_pkt nctrl;
+	struct octnic_ctrl_params nparams;
+	int ret = 0;
+
+	/* get the link info */
+	linfo = &lio->linfo;
+
+	if (ecmd->autoneg != AUTONEG_ENABLE && ecmd->autoneg != AUTONEG_DISABLE)
+		return -EINVAL;
+
+	if (ecmd->autoneg == AUTONEG_DISABLE && ((ecmd->speed != SPEED_100 &&
+						  ecmd->speed != SPEED_10) ||
+						 (ecmd->duplex != DUPLEX_HALF &&
+						  ecmd->duplex != DUPLEX_FULL)))
+		return -EINVAL;
+
+	/* Ethtool support is not provided for XAUI and RXAUI interfaces
+	 * as they operate at fixed speed and duplex settings.
+	 */
+	if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
+	    linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
+		dev_info(&oct->pci_dev->dev, "XAUI IFs settings cannot be modified.\n");
+		return -EINVAL;
+	}
+
+	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+	nctrl.ncmd.u64 = 0;
+	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
+	nctrl.wait_time = 1000;
+	nctrl.netpndev = (u64)netdev;
+	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+	/* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex
+	 * to SE core application using ncmd.s.more & ncmd.s.param
+	 */
+	if (ecmd->autoneg == AUTONEG_ENABLE) {
+		/* Autoneg ON */
+		nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
+				     OCTNIC_NCMD_AUTONEG_ON;
+		nctrl.ncmd.s.param2 = ecmd->advertising;
+	} else {
+		/* Autoneg OFF */
+		nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;
+
+		nctrl.ncmd.s.param3 = ecmd->duplex;
+
+		nctrl.ncmd.s.param2 = ecmd->speed;
+	}
+
+	nparams.resp_order = OCTEON_RESP_ORDERED;
+
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+	if (ret < 0) {
+		dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
+		return -1;
+	}
+
+	return 0;
+}
+
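+/* nway_reset handler: if the interface is up, re-issue set_settings with
+ * zeroed parameters to restart the link.
+ */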
+static int lio_nway_reset(struct net_device *netdev)
+{
+	if (netif_running(netdev)) {
+		struct ethtool_cmd ecmd;
+
+		memset(&ecmd, 0, sizeof(struct ethtool_cmd));
+		ecmd.autoneg = 0;
+		ecmd.speed = 0;
+		ecmd.duplex = 0;
+		lio_set_settings(netdev, &ecmd);
+	}
+	return 0;
+}
+
+/* Return register dump len. */
+static int lio_get_regs_len(struct net_device *dev)
+{
+	return OCT_ETHTOOL_REGDUMP_LEN;
+}
+
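+/* Format the CN6XXX CSR dump (window, interrupt, queue, DMA and BAR1 index
+ * registers) as text into the regs buffer; returns the number of bytes
+ * written.
+ */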
+static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
+{
+	u32 reg;
+	int i, len = 0;
+
+	/* PCI  Window Registers */
+
+	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
+	reg = CN6XXX_WIN_WR_ADDR_LO;
+	len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
+		       CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
+	reg = CN6XXX_WIN_WR_ADDR_HI;
+	len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
+		       CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
+	reg = CN6XXX_WIN_RD_ADDR_LO;
+	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
+		       CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
+	reg = CN6XXX_WIN_RD_ADDR_HI;
+	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
+		       CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
+	reg = CN6XXX_WIN_WR_DATA_LO;
+	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
+		       CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
+	reg = CN6XXX_WIN_WR_DATA_HI;
+	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
+		       CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
+	len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
+		       CN6XXX_WIN_WR_MASK_REG,
+		       octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
+
+	/* PCI  Interrupt Register */
+	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
+		       CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
+						CN6XXX_SLI_INT_ENB64_PORT0));
+	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
+		       CN6XXX_SLI_INT_ENB64_PORT1,
+		       octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
+	len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
+		       octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
+
+	/* PCI  Output queue registers */
+	for (i = 0; i < oct->num_oqs; i++) {
+		reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
+		len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
+			       reg, i, octeon_read_csr(oct, reg));
+		reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
+		len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
+			       reg, i, octeon_read_csr(oct, reg));
+	}
+	reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
+	len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
+		       reg, octeon_read_csr(oct, reg));
+	reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
+	len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
+		       reg, octeon_read_csr(oct, reg));
+
+	/* PCI  Input queue registers */
+	for (i = 0; i <= 3; i++) {
+		u32 reg;
+
+		reg = CN6XXX_SLI_IQ_DOORBELL(i);
+		len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
+			       reg, i, octeon_read_csr(oct, reg));
+		reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
+		len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
+			       reg, i, octeon_read_csr(oct, reg));
+	}
+
+	/* PCI  DMA registers */
+
+	len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
+		       CN6XXX_DMA_CNT(0),
+		       octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
+	reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
+	len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
+		       CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
+	reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
+	len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
+		       CN6XXX_DMA_TIME_INT_LEVEL(0),
+		       octeon_read_csr(oct, reg));
+
+	len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
+		       CN6XXX_DMA_CNT(1),
+		       octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
+	reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
+	len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
+		       CN6XXX_DMA_PKT_INT_LEVEL(1),
+		       octeon_read_csr(oct, reg));
+	reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
+	len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
+		       CN6XXX_DMA_TIME_INT_LEVEL(1),
+		       octeon_read_csr(oct, reg));
+
+	/* PCI  Index registers */
+
+	len += sprintf(s + len, "\n");
+
+	for (i = 0; i < 16; i++) {
+		reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
+		len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
+			       CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
+	}
+
+	return len;
+}
+
+static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
+{
+	u32 val;
+	int i, len = 0;
+
+	/* PCI CONFIG Registers */
+
+	len += sprintf(s + len,
+		       "\n\t Octeon Config space Registers\n\n");
+
+	for (i = 0; i <= 13; i++) {
+		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
+		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
+			       (i * 4), i, val);
+	}
+
+	for (i = 30; i <= 34; i++) {
+		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
+		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
+			       (i * 4), i, val);
+	}
+
+	return len;
+}
+
+/* Return register dump to the user app. */
+static void lio_get_regs(struct net_device *dev,
+			 struct ethtool_regs *regs, void *regbuf)
+{
+	struct lio *lio = GET_LIO(dev);
+	int len = 0;
+	struct octeon_device *oct = lio->oct_dev;
+
+	memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
+	regs->version = OCT_ETHTOOL_REGSVER;
+
+	switch (oct->chip_id) {
+	/* case OCTEON_CN73XX: Todo */
+	case OCTEON_CN68XX:
+	case OCTEON_CN66XX:
+		len += cn6xxx_read_csr_reg(regbuf + len, oct);
+		len += cn6xxx_read_config_reg(regbuf + len, oct);
+		break;
+	default:
+		dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
+			__func__, oct->chip_id);
+	}
+}
+
+static const struct ethtool_ops lio_ethtool_ops = {
+	.get_settings		= lio_get_settings,
+	.get_link		= ethtool_op_get_link,
+	.get_drvinfo		= lio_get_drvinfo,
+	.get_ringparam		= lio_ethtool_get_ringparam,
+	.get_channels		= lio_ethtool_get_channels,
+	.set_phys_id		= lio_set_phys_id,
+	.get_eeprom_len		= lio_get_eeprom_len,
+	.get_eeprom		= lio_get_eeprom,
+	.get_strings		= lio_get_strings,
+	.get_ethtool_stats	= lio_get_ethtool_stats,
+	.get_pauseparam		= lio_get_pauseparam,
+	.get_regs_len		= lio_get_regs_len,
+	.get_regs		= lio_get_regs,
+	.get_msglevel		= lio_get_msglevel,
+	.set_msglevel		= lio_set_msglevel,
+	.get_sset_count		= lio_get_sset_count,
+	.nway_reset		= lio_nway_reset,
+	.set_settings		= lio_set_settings,
+	.get_coalesce		= lio_get_intr_coalesce,
+	.set_coalesce		= lio_set_intr_coalesce,
+	.get_ts_info		= lio_get_ts_info,
+};
+
+void liquidio_set_ethtool_ops(struct net_device *netdev)
+{
+	netdev->ethtool_ops = &lio_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
new file mode 100644
index 0000000..0660dee
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -0,0 +1,3668 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/net_tstamp.h>
+#include <linux/if_vlan.h>
+#include <linux/firmware.h>
+#include <linux/ethtool.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+
+MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LIQUIDIO_VERSION);
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX);
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX);
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX);
+
+static int ddr_timeout = 10000;
+module_param(ddr_timeout, int, 0644);
+MODULE_PARM_DESC(ddr_timeout,
+		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
+
+static u32 console_bitmask;
+module_param(console_bitmask, int, 0644);
+MODULE_PARM_DESC(console_bitmask,
+		 "Bitmask indicating which consoles have debug output redirected to syslog.");
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
+static int debug = -1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
+
+static char fw_type[LIO_MAX_FW_TYPE_LEN];
+module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
+MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");
+
+static int conf_type;
+module_param(conf_type, int, 0);
+MODULE_PARM_DESC(conf_type, "select octeon configuration 0 default 1 ovs");
+
+/* Bit mask values for lio->ifstate */
+#define   LIO_IFSTATE_DROQ_OPS             0x01
+#define   LIO_IFSTATE_REGISTERED           0x02
+#define   LIO_IFSTATE_RUNNING              0x04
+#define   LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
+
+/* Polling interval for determining when NIC application is alive */
+#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
+
+/* runtime link query interval */
+#define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000
+
+struct liquidio_if_cfg_context {
+	int octeon_id;
+
+	wait_queue_head_t wc;
+
+	int cond;
+};
+
+struct liquidio_if_cfg_resp {
+	u64 rh;
+	struct liquidio_if_cfg_info cfg_info;
+	u64 status;
+};
+
+struct oct_link_status_resp {
+	u64 rh;
+	struct oct_link_info link_info;
+	u64 status;
+};
+
+struct oct_timestamp_resp {
+	u64 rh;
+	u64 timestamp;
+	u64 status;
+};
+
+#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
+
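+/** Per-packet TX metadata passed to the firmware. The endian-dependent
+ * layout keeps gso_size and gso_segs at the same bit positions within the
+ * u64 on both big- and little-endian hosts.
+ */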
+union tx_info {
+	u64 u64;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		u16 gso_size;
+		u16 gso_segs;
+		u32 reserved;
+#else
+		u32 reserved;
+		u16 gso_segs;
+		u16 gso_size;
+#endif
+	} s;
+};
+
+/** Octeon device properties to be used by the NIC module.
+ * Each octeon device in the system will be represented
+ * by this structure in the NIC module.
+ */
+
+#define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)
+
+#define OCTNIC_GSO_MAX_HEADER_SIZE 128
+#define OCTNIC_GSO_MAX_SIZE (GSO_MAX_SIZE - OCTNIC_GSO_MAX_HEADER_SIZE)
+
+/** Structure of a node in list of gather components maintained by
+ * NIC driver for each network device.
+ */
+struct octnic_gather {
+	/** List manipulation. Next and prev pointers. */
+	struct list_head list;
+
+	/** Size of the gather component at sg in bytes. */
+	int sg_size;
+
+	/** Number of bytes that sg was adjusted to make it 8B-aligned. */
+	int adjust;
+
+	/** Gather component that can accommodate max sized fragment list
+	 *  received from the IP layer.
+	 */
+	struct octeon_sg_entry *sg;
+};
+
+/** This structure is used by the NIC driver to store the information
+ * required to free the sk_buff when the packet has been fetched by Octeon.
+ * Byte offsets below assume the worst case of a 64-bit system.
+ */
+struct octnet_buf_free_info {
+	/** Bytes 1-8.  Pointer to network device private structure. */
+	struct lio *lio;
+
+	/** Bytes 9-16.  Pointer to sk_buff. */
+	struct sk_buff *skb;
+
+	/** Bytes 17-24.  Pointer to gather list. */
+	struct octnic_gather *g;
+
+	/** Bytes 25-32. Physical address of skb->data or gather list. */
+	u64 dptr;
+
+	/** Bytes 33-47. Piggybacked soft command, if any */
+	struct octeon_soft_command *sc;
+};
+
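+/** Per-device probe handshake state. octeon_destroy_resources() forces any
+ * pending handshakes to complete during teardown.
+ */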
+struct handshake {
+	struct completion init;
+	struct completion started;
+	struct pci_dev *pci_dev;
+	int init_ok;
+	int started_ok;
+};
+
+struct octeon_device_priv {
+	/** Tasklet structures for this device. */
+	struct tasklet_struct droq_tasklet;
+	unsigned long napi_mask;
+};
+
+static int octeon_device_init(struct octeon_device *);
+static void liquidio_remove(struct pci_dev *pdev);
+static int liquidio_probe(struct pci_dev *pdev,
+			  const struct pci_device_id *ent);
+
+static struct handshake handshake[MAX_OCTEON_DEVICES];
+static struct completion first_stage;
+
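+/**
+ * \brief Tasklet bottom half that drains pending DROQ packets
+ * @param pdev Octeon device pointer, cast to unsigned long
+ */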
+static void octeon_droq_bh(unsigned long pdev)
+{
+	int q_no;
+	int reschedule = 0;
+	struct octeon_device *oct = (struct octeon_device *)pdev;
+	struct octeon_device_priv *oct_priv =
+		(struct octeon_device_priv *)oct->priv;
+
+	/* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
+	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES; q_no++) {
+		if (!(oct->io_qmask.oq & (1UL << q_no)))
+			continue;
+		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
+							  MAX_PACKET_BUDGET);
+	}
+
+	if (reschedule)
+		tasklet_schedule(&oct_priv->droq_tasklet);
+}
+
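+/**
+ * \brief Wait for the output queues to drain, rescheduling the tasklet
+ * while packets remain
+ * @param oct Pointer to Octeon device
+ */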
+static int lio_wait_for_oq_pkts(struct octeon_device *oct)
+{
+	struct octeon_device_priv *oct_priv =
+		(struct octeon_device_priv *)oct->priv;
+	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
+	int i;
+
+	do {
+		pending_pkts = 0;
+
+		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+			if (!(oct->io_qmask.oq & (1UL << i)))
+				continue;
+			pkt_cnt += octeon_droq_check_hw_for_pkts(oct,
+								 oct->droq[i]);
+		}
+		if (pkt_cnt > 0) {
+			pending_pkts += pkt_cnt;
+			tasklet_schedule(&oct_priv->droq_tasklet);
+		}
+		pkt_cnt = 0;
+		schedule_timeout_uninterruptible(1);
+
+	} while (retry-- && pending_pkts);
+
+	return pkt_cnt;
+}
+
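+/**
+ * \brief Report completed TX work to byte queue limits
+ * @param txq netdev TX queue
+ * @param pkts_compl number of completed packets
+ * @param bytes_compl number of completed bytes
+ */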
+void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
+					unsigned int bytes_compl)
+{
+	struct netdev_queue *netdev_queue = txq;
+
+	netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
+}
+
+void octeon_update_tx_completion_counters(void *buf, int reqtype,
+					  unsigned int *pkts_compl,
+					  unsigned int *bytes_compl)
+{
+	struct octnet_buf_free_info *finfo;
+	struct sk_buff *skb = NULL;
+	struct octeon_soft_command *sc;
+
+	switch (reqtype) {
+	case REQTYPE_NORESP_NET:
+	case REQTYPE_NORESP_NET_SG:
+		finfo = buf;
+		skb = finfo->skb;
+		break;
+
+	case REQTYPE_RESP_NET_SG:
+	case REQTYPE_RESP_NET:
+		sc = buf;
+		skb = sc->callback_arg;
+		break;
+
+	default:
+		return;
+	}
+
+	(*pkts_compl)++;
+	*bytes_compl += skb->len;
+}
+
+void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
+{
+	struct octnet_buf_free_info *finfo;
+	struct sk_buff *skb;
+	struct octeon_soft_command *sc;
+	struct netdev_queue *txq;
+
+	switch (reqtype) {
+	case REQTYPE_NORESP_NET:
+	case REQTYPE_NORESP_NET_SG:
+		finfo = buf;
+		skb = finfo->skb;
+		break;
+
+	case REQTYPE_RESP_NET_SG:
+	case REQTYPE_RESP_NET:
+		sc = buf;
+		skb = sc->callback_arg;
+		break;
+
+	default:
+		return;
+	}
+
+	txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
+	netdev_tx_sent_queue(txq, skb->len);
+}
+
+int octeon_console_debug_enabled(u32 console)
+{
+	return (console_bitmask >> (console)) & 0x1;
+}
+
+/**
+ * \brief Forces all IO queues off on a given device
+ * @param oct Pointer to Octeon device
+ */
+static void force_io_queues_off(struct octeon_device *oct)
+{
+	if ((oct->chip_id == OCTEON_CN66XX) ||
+	    (oct->chip_id == OCTEON_CN68XX)) {
+		/* Reset the Enable bits for Input Queues. */
+		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
+
+		/* Reset the Enable bits for Output Queues. */
+		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
+	}
+}
+
+/**
+ * \brief wait for all pending requests to complete
+ * @param oct Pointer to Octeon device
+ *
+ * Called during shutdown sequence
+ */
+static int wait_for_pending_requests(struct octeon_device *oct)
+{
+	int i, pcount = 0;
+
+	for (i = 0; i < 100; i++) {
+		pcount =
+			atomic_read(&oct->response_list
+				[OCTEON_ORDERED_SC_LIST].pending_req_count);
+		if (pcount)
+			schedule_timeout_uninterruptible(HZ / 10);
+		else
+			break;
+	}
+
+	if (pcount)
+		return 1;
+
+	return 0;
+}
+
+/**
+ * \brief Cause device to go quiet so it can be safely removed/reset/etc
+ * @param oct Pointer to Octeon device
+ */
+static inline void pcierror_quiesce_device(struct octeon_device *oct)
+{
+	int i;
+
+	/* Disable the input and output queues now. No more packets will
+	 * arrive from Octeon, but we should wait for all packet processing
+	 * to finish.
+	 */
+	force_io_queues_off(oct);
+
+	/* To allow for in-flight requests */
+	schedule_timeout_uninterruptible(100);
+
+	if (wait_for_pending_requests(oct))
+		dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+	/* Force all requests waiting to be fetched by OCTEON to complete. */
+	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+		struct octeon_instr_queue *iq;
+
+		if (!(oct->io_qmask.iq & (1UL << i)))
+			continue;
+		iq = oct->instr_queue[i];
+
+		if (atomic_read(&iq->instr_pending)) {
+			spin_lock_bh(&iq->lock);
+			iq->fill_cnt = 0;
+			iq->octeon_read_index = iq->host_write_index;
+			iq->stats.instr_processed +=
+				atomic_read(&iq->instr_pending);
+			lio_process_iq_request_list(oct, iq);
+			spin_unlock_bh(&iq->lock);
+		}
+	}
+
+	/* Force all pending ordered list requests to time out. */
+	lio_process_ordered_list(oct, 1);
+
+	/* We do not need to wait for output queue packets to be processed. */
+}
+
+/**
+ * \brief Cleanup PCI AER uncorrectable error status
+ * @param dev Pointer to PCI device
+ */
+static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
+{
+	int pos = 0x100;
+	u32 status, mask;
+
+	pr_info("%s :\n", __func__);
+
+	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
+	if (dev->error_state == pci_channel_io_normal)
+		status &= ~mask;        /* Clear corresponding nonfatal bits */
+	else
+		status &= mask;         /* Clear corresponding fatal bits */
+	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+}
+
+/**
+ * \brief Stop all PCI IO to a given device
+ * @param dev Pointer to Octeon device
+ */
+static void stop_pci_io(struct octeon_device *oct)
+{
+	/* No more instructions will be forwarded. */
+	atomic_set(&oct->status, OCT_DEV_IN_RESET);
+
+	pci_disable_device(oct->pci_dev);
+
+	/* Disable interrupts  */
+	oct->fn_list.disable_interrupt(oct->chip);
+
+	pcierror_quiesce_device(oct);
+
+	/* Release the interrupt line */
+	free_irq(oct->pci_dev->irq, oct);
+
+	if (oct->flags & LIO_FLAG_MSI_ENABLED)
+		pci_disable_msi(oct->pci_dev);
+
+	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
+		lio_get_state_string(&oct->status));
+
+	/* cleanup_aer_uncorrect_error_status() is common to all OCTEON
+	 * models (formerly cn63xx-specific).
+	 */
+	cleanup_aer_uncorrect_error_status(oct->pci_dev);
+}
+
+/**
+ * \brief called when PCI error is detected
+ * @param pdev Pointer to PCI device
+ * @param state The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
+						     pci_channel_state_t state)
+{
+	struct octeon_device *oct = pci_get_drvdata(pdev);
+
+	/* Non-correctable Non-fatal errors */
+	if (state == pci_channel_io_normal) {
+		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
+		cleanup_aer_uncorrect_error_status(oct->pci_dev);
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	}
+
+	/* Non-correctable Fatal errors */
+	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
+	stop_pci_io(oct);
+
+	/* Always return DISCONNECT; recovery is not supported, only a clean
+	 * shutdown.
+	 */
+	return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/**
+ * \brief mmio handler
+ * @param pdev Pointer to PCI device
+ */
+static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev)
+{
+	/* We should never hit this since we never ask for a reset for a Fatal
+	 * Error. We always return DISCONNECT in io_error above.
+	 * But play it safe and return RECOVERED for now.
+	 */
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * \brief called after the pci bus has been reset.
+ * @param pdev Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the octeon_resume routine.
+ */
+static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev)
+{
+	/* We should never hit this since we never ask for a reset for a Fatal
+	 * Error. We always return DISCONNECT in io_error above.
+	 * But play it safe and return RECOVERED for now.
+	 */
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * \brief called when traffic can start flowing again.
+ * @param pdev Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the octeon_resume routine.
+ */
+static void liquidio_pcie_resume(struct pci_dev *pdev)
+{
+	/* Nothing to be done here. */
+}
+
+#ifdef CONFIG_PM
+/**
+ * \brief called when suspending
+ * @param pdev Pointer to PCI device
+ * @param state state to suspend to
+ */
+static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	return 0;
+}
+
+/**
+ * \brief called when resuming
+ * @param pdev Pointer to PCI device
+ */
+static int liquidio_resume(struct pci_dev *pdev)
+{
+	return 0;
+}
+#endif
+
+/* For PCI-E Advanced Error Recovery (AER) Interface */
+static struct pci_error_handlers liquidio_err_handler = {
+	.error_detected = liquidio_pcie_error_detected,
+	.mmio_enabled	= liquidio_pcie_mmio_enabled,
+	.slot_reset	= liquidio_pcie_slot_reset,
+	.resume		= liquidio_pcie_resume,
+};
+
+static const struct pci_device_id liquidio_pci_tbl[] = {
+	{       /* 68xx */
+		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
+	},
+	{       /* 66xx */
+		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
+	},
+	{
+		0, 0, 0, 0, 0, 0, 0
+	}
+};
+MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
+
+static struct pci_driver liquidio_pci_driver = {
+	.name		= "LiquidIO",
+	.id_table	= liquidio_pci_tbl,
+	.probe		= liquidio_probe,
+	.remove		= liquidio_remove,
+	.err_handler	= &liquidio_err_handler,    /* For AER */
+
+#ifdef CONFIG_PM
+	.suspend	= liquidio_suspend,
+	.resume		= liquidio_resume,
+#endif
+
+};
+
+/**
+ * \brief register PCI driver
+ */
+static int liquidio_init_pci(void)
+{
+	return pci_register_driver(&liquidio_pci_driver);
+}
+
+/**
+ * \brief unregister PCI driver
+ */
+static void liquidio_deinit_pci(void)
+{
+	pci_unregister_driver(&liquidio_pci_driver);
+}
+
+/**
+ * \brief check interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to check
+ */
+static inline int ifstate_check(struct lio *lio, int state_flag)
+{
+	return atomic_read(&lio->ifstate) & state_flag;
+}
+
+/**
+ * \brief set interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to set
+ */
+static inline void ifstate_set(struct lio *lio, int state_flag)
+{
+	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
+}
+
+/**
+ * \brief clear interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to clear
+ */
+static inline void ifstate_reset(struct lio *lio, int state_flag)
+{
+	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
+}
+
+/**
+ * \brief Stop Tx queues
+ * @param netdev network device
+ */
+static inline void txqs_stop(struct net_device *netdev)
+{
+	if (netif_is_multiqueue(netdev)) {
+		int i;
+
+		for (i = 0; i < netdev->num_tx_queues; i++)
+			netif_stop_subqueue(netdev, i);
+	} else {
+		netif_stop_queue(netdev);
+	}
+}
+
+/**
+ * \brief Start Tx queues
+ * @param netdev network device
+ */
+static inline void txqs_start(struct net_device *netdev)
+{
+	if (netif_is_multiqueue(netdev)) {
+		int i;
+
+		for (i = 0; i < netdev->num_tx_queues; i++)
+			netif_start_subqueue(netdev, i);
+	} else {
+		netif_start_queue(netdev);
+	}
+}
+
+/**
+ * \brief Wake Tx queues
+ * @param netdev network device
+ */
+static inline void txqs_wake(struct net_device *netdev)
+{
+	if (netif_is_multiqueue(netdev)) {
+		int i;
+
+		for (i = 0; i < netdev->num_tx_queues; i++)
+			netif_wake_subqueue(netdev, i);
+	} else {
+		netif_wake_queue(netdev);
+	}
+}
+
+/**
+ * \brief Stop Tx queue
+ * @param netdev network device
+ */
+static void stop_txq(struct net_device *netdev)
+{
+	txqs_stop(netdev);
+}
+
+/**
+ * \brief Start Tx queue
+ * @param netdev network device
+ */
+static void start_txq(struct net_device *netdev)
+{
+	struct lio *lio = GET_LIO(netdev);
+
+	if (lio->linfo.link.s.status) {
+		txqs_start(netdev);
+		return;
+	}
+}
+
+/**
+ * \brief Wake a queue
+ * @param netdev network device
+ * @param q which queue to wake
+ */
+static inline void wake_q(struct net_device *netdev, int q)
+{
+	if (netif_is_multiqueue(netdev))
+		netif_wake_subqueue(netdev, q);
+	else
+		netif_wake_queue(netdev);
+}
+
+/**
+ * \brief Stop a queue
+ * @param netdev network device
+ * @param q which queue to stop
+ */
+static inline void stop_q(struct net_device *netdev, int q)
+{
+	if (netif_is_multiqueue(netdev))
+		netif_stop_subqueue(netdev, q);
+	else
+		netif_stop_queue(netdev);
+}
+
+/**
+ * \brief Check Tx queue status, and take appropriate action
+ * @param lio per-network private data
+ * @returns 0 if full, number of queues woken up otherwise
+ */
+static inline int check_txq_status(struct lio *lio)
+{
+	int ret_val = 0;
+
+	if (netif_is_multiqueue(lio->netdev)) {
+		int numqs = lio->netdev->num_tx_queues;
+		int q, iq = 0;
+
+		/* check each sub-queue state */
+		for (q = 0; q < numqs; q++) {
+			iq = lio->linfo.txpciq[q & (lio->linfo.num_txpciq - 1)];
+			if (octnet_iq_is_full(lio->oct_dev, iq))
+				continue;
+			wake_q(lio->netdev, q);
+			ret_val++;
+		}
+	} else {
+		if (octnet_iq_is_full(lio->oct_dev, lio->txq))
+			return 0;
+		wake_q(lio->netdev, lio->txq);
+		ret_val = 1;
+	}
+	return ret_val;
+}
+
+/**
+ * Remove and return the node at the head of the list, or NULL if the
+ * list is empty.
+ */
+static inline struct list_head *list_delete_head(struct list_head *root)
+{
+	struct list_head *node;
+
+	if ((root->prev == root) && (root->next == root))
+		node = NULL;
+	else
+		node = root->next;
+
+	if (node)
+		list_del(node);
+
+	return node;
+}
+
+/**
+ * \brief Delete gather list
+ * @param lio per-network private data
+ */
+static void delete_glist(struct lio *lio)
+{
+	struct octnic_gather *g;
+
+	do {
+		g = (struct octnic_gather *)
+		    list_delete_head(&lio->glist);
+		if (g) {
+			if (g->sg)
+				kfree((void *)((unsigned long)g->sg -
+						g->adjust));
+			kfree(g);
+		}
+	} while (g);
+}
+
+/**
+ * \brief Setup gather list
+ * @param lio per-network private data
+ */
+static int setup_glist(struct lio *lio)
+{
+	int i;
+	struct octnic_gather *g;
+
+	INIT_LIST_HEAD(&lio->glist);
+
+	for (i = 0; i < lio->tx_qsize; i++) {
+		g = kzalloc(sizeof(*g), GFP_KERNEL);
+		if (!g)
+			break;
+
+		g->sg_size =
+			((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+
+		g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
+		if (!g->sg) {
+			kfree(g);
+			break;
+		}
+
+		/* The gather component should be aligned on 64-bit boundary */
+		if (((unsigned long)g->sg) & 7) {
+			g->adjust = 8 - (((unsigned long)g->sg) & 7);
+			g->sg = (struct octeon_sg_entry *)
+				((unsigned long)g->sg + g->adjust);
+		}
+		list_add_tail(&g->list, &lio->glist);
+	}
+
+	if (i == lio->tx_qsize)
+		return 0;
+
+	delete_glist(lio);
+	return 1;
+}
+
+/**
+ * \brief Print link information
+ * @param netdev network device
+ */
+static void print_link_info(struct net_device *netdev)
+{
+	struct lio *lio = GET_LIO(netdev);
+
+	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
+		struct oct_link_info *linfo = &lio->linfo;
+
+		if (linfo->link.s.status) {
+			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
+				   linfo->link.s.speed,
+				   (linfo->link.s.duplex) ? "Full" : "Half");
+		} else {
+			netif_info(lio, link, lio->netdev, "Link Down\n");
+		}
+	}
+}
+
+/**
+ * \brief Update link status
+ * @param netdev network device
+ * @param ls link status structure
+ *
+ * Called on receipt of a link status response from the core application to
+ * update each interface's link status.
+ */
+static inline void update_link_status(struct net_device *netdev,
+				      union oct_link_status *ls)
+{
+	struct lio *lio = GET_LIO(netdev);
+
+	if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
+		lio->linfo.link.u64 = ls->u64;
+
+		print_link_info(netdev);
+
+		if (lio->linfo.link.s.status) {
+			netif_carrier_on(netdev);
+			/* start_txq(netdev); */
+			txqs_wake(netdev);
+		} else {
+			netif_carrier_off(netdev);
+			stop_txq(netdev);
+		}
+	}
+}
+
+/**
+ * \brief DROQ packet processor scheduler
+ * @param oct octeon device
+ */
+static
+void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
+{
+	struct octeon_device_priv *oct_priv =
+		(struct octeon_device_priv *)oct->priv;
+	u64 oq_no;
+	struct octeon_droq *droq;
+
+	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
+		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) {
+			if (!(oct->droq_intr & (1 << oq_no)))
+				continue;
+
+			droq = oct->droq[oq_no];
+
+			if (droq->ops.poll_mode) {
+				droq->ops.napi_fn(droq);
+				oct_priv->napi_mask |= (1 << oq_no);
+			} else {
+				tasklet_schedule(&oct_priv->droq_tasklet);
+			}
+		}
+	}
+}
+
+/**
+ * \brief Interrupt handler for octeon
+ * @param irq unused
+ * @param dev octeon device
+ */
+static
+irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
+{
+	struct octeon_device *oct = (struct octeon_device *)dev;
+	irqreturn_t ret;
+
+	/* Disable our interrupts for the duration of ISR */
+	oct->fn_list.disable_interrupt(oct->chip);
+
+	ret = oct->fn_list.process_interrupt_regs(oct);
+
+	if (ret == IRQ_HANDLED)
+		liquidio_schedule_droq_pkt_handlers(oct);
+
+	/* Re-enable our interrupts  */
+	if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
+		oct->fn_list.enable_interrupt(oct->chip);
+
+	return ret;
+}
+
+/**
+ * \brief Setup interrupt for octeon device
+ * @param oct octeon device
+ *
+ *  Enable interrupt in Octeon device as given in the PCI interrupt mask.
+ */
+static int octeon_setup_interrupt(struct octeon_device *oct)
+{
+	int irqret, err;
+
+	err = pci_enable_msi(oct->pci_dev);
+	if (err)
+		dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
+			 err);
+	else
+		oct->flags |= LIO_FLAG_MSI_ENABLED;
+
+	irqret = request_irq(oct->pci_dev->irq, liquidio_intr_handler,
+			     IRQF_SHARED, "octeon", oct);
+	if (irqret) {
+		if (oct->flags & LIO_FLAG_MSI_ENABLED)
+			pci_disable_msi(oct->pci_dev);
+		dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
+			irqret);
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * \brief PCI probe handler
+ * @param pdev PCI device structure
+ * @param ent unused
+ */
+static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct octeon_device *oct_dev = NULL;
+	struct handshake *hs;
+
+	oct_dev = octeon_allocate_device(pdev->device,
+					 sizeof(struct octeon_device_priv));
+	if (!oct_dev) {
+		dev_err(&pdev->dev, "Unable to allocate device\n");
+		return -ENOMEM;
+	}
+
+	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
+		 (u32)pdev->vendor, (u32)pdev->device);
+
+	/* Assign octeon_device for this device to the private data area. */
+	pci_set_drvdata(pdev, oct_dev);
+
+	/* set linux specific device pointer */
+	oct_dev->pci_dev = (void *)pdev;
+
+	hs = &handshake[oct_dev->octeon_id];
+	init_completion(&hs->init);
+	init_completion(&hs->started);
+	hs->pci_dev = pdev;
+
+	if (oct_dev->octeon_id == 0)
+		/* first LiquidIO NIC is detected */
+		complete(&first_stage);
+
+	if (octeon_device_init(oct_dev)) {
+		liquidio_remove(pdev);
+		return -ENOMEM;
+	}
+
+	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
+
+	return 0;
+}
+
+/**
+ * \brief Destroy resources associated with an octeon device
+ * @param oct Pointer to Octeon device
+ */
+static void octeon_destroy_resources(struct octeon_device *oct)
+{
+	int i;
+	struct octeon_device_priv *oct_priv =
+		(struct octeon_device_priv *)oct->priv;
+
+	struct handshake *hs;
+
+	switch (atomic_read(&oct->status)) {
+	case OCT_DEV_RUNNING:
+	case OCT_DEV_CORE_OK:
+
+		/* No more instructions will be forwarded. */
+		atomic_set(&oct->status, OCT_DEV_IN_RESET);
+
+		oct->app_mode = CVM_DRV_INVALID_APP;
+		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
+			lio_get_state_string(&oct->status));
+
+		schedule_timeout_uninterruptible(HZ / 10);
+
+		/* fallthrough */
+	case OCT_DEV_HOST_OK:
+
+		/* fallthrough */
+	case OCT_DEV_CONSOLE_INIT_DONE:
+		/* Remove any consoles */
+		octeon_remove_consoles(oct);
+
+		/* fallthrough */
+	case OCT_DEV_IO_QUEUES_DONE:
+		if (wait_for_pending_requests(oct))
+			dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+		if (lio_wait_for_instr_fetch(oct))
+			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
+
+		/* Disable the input and output queues now. No more packets will
+		 * arrive from Octeon, but we should wait for all packet
+		 * processing to finish.
+		 */
+		oct->fn_list.disable_io_queues(oct);
+
+		if (lio_wait_for_oq_pkts(oct))
+			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
+
+		/* Disable interrupts  */
+		oct->fn_list.disable_interrupt(oct->chip);
+
+		/* Release the interrupt line */
+		free_irq(oct->pci_dev->irq, oct);
+
+		if (oct->flags & LIO_FLAG_MSI_ENABLED)
+			pci_disable_msi(oct->pci_dev);
+
+		/* Soft reset the octeon device before exiting */
+		oct->fn_list.soft_reset(oct);
+
+		/* Disable the device, releasing the PCI INT */
+		pci_disable_device(oct->pci_dev);
+
+		/* fallthrough */
+	case OCT_DEV_IN_RESET:
+	case OCT_DEV_DROQ_INIT_DONE:
+		/*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
+		mdelay(100);
+		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+			if (!(oct->io_qmask.oq & (1UL << i)))
+				continue;
+			octeon_delete_droq(oct, i);
+		}
+
+		/* Force any pending handshakes to complete */
+		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
+			hs = &handshake[i];
+
+			if (hs->pci_dev) {
+				handshake[oct->octeon_id].init_ok = 0;
+				complete(&handshake[oct->octeon_id].init);
+				handshake[oct->octeon_id].started_ok = 0;
+				complete(&handshake[oct->octeon_id].started);
+			}
+		}
+
+		/* fallthrough */
+	case OCT_DEV_RESP_LIST_INIT_DONE:
+		octeon_delete_response_list(oct);
+
+		/* fallthrough */
+	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
+		octeon_free_sc_buffer_pool(oct);
+
+		/* fallthrough */
+	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
+		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+			if (!(oct->io_qmask.iq & (1UL << i)))
+				continue;
+			octeon_delete_instr_queue(oct, i);
+		}
+
+		/* fallthrough */
+	case OCT_DEV_DISPATCH_INIT_DONE:
+		octeon_delete_dispatch_list(oct);
+		cancel_delayed_work_sync(&oct->nic_poll_work.work);
+
+		/* fallthrough */
+	case OCT_DEV_PCI_MAP_DONE:
+		octeon_unmap_pci_barx(oct, 0);
+		octeon_unmap_pci_barx(oct, 1);
+
+		/* fallthrough */
+	case OCT_DEV_BEGIN_STATE:
+		/* Nothing to be done here either */
+		break;
+	}                       /* end switch(oct->status) */
+
+	tasklet_kill(&oct_priv->droq_tasklet);
+}
+
+/**
+ * \brief Send Rx control command
+ * @param lio per-network private data
+ * @param start_stop whether to start or stop
+ */
+static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+{
+	struct octnic_ctrl_pkt nctrl;
+	struct octnic_ctrl_params nparams;
+
+	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+	nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
+	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+	nctrl.ncmd.s.param2 = start_stop;
+	nctrl.netpndev = (u64)lio->netdev;
+
+	nparams.resp_order = OCTEON_RESP_NORESPONSE;
+
+	if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams) < 0)
+		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
+}
+
+/**
+ * \brief Destroy NIC device interface
+ * @param oct octeon device
+ * @param ifidx which interface to destroy
+ *
+ * Cleanup associated with each interface for an Octeon device when the NIC
+ * module is being unloaded or if initialization fails during load.
+ */
+static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
+{
+	struct net_device *netdev = oct->props[ifidx].netdev;
+	struct lio *lio;
+
+	if (!netdev) {
+		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
+			__func__, ifidx);
+		return;
+	}
+
+	lio = GET_LIO(netdev);
+
+	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
+
+	send_rx_ctrl_cmd(lio, 0);
+
+	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
+		txqs_stop(netdev);
+
+	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
+		unregister_netdev(netdev);
+
+	delete_glist(lio);
+
+	free_netdev(netdev);
+
+	oct->props[ifidx].netdev = NULL;
+}
+
+/**
+ * \brief Stop complete NIC functionality
+ * @param oct octeon device
+ */
+static int liquidio_stop_nic_module(struct octeon_device *oct)
+{
+	int i, j;
+	struct lio *lio;
+
+	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
+	if (!oct->ifcount) {
+		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
+		return 1;
+	}
+
+	for (i = 0; i < oct->ifcount; i++) {
+		lio = GET_LIO(oct->props[i].netdev);
+		for (j = 0; j < lio->linfo.num_rxpciq; j++)
+			octeon_unregister_droq_ops(oct, lio->linfo.rxpciq[j]);
+	}
+
+	for (i = 0; i < oct->ifcount; i++)
+		liquidio_destroy_nic_device(oct, i);
+
+	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
+	return 0;
+}
+
+/**
+ * \brief Cleans up resources at unload time
+ * @param pdev PCI device structure
+ */
+static void liquidio_remove(struct pci_dev *pdev)
+{
+	struct octeon_device *oct_dev = pci_get_drvdata(pdev);
+
+	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
+
+	if (oct_dev->app_mode == CVM_DRV_NIC_APP)
+		liquidio_stop_nic_module(oct_dev);
+
+	/* Reset the octeon device and cleanup all memory allocated for
+	 * the octeon device by driver.
+	 */
+	octeon_destroy_resources(oct_dev);
+
+	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
+
+	/* This octeon device has been removed. Update the global
+	 * data structure to reflect this. Free the device structure.
+	 */
+	octeon_free_device_mem(oct_dev);
+}
+
+/**
+ * \brief Identify the Octeon device and map the BAR address space
+ * @param oct octeon device
+ */
+static int octeon_chip_specific_setup(struct octeon_device *oct)
+{
+	u32 dev_id, rev_id;
+	int ret = 1;
+
+	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
+	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
+	oct->rev_id = rev_id & 0xff;
+
+	switch (dev_id) {
+	case OCTEON_CN68XX_PCIID:
+		oct->chip_id = OCTEON_CN68XX;
+		ret = lio_setup_cn68xx_octeon_device(oct);
+		break;
+
+	case OCTEON_CN66XX_PCIID:
+		oct->chip_id = OCTEON_CN66XX;
+		ret = lio_setup_cn66xx_octeon_device(oct);
+		break;
+	default:
+		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
+			dev_id);
+	}
+
+	if (!ret)
+		dev_info(&oct->pci_dev->dev, "%s PASS%d.%d\n",
+			 octeon_get_conf(oct)->card_name,
+			 OCTEON_MAJOR_REV(oct),
+			 OCTEON_MINOR_REV(oct));
+
+	return ret;
+}
+
+/**
+ * \brief PCI initialization for each Octeon device.
+ * @param oct octeon device
+ */
+static int octeon_pci_os_setup(struct octeon_device *oct)
+{
+	/* setup PCI stuff first */
+	if (pci_enable_device(oct->pci_dev)) {
+		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
+		return 1;
+	}
+
+	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
+		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
+		return 1;
+	}
+
+	/* Enable PCI DMA Master. */
+	pci_set_master(oct->pci_dev);
+
+	return 0;
+}
+
+/**
+ * \brief Check Tx queue state for a given network buffer
+ * @param lio per-network private data
+ * @param skb network buffer
+ */
+static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
+{
+	int q = 0, iq = 0;
+
+	if (netif_is_multiqueue(lio->netdev)) {
+		q = skb->queue_mapping;
+		iq = lio->linfo.txpciq[(q & (lio->linfo.num_txpciq - 1))];
+	} else {
+		iq = lio->txq;
+	}
+
+	if (octnet_iq_is_full(lio->oct_dev, iq))
+		return 0;
+	wake_q(lio->netdev, q);
+	return 1;
+}
+
+/**
+ * \brief Unmap and free network buffer
+ * @param buf buffer
+ */
+static void free_netbuf(void *buf)
+{
+	struct sk_buff *skb;
+	struct octnet_buf_free_info *finfo;
+	struct lio *lio;
+
+	finfo = (struct octnet_buf_free_info *)buf;
+	skb = finfo->skb;
+	lio = finfo->lio;
+
+	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
+			 DMA_TO_DEVICE);
+
+	check_txq_state(lio, skb);
+
+	recv_buffer_free((struct sk_buff *)skb);
+}
+
+/**
+ * \brief Unmap and free gather buffer
+ * @param buf buffer
+ */
+static void free_netsgbuf(void *buf)
+{
+	struct octnet_buf_free_info *finfo;
+	struct sk_buff *skb;
+	struct lio *lio;
+	struct octnic_gather *g;
+	int i, frags;
+
+	finfo = (struct octnet_buf_free_info *)buf;
+	skb = finfo->skb;
+	lio = finfo->lio;
+	g = finfo->g;
+	frags = skb_shinfo(skb)->nr_frags;
+
+	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+			 g->sg[0].ptr[0], (skb->len - skb->data_len),
+			 DMA_TO_DEVICE);
+
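+	/* Each octeon SG entry holds four pointers: i >> 2 selects the
+	 * entry and i & 3 the slot.  Slot 0 of entry 0 carries the linear
+	 * skb data unmapped above, so the frags start at i = 1.
+	 */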
+	i = 1;
+	while (frags--) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+
+		pci_unmap_page((lio->oct_dev)->pci_dev,
+			       g->sg[(i >> 2)].ptr[(i & 3)],
+			       frag->size, DMA_TO_DEVICE);
+		i++;
+	}
+
+	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+			 finfo->dptr, g->sg_size,
+			 DMA_TO_DEVICE);
+
+	spin_lock(&lio->lock);
+	list_add_tail(&g->list, &lio->glist);
+	spin_unlock(&lio->lock);
+
+	check_txq_state(lio, skb);     /* mq support: sub-queue state check */
+
+	recv_buffer_free((struct sk_buff *)skb);
+}
+
+/**
+ * \brief Unmap and free gather buffer with response
+ * @param buf buffer
+ */
+static void free_netsgbuf_with_resp(void *buf)
+{
+	struct octeon_soft_command *sc;
+	struct octnet_buf_free_info *finfo;
+	struct sk_buff *skb;
+	struct lio *lio;
+	struct octnic_gather *g;
+	int i, frags;
+
+	sc = (struct octeon_soft_command *)buf;
+	skb = (struct sk_buff *)sc->callback_arg;
+	finfo = (struct octnet_buf_free_info *)&skb->cb;
+
+	lio = finfo->lio;
+	g = finfo->g;
+	frags = skb_shinfo(skb)->nr_frags;
+
+	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+			 g->sg[0].ptr[0], (skb->len - skb->data_len),
+			 DMA_TO_DEVICE);
+
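+	/* Same gather-list layout as in free_netsgbuf(): four pointers
+	 * per SG entry, with the linear data in slot 0 of entry 0.
+	 */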
+	i = 1;
+	while (frags--) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+
+		pci_unmap_page((lio->oct_dev)->pci_dev,
+			       g->sg[(i >> 2)].ptr[(i & 3)],
+			       frag->size, DMA_TO_DEVICE);
+		i++;
+	}
+
+	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+			 finfo->dptr, g->sg_size,
+			 DMA_TO_DEVICE);
+
+	spin_lock(&lio->lock);
+	list_add_tail(&g->list, &lio->glist);
+	spin_unlock(&lio->lock);
+
+	/* Don't free the skb yet */
+
+	check_txq_state(lio, skb);
+}
+
+/**
+ * \brief Adjust ptp frequency
+ * @param ptp PTP clock info
+ * @param ppb how much to adjust by, in parts-per-billion
+ */
+static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	struct lio *lio = container_of(ptp, struct lio, ptp_info);
+	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+	u64 comp, delta;
+	unsigned long flags;
+	bool neg_adj = false;
+
+	if (ppb < 0) {
+		neg_adj = true;
+		ppb = -ppb;
+	}
+
+	/* The hardware adds the clock compensation value to the
+	 * PTP clock on every coprocessor clock cycle, so we
+	 * compute the delta in terms of coprocessor clocks.
+	 */
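+	/* CLOCK_COMP is a 32.32 fixed-point ns-per-cycle value (see
+	 * liquidio_ptp_init()), hence the shift before the divide.
+	 */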
+	delta = (u64)ppb << 32;
+	do_div(delta, oct->coproc_clock_rate);
+
+	spin_lock_irqsave(&lio->ptp_lock, flags);
+	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
+	if (neg_adj)
+		comp -= delta;
+	else
+		comp += delta;
+	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
+	spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+	return 0;
+}
+
+/**
+ * \brief Adjust ptp time
+ * @param ptp PTP clock info
+ * @param delta how much to adjust by, in nanosecs
+ */
+static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	unsigned long flags;
+	struct lio *lio = container_of(ptp, struct lio, ptp_info);
+
+	spin_lock_irqsave(&lio->ptp_lock, flags);
+	lio->ptp_adjust += delta;
+	spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+	return 0;
+}
+
+/**
+ * \brief Get hardware clock time, including any adjustment
+ * @param ptp PTP clock info
+ * @param ts timespec
+ */
+static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
+				struct timespec64 *ts)
+{
+	u64 ns;
+	u32 remainder;
+	unsigned long flags;
+	struct lio *lio = container_of(ptp, struct lio, ptp_info);
+	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+
+	spin_lock_irqsave(&lio->ptp_lock, flags);
+	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
+	ns += lio->ptp_adjust;
+	spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+	ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
+	ts->tv_nsec = remainder;
+
+	return 0;
+}
+
+/**
+ * \brief Set hardware clock time. Reset adjustment
+ * @param ptp PTP clock info
+ * @param ts timespec
+ */
+static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
+				const struct timespec64 *ts)
+{
+	u64 ns;
+	unsigned long flags;
+	struct lio *lio = container_of(ptp, struct lio, ptp_info);
+	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+
+	ns = timespec_to_ns(ts);
+
+	spin_lock_irqsave(&lio->ptp_lock, flags);
+	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
+	lio->ptp_adjust = 0;
+	spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+	return 0;
+}
+
+/**
+ * \brief Check if PTP is enabled
+ * @param ptp PTP clock info
+ * @param rq request
+ * @param on is it on
+ */
+static int liquidio_ptp_enable(struct ptp_clock_info *ptp,
+			       struct ptp_clock_request *rq, int on)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * \brief Open PTP clock source
+ * @param netdev network device
+ */
+static void oct_ptp_open(struct net_device *netdev)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+
+	spin_lock_init(&lio->ptp_lock);
+
+	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
+	lio->ptp_info.owner = THIS_MODULE;
+	lio->ptp_info.max_adj = 250000000;
+	lio->ptp_info.n_alarm = 0;
+	lio->ptp_info.n_ext_ts = 0;
+	lio->ptp_info.n_per_out = 0;
+	lio->ptp_info.pps = 0;
+	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
+	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
+	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
+	lio->ptp_info.settime64 = liquidio_ptp_settime;
+	lio->ptp_info.enable = liquidio_ptp_enable;
+
+	lio->ptp_adjust = 0;
+
+	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
+					     &oct->pci_dev->dev);
+
+	if (IS_ERR(lio->ptp_clock))
+		lio->ptp_clock = NULL;
+}
+
+/**
+ * \brief Init PTP clock
+ * @param oct octeon device
+ */
+static void liquidio_ptp_init(struct octeon_device *oct)
+{
+	u64 clock_comp, cfg;
+
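+	/* Nominal compensation: NSEC_PER_SEC / coproc_clock_rate ns of
+	 * PTP clock advance per coprocessor tick, in 32.32 fixed point.
+	 */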
+	clock_comp = (u64)NSEC_PER_SEC << 32;
+	do_div(clock_comp, oct->coproc_clock_rate);
+	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
+
+	/* Enable */
+	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
+	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
+}
+
+/**
+ * \brief Load firmware to device
+ * @param oct octeon device
+ *
+ * Maps device to firmware filename, requests firmware, and downloads it
+ */
+static int load_firmware(struct octeon_device *oct)
+{
+	int ret = 0;
+	const struct firmware *fw;
+	char fw_name[LIO_MAX_FW_FILENAME_LEN];
+	char *tmp_fw_type;
+
+	if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
+		    sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) {
+		dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
+		return ret;
+	}
+
+	if (fw_type[0] == '\0')
+		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
+	else
+		tmp_fw_type = fw_type;
+
+	snprintf(fw_name, sizeof(fw_name), "%s%s%s_%s%s", LIO_FW_DIR,
+		 LIO_FW_BASE_NAME, octeon_get_conf(oct)->card_name,
+		 tmp_fw_type, LIO_FW_NAME_SUFFIX);
+
+	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
+	if (ret) {
+		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.",
+			fw_name);
+		return ret;
+	}
+
+	ret = octeon_download_firmware(oct, fw->data, fw->size);
+
+	release_firmware(fw);
+
+	return ret;
+}
+
+/**
+ * \brief Setup output queue
+ * @param oct octeon device
+ * @param q_no which queue
+ * @param num_descs how many descriptors
+ * @param desc_size size of each descriptor
+ * @param app_ctx application context
+ */
+static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
+			     int desc_size, void *app_ctx)
+{
+	int ret_val = 0;
+
+	dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
+	/* droq creation and local register settings. */
+	ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
+	if (ret_val == -1)
+		return ret_val;
+
+	if (ret_val == 1) {
+		dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
+		return 0;
+	}
+
+	/* Enable the droq queues */
+	octeon_set_droq_pkt_op(oct, q_no, 1);
+
+	/* Send Credit for Octeon Output queues. Credits are always
+	 * sent after the output queue is enabled.
+	 */
+	writel(oct->droq[q_no]->max_count,
+	       oct->droq[q_no]->pkts_credit_reg);
+
+	return ret_val;
+}
+
+/**
+ * \brief Callback for getting interface configuration
+ * @param status status of request
+ * @param buf pointer to resp structure
+ */
+static void if_cfg_callback(struct octeon_device *oct,
+			    u32 status,
+			    void *buf)
+{
+	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+	struct liquidio_if_cfg_resp *resp;
+	struct liquidio_if_cfg_context *ctx;
+
+	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+	ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
+
+	oct = lio_get_device(ctx->octeon_id);
+	if (resp->status)
+		dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
+			CVM_CAST64(resp->status));
+	ACCESS_ONCE(ctx->cond) = 1;
+
+	/* This barrier is required to be sure that the response has been
+	 * written fully before waking up the handler
+	 */
+	wmb();
+
+	wake_up_interruptible(&ctx->wc);
+}
+
+/**
+ * \brief Select queue based on hash
+ * @param dev Net device
+ * @param skb sk_buff structure
+ * @returns selected queue number
+ */
+static u16 select_q(struct net_device *dev, struct sk_buff *skb,
+		    void *accel_priv, select_queue_fallback_t fallback)
+{
+	int qindex;
+	struct lio *lio;
+
+	lio = GET_LIO(dev);
+	/* select queue on chosen queue_mapping or core */
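+	/* num_txpciq is expected to be a power of two, so the AND below
+	 * is a cheap modulo.
+	 */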
+	qindex = skb_rx_queue_recorded(skb) ?
+		 skb_get_rx_queue(skb) : smp_processor_id();
+	return (u16)(qindex & (lio->linfo.num_txpciq - 1));
+}
+
+/** Routine to push packets arriving on the Octeon interface up to the
+ * network layer.
+ * @param octeon_id - octeon device id.
+ * @param skbuff   - skbuff struct to be passed to network layer.
+ * @param len      - size of total data received.
+ * @param rh       - Control header associated with the packet
+ * @param param    - additional control data with the packet
+ */
+static void
+liquidio_push_packet(u32 octeon_id,
+		     void *skbuff,
+		     u32 len,
+		     union octeon_rh *rh,
+		     void *param)
+{
+	struct napi_struct *napi = param;
+	struct octeon_device *oct = lio_get_device(octeon_id);
+	struct sk_buff *skb = (struct sk_buff *)skbuff;
+	struct skb_shared_hwtstamps *shhwtstamps;
+	u64 ns;
+	struct net_device *netdev =
+		(struct net_device *)oct->props[rh->r_dh.link].netdev;
+	struct octeon_droq *droq = container_of(param, struct octeon_droq,
+						napi);
+	if (netdev) {
+		int packet_was_received;
+		struct lio *lio = GET_LIO(netdev);
+
+		/* Do not proceed if the interface is not in RUNNING state. */
+		if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
+			recv_buffer_free(skb);
+			droq->stats.rx_dropped++;
+			return;
+		}
+
+		skb->dev = netdev;
+
+		if (rh->r_dh.has_hwtstamp) {
+			/* timestamp is included from the hardware at the
+			 * beginning of the packet.
+			 */
+			if (ifstate_check(lio,
+					  LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
+				/* Nanoseconds are in the first 64-bits
+				 * of the packet.
+				 */
+				memcpy(&ns, (skb->data), sizeof(ns));
+				shhwtstamps = skb_hwtstamps(skb);
+				shhwtstamps->hwtstamp =
+					ns_to_ktime(ns + lio->ptp_adjust);
+			}
+			skb_pull(skb, sizeof(ns));
+		}
+
+		skb->protocol = eth_type_trans(skb, skb->dev);
+
+		if ((netdev->features & NETIF_F_RXCSUM) &&
+		    (rh->r_dh.csum_verified == CNNIC_CSUM_VERIFIED))
+			/* checksum has already been verified */
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;
+
+		if (packet_was_received) {
+			droq->stats.rx_bytes_received += len;
+			droq->stats.rx_pkts_received++;
+			netdev->last_rx = jiffies;
+		} else {
+			droq->stats.rx_dropped++;
+			netif_info(lio, rx_err, lio->netdev,
+				   "droq:%d  error rx_dropped:%llu\n",
+				   droq->q_no, droq->stats.rx_dropped);
+		}
+
+	} else {
+		recv_buffer_free(skb);
+	}
+}
+
+/**
+ * \brief wrapper for calling napi_schedule
+ * @param param parameters to pass to napi_schedule
+ *
+ * Used when scheduling on different CPUs
+ */
+static void napi_schedule_wrapper(void *param)
+{
+	struct napi_struct *napi = param;
+
+	napi_schedule(napi);
+}
+
+/**
+ * \brief callback when receive interrupt occurs and we are in NAPI mode
+ * @param arg pointer to octeon output queue
+ */
+static void liquidio_napi_drv_callback(void *arg)
+{
+	struct octeon_droq *droq = arg;
+	int this_cpu = smp_processor_id();
+
+	if (droq->cpu_id == this_cpu) {
+		napi_schedule(&droq->napi);
+	} else {
+		struct call_single_data *csd = &droq->csd;
+
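+		/* The interrupt fired on a CPU other than the one this
+		 * DROQ is bound to; bounce the NAPI schedule over via an
+		 * async IPI so RX processing runs on droq->cpu_id.
+		 */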
+		csd->func = napi_schedule_wrapper;
+		csd->info = &droq->napi;
+		csd->flags = 0;
+
+		smp_call_function_single_async(droq->cpu_id, csd);
+	}
+}
+
+/**
+ * \brief Main NAPI poll function
+ * @param droq octeon output queue
+ * @param budget maximum number of items to process
+ */
+static int liquidio_napi_do_rx(struct octeon_droq *droq, int budget)
+{
+	int work_done;
+	struct lio *lio = GET_LIO(droq->napi.dev);
+	struct octeon_device *oct = lio->oct_dev;
+
+	work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
+						 POLL_EVENT_PROCESS_PKTS,
+						 budget);
+	if (work_done < 0) {
+		netif_info(lio, rx_err, lio->netdev,
+			   "Receive work_done < 0, rxq:%d\n", droq->q_no);
+		goto octnet_napi_finish;
+	}
+
+	if (work_done > budget)
+		dev_err(&oct->pci_dev->dev, ">>>> %s work_done: %d budget: %d\n",
+			__func__, work_done, budget);
+
+	return work_done;
+
+octnet_napi_finish:
+	napi_complete(&droq->napi);
+	octeon_process_droq_poll_cmd(oct, droq->q_no, POLL_EVENT_ENABLE_INTR,
+				     0);
+	return 0;
+}
+
+/**
+ * \brief Entry point for NAPI polling
+ * @param napi NAPI structure
+ * @param budget maximum number of items to process
+ */
+static int liquidio_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct octeon_droq *droq;
+	int work_done;
+
+	droq = container_of(napi, struct octeon_droq, napi);
+
+	work_done = liquidio_napi_do_rx(droq, budget);
+
+	if (work_done < budget) {
+		napi_complete(napi);
+		octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
+					     POLL_EVENT_ENABLE_INTR, 0);
+		return 0;
+	}
+
+	return work_done;
+}
+
+/**
+ * \brief Setup input and output queues
+ * @param octeon_dev octeon device
+ * @param net_device Net device
+ *
+ * Note: Queues are with respect to the octeon device. Thus
+ * an input queue is for egress packets, and output queues
+ * are for ingress packets.
+ */
+static inline int setup_io_queues(struct octeon_device *octeon_dev,
+				  struct net_device *net_device)
+{
+	static int first_time = 1;
+	static struct octeon_droq_ops droq_ops;
+	static int cpu_id;
+	static int cpu_id_modulus;
+	struct octeon_droq *droq;
+	struct napi_struct *napi;
+	int q, q_no, retval = 0;
+	struct lio *lio;
+	int num_tx_descs;
+
+	lio = GET_LIO(net_device);
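+	/* droq_ops and the CPU round-robin state are shared by all
+	 * interfaces, so set them up only on the first call.
+	 */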
+	if (first_time) {
+		first_time = 0;
+		memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
+
+		droq_ops.fptr = liquidio_push_packet;
+
+		droq_ops.poll_mode = 1;
+		droq_ops.napi_fn = liquidio_napi_drv_callback;
+		cpu_id = 0;
+		cpu_id_modulus = num_present_cpus();
+	}
+
+	/* set up DROQs. */
+	for (q = 0; q < lio->linfo.num_rxpciq; q++) {
+		q_no = lio->linfo.rxpciq[q];
+
+		retval = octeon_setup_droq(octeon_dev, q_no,
+					   CFG_GET_NUM_RX_DESCS_NIC_IF
+						   (octeon_get_conf(octeon_dev),
+						   lio->ifidx),
+					   CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
+						   (octeon_get_conf(octeon_dev),
+						   lio->ifidx), NULL);
+		if (retval) {
+			dev_err(&octeon_dev->pci_dev->dev,
+				" %s : Runtime DROQ(RxQ) creation failed.\n",
+				__func__);
+			return 1;
+		}
+
+		droq = octeon_dev->droq[q_no];
+		napi = &droq->napi;
+		netif_napi_add(net_device, napi, liquidio_napi_poll, 64);
+
+		/* designate a CPU for this droq */
+		droq->cpu_id = cpu_id;
+		cpu_id++;
+		if (cpu_id >= cpu_id_modulus)
+			cpu_id = 0;
+
+		octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
+	}
+
+	/* set up IQs. */
+	for (q = 0; q < lio->linfo.num_txpciq; q++) {
+		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
+							   (octeon_dev),
+							   lio->ifidx);
+		retval = octeon_setup_iq(octeon_dev, lio->linfo.txpciq[q],
+					 num_tx_descs,
+					 netdev_get_tx_queue(net_device, q));
+		if (retval) {
+			dev_err(&octeon_dev->pci_dev->dev,
+				" %s : Runtime IQ(TxQ) creation failed.\n",
+				__func__);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * \brief Poll routine for checking transmit queue status
+ * @param work work_struct data structure
+ */
+static void octnet_poll_check_txq_status(struct work_struct *work)
+{
+	struct cavium_wk *wk = (struct cavium_wk *)work;
+	struct lio *lio = (struct lio *)wk->ctxptr;
+
+	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
+		return;
+
+	check_txq_status(lio);
+	queue_delayed_work(lio->txq_status_wq.wq,
+			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
+}
+
+/**
+ * \brief Sets up the txq poll check
+ * @param netdev network device
+ */
+static inline void setup_tx_poll_fn(struct net_device *netdev)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+
+	lio->txq_status_wq.wq = create_workqueue("txq-status");
+	if (!lio->txq_status_wq.wq) {
+		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
+		return;
+	}
+	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
+			  octnet_poll_check_txq_status);
+	lio->txq_status_wq.wk.ctxptr = lio;
+	queue_delayed_work(lio->txq_status_wq.wq,
+			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
+}
+
+/**
+ * \brief Net device open for LiquidIO
+ * @param netdev network device
+ */
+static int liquidio_open(struct net_device *netdev)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	struct napi_struct *napi, *n;
+
+	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+		napi_enable(napi);
+
+	oct_ptp_open(netdev);
+
+	ifstate_set(lio, LIO_IFSTATE_RUNNING);
+	setup_tx_poll_fn(netdev);
+	start_txq(netdev);
+
+	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
+	try_module_get(THIS_MODULE);
+
+	/* tell Octeon to start forwarding packets to host */
+	send_rx_ctrl_cmd(lio, 1);
+
+	/* Ready for link status updates */
+	lio->intf_open = 1;
+
+	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
+		 netdev->name);
+
+	return 0;
+}
+
+/**
+ * \brief Net device stop for LiquidIO
+ * @param netdev network device
+ */
+static int liquidio_stop(struct net_device *netdev)
+{
+	struct napi_struct *napi, *n;
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+
+	netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
+	/* Inform that netif carrier is down */
+	lio->intf_open = 0;
+	lio->linfo.link.s.status = 0;
+
+	netif_carrier_off(netdev);
+
+	/* tell Octeon to stop forwarding packets to host */
+	send_rx_ctrl_cmd(lio, 0);
+
+	cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
+	flush_workqueue(lio->txq_status_wq.wq);
+	destroy_workqueue(lio->txq_status_wq.wq);
+
+	if (lio->ptp_clock) {
+		ptp_clock_unregister(lio->ptp_clock);
+		lio->ptp_clock = NULL;
+	}
+
+	ifstate_reset(lio, LIO_IFSTATE_RUNNING);
+
+	/* This is a hack that allows DHCP to continue working. */
+	set_bit(__LINK_STATE_START, &lio->netdev->state);
+
+	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+		napi_disable(napi);
+
+	txqs_stop(netdev);
+
+	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
+	module_put(THIS_MODULE);
+
+	return 0;
+}
+
+void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
+{
+	struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
+	struct net_device *netdev = (struct net_device *)nctrl->netpndev;
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+
+	switch (nctrl->ncmd.s.cmd) {
+	case OCTNET_CMD_CHANGE_DEVFLAGS:
+	case OCTNET_CMD_SET_MULTI_LIST:
+		break;
+
+	case OCTNET_CMD_CHANGE_MACADDR:
+		/* If command is successful, change the MACADDR. */
+		netif_info(lio, probe, lio->netdev, " MACAddr changed to 0x%llx\n",
+			   CVM_CAST64(nctrl->udd[0]));
+		dev_info(&oct->pci_dev->dev, "%s MACAddr changed to 0x%llx\n",
+			 netdev->name, CVM_CAST64(nctrl->udd[0]));
+		memcpy(netdev->dev_addr, ((u8 *)&nctrl->udd[0]) + 2, ETH_ALEN);
+		break;
+
+	case OCTNET_CMD_CHANGE_MTU:
+		/* If command is successful, change the MTU. */
+		netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n",
+			   netdev->mtu, nctrl->ncmd.s.param2);
+		dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
+			 netdev->name, netdev->mtu,
+			 nctrl->ncmd.s.param2);
+		netdev->mtu = nctrl->ncmd.s.param2;
+		break;
+
+	case OCTNET_CMD_GPIO_ACCESS:
+		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
+
+		break;
+
+	case OCTNET_CMD_LRO_ENABLE:
+		dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
+		break;
+
+	case OCTNET_CMD_LRO_DISABLE:
+		dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
+			 netdev->name);
+		break;
+
+	case OCTNET_CMD_VERBOSE_ENABLE:
+		dev_info(&oct->pci_dev->dev, "%s Verbose mode enabled\n",
+			 netdev->name);
+		break;
+
+	case OCTNET_CMD_VERBOSE_DISABLE:
+		dev_info(&oct->pci_dev->dev, "%s Verbose mode disabled\n",
+			 netdev->name);
+		break;
+
+	case OCTNET_CMD_SET_SETTINGS:
+		dev_info(&oct->pci_dev->dev, "%s settings changed\n",
+			 netdev->name);
+
+		break;
+
+	default:
+		dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
+			nctrl->ncmd.s.cmd);
+	}
+}
+
+/**
+ * \brief Converts a mask based on net device flags
+ * @param netdev network device
+ *
+ * This routine generates an octnet_ifflags mask from the net device flags
+ * received from the OS.
+ */
+static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
+{
+	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
+
+	if (netdev->flags & IFF_PROMISC)
+		f |= OCTNET_IFFLAG_PROMISC;
+
+	if (netdev->flags & IFF_ALLMULTI)
+		f |= OCTNET_IFFLAG_ALLMULTI;
+
+	if (netdev->flags & IFF_MULTICAST) {
+		f |= OCTNET_IFFLAG_MULTICAST;
+
+		/* Accept all multicast addresses if there are more than we
+		 * can handle
+		 */
+		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
+			f |= OCTNET_IFFLAG_ALLMULTI;
+	}
+
+	if (netdev->flags & IFF_BROADCAST)
+		f |= OCTNET_IFFLAG_BROADCAST;
+
+	return f;
+}
+
+/**
+ * \brief Net device set_multicast_list
+ * @param netdev network device
+ */
+static void liquidio_set_mcast_list(struct net_device *netdev)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	struct octnic_ctrl_pkt nctrl;
+	struct octnic_ctrl_params nparams;
+	struct netdev_hw_addr *ha;
+	u64 *mc;
+	int ret, i;
+	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
+
+	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+	/* Create a ctrl pkt command to be sent to core app. */
+	nctrl.ncmd.u64 = 0;
+	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
+	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+	nctrl.ncmd.s.param2 = get_new_flags(netdev);
+	nctrl.ncmd.s.param3 = mc_count;
+	nctrl.ncmd.s.more = mc_count;
+	nctrl.netpndev = (u64)netdev;
+	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+	/* copy all the addresses into the udd */
+	i = 0;
+	mc = &nctrl.udd[0];
+	netdev_for_each_mc_addr(ha, netdev) {
+		*mc = 0;
+		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
+		/* no need to swap bytes */
+
+		/* stop once mc_count addresses have been copied */
+		if (++mc >= &nctrl.udd[mc_count])
+			break;
+	}
+
+	/* This callback runs in atomic context (the kernel holds the
+	 * address-list lock), so we must not wait for a response.
+	 */
+	nctrl.wait_time = 0;
+
+	nparams.resp_order = OCTEON_RESP_NORESPONSE;
+
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+	if (ret < 0) {
+		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
+			ret);
+	}
+}
+
+/**
+ * \brief Net device set_mac_address
+ * @param netdev network device
+ */
+static int liquidio_set_mac(struct net_device *netdev, void *p)
+{
+	int ret = 0;
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	struct sockaddr *addr = (struct sockaddr *)p;
+	struct octnic_ctrl_pkt nctrl;
+	struct octnic_ctrl_params nparams;
+
+	if ((!is_valid_ether_addr(addr->sa_data)) ||
+	    (ifstate_check(lio, LIO_IFSTATE_RUNNING)))
+		return -EADDRNOTAVAIL;
+
+	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+	nctrl.ncmd.u64 = 0;
+	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
+	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+	nctrl.ncmd.s.param2 = 0;
+	nctrl.ncmd.s.more = 1;
+	nctrl.netpndev = (u64)netdev;
+	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+	nctrl.wait_time = 100;
+
+	nctrl.udd[0] = 0;
+	/* The MAC Address is presented in network byte order. */
+	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
+
+	nparams.resp_order = OCTEON_RESP_ORDERED;
+
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+	if (ret < 0) {
+		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
+		return -ENOMEM;
+	}
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
+
+	return 0;
+}
+
+/**
+ * \brief Net device get_stats
+ * @param netdev network device
+ */
+static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct net_device_stats *stats = &netdev->stats;
+	struct octeon_device *oct;
+	u64 pkts = 0, drop = 0, bytes = 0;
+	struct oct_droq_stats *oq_stats;
+	struct oct_iq_stats *iq_stats;
+	int i, iq_no, oq_no;
+
+	oct = lio->oct_dev;
+
+	for (i = 0; i < lio->linfo.num_txpciq; i++) {
+		iq_no = lio->linfo.txpciq[i];
+		iq_stats = &oct->instr_queue[iq_no]->stats;
+		pkts += iq_stats->tx_done;
+		drop += iq_stats->tx_dropped;
+		bytes += iq_stats->tx_tot_bytes;
+	}
+
+	stats->tx_packets = pkts;
+	stats->tx_bytes = bytes;
+	stats->tx_dropped = drop;
+
+	pkts = 0;
+	drop = 0;
+	bytes = 0;
+
+	for (i = 0; i < lio->linfo.num_rxpciq; i++) {
+		oq_no = lio->linfo.rxpciq[i];
+		oq_stats = &oct->droq[oq_no]->stats;
+		pkts += oq_stats->rx_pkts_received;
+		drop += (oq_stats->rx_dropped +
+			 oq_stats->dropped_nodispatch +
+			 oq_stats->dropped_toomany +
+			 oq_stats->dropped_nomem);
+		bytes += oq_stats->rx_bytes_received;
+	}
+
+	stats->rx_bytes = bytes;
+	stats->rx_packets = pkts;
+	stats->rx_dropped = drop;
+
+	return stats;
+}
+
+/**
+ * \brief Net device change_mtu
+ * @param netdev network device
+ */
+static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	struct octnic_ctrl_pkt nctrl;
+	struct octnic_ctrl_params nparams;
+	int max_frm_size = new_mtu + OCTNET_FRM_HEADER_SIZE;
+	int ret = 0;
+
+	/* Limit the MTU to make sure the ethernet packets are between 64 bytes
+	 * and 65535 bytes
+	 */
+	if ((max_frm_size < OCTNET_MIN_FRM_SIZE) ||
+	    (max_frm_size > OCTNET_MAX_FRM_SIZE)) {
+		dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu);
+		dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n",
+			(OCTNET_MIN_FRM_SIZE - OCTNET_FRM_HEADER_SIZE),
+			(OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE));
+		return -EINVAL;
+	}
+
+	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+	nctrl.ncmd.u64 = 0;
+	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
+	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+	nctrl.ncmd.s.param2 = new_mtu;
+	nctrl.wait_time = 100;
+	nctrl.netpndev = (u64)netdev;
+	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+	nparams.resp_order = OCTEON_RESP_ORDERED;
+
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+	if (ret < 0) {
+		dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
+		return -1;
+	}
+
+	lio->mtu = new_mtu;
+
+	return 0;
+}
+
+/**
+ * \brief Handler for SIOCSHWTSTAMP ioctl
+ * @param netdev network device
+ * @param ifr interface request
+ * @param cmd command
+ */
+static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	struct hwtstamp_config conf;
+	struct lio *lio = GET_LIO(netdev);
+
+	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
+		return -EFAULT;
+
+	if (conf.flags)
+		return -EINVAL;
+
+	switch (conf.tx_type) {
+	case HWTSTAMP_TX_ON:
+	case HWTSTAMP_TX_OFF:
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (conf.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		break;
+	case HWTSTAMP_FILTER_ALL:
+	case HWTSTAMP_FILTER_SOME:
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
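+		/* The hardware timestamps either every RX packet or none,
+		 * so any PTP filter request is upgraded to FILTER_ALL.
+		 */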
+		conf.rx_filter = HWTSTAMP_FILTER_ALL;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
+		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
+
+	else
+		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
+
+	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
+}
+
+/**
+ * \brief ioctl handler
+ * @param netdev network device
+ * @param ifr interface request
+ * @param cmd command
+ */
+static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	switch (cmd) {
+	case SIOCSHWTSTAMP:
+		return hwtstamp_ioctl(netdev, ifr, cmd);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/**
+ * \brief handle a Tx timestamp response
+ * @param status response status
+ * @param buf pointer to skb
+ */
+static void handle_timestamp(struct octeon_device *oct,
+			     u32 status,
+			     void *buf)
+{
+	struct octnet_buf_free_info *finfo;
+	struct octeon_soft_command *sc;
+	struct oct_timestamp_resp *resp;
+	struct lio *lio;
+	struct sk_buff *skb = (struct sk_buff *)buf;
+
+	finfo = (struct octnet_buf_free_info *)skb->cb;
+	lio = finfo->lio;
+	sc = finfo->sc;
+	oct = lio->oct_dev;
+	resp = (struct oct_timestamp_resp *)sc->virtrptr;
+
+	if (status != OCTEON_REQUEST_DONE) {
+		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
+			CVM_CAST64(status));
+		resp->timestamp = 0;
+	}
+
+	octeon_swap_8B_data(&resp->timestamp, 1);
+
+	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
+		struct skb_shared_hwtstamps ts;
+		u64 ns = resp->timestamp;
+
+		netif_info(lio, tx_done, lio->netdev,
+			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
+			   skb, (unsigned long long)ns);
+		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
+		skb_tstamp_tx(skb, &ts);
+	}
+
+	octeon_free_soft_command(oct, sc);
+	recv_buffer_free(skb);
+}
+
+/** \brief Send a data packet that will be timestamped
+ * @param oct octeon device
+ * @param ndata pointer to network data
+ * @param finfo pointer to private network data
+ */
+static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
+					 struct octnic_data_pkt *ndata,
+					 struct octnet_buf_free_info *finfo,
+					 int xmit_more)
+{
+	int retval;
+	struct octeon_soft_command *sc;
+	struct octeon_instr_ih *ih;
+	struct octeon_instr_rdp *rdp;
+	struct lio *lio;
+	int ring_doorbell;
+
+	lio = finfo->lio;
+
+	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
+					    sizeof(struct oct_timestamp_resp));
+	finfo->sc = sc;
+
+	if (!sc) {
+		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
+		return IQ_SEND_FAILED;
+	}
+
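+	/* A timestamped packet needs a response from the firmware, so
+	 * promote the no-response request types to their response-carrying
+	 * equivalents.
+	 */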
+	if (ndata->reqtype == REQTYPE_NORESP_NET)
+		ndata->reqtype = REQTYPE_RESP_NET;
+	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
+		ndata->reqtype = REQTYPE_RESP_NET_SG;
+
+	sc->callback = handle_timestamp;
+	sc->callback_arg = finfo->skb;
+	sc->iq_no = ndata->q_no;
+
+	ih = (struct octeon_instr_ih *)&sc->cmd.ih;
+	rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+
+	ring_doorbell = !xmit_more;
+	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
+				     sc, ih->dlengsz, ndata->reqtype);
+
+	if (retval) {
+		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
+			retval);
+		octeon_free_soft_command(oct, sc);
+	} else {
+		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
+	}
+
+	return retval;
+}
+
+static inline int is_ipv4(struct sk_buff *skb)
+{
+	return (skb->protocol == htons(ETH_P_IP)) &&
+	       (ip_hdr(skb)->version == 4);
+}
+
+static inline int is_vlan(struct sk_buff *skb)
+{
+	return skb->protocol == htons(ETH_P_8021Q);
+}
+
+static inline int is_ip_fragmented(struct sk_buff *skb)
+{
+	/* The Don't Fragment and Reserved flag fields are ignored.
+	 * An IP packet is fragmented if
+	 * -  the More Fragments bit is set (indicating this packet is a
+	 *    fragment with more to follow; the current offset could be 0), or
+	 * -  the Fragment Offset field is non-zero.
+	 */
+	return (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ? 1 : 0;
+}
+
+static inline int is_ipv6(struct sk_buff *skb)
+{
+	return (skb->protocol == htons(ETH_P_IPV6)) &&
+	       (ipv6_hdr(skb)->version == 6);
+}
+
+static inline int is_with_extn_hdr(struct sk_buff *skb)
+{
+	return (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP) &&
+	       (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP);
+}
+
+static inline int is_tcpudp(struct sk_buff *skb)
+{
+	return (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
+	       (ip_hdr(skb)->protocol == IPPROTO_UDP);
+}
+
+static inline u32 get_ipv4_5tuple_tag(struct sk_buff *skb)
+{
+	u32 tag;
+	struct iphdr *iphdr = ip_hdr(skb);
+
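+	/* 5-tuple: protocol (1 byte), saddr + daddr (8 bytes), then the
+	 * first 4 bytes of the transport header (source + dest ports).
+	 */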
+	tag = crc32(0, &iphdr->protocol, 1);
+	tag = crc32(tag, (u8 *)&iphdr->saddr, 8);
+	tag = crc32(tag, skb_transport_header(skb), 4);
+	return tag;
+}
+
+static inline u32 get_ipv6_5tuple_tag(struct sk_buff *skb)
+{
+	u32 tag;
+	struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
+
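+	/* 5-tuple: nexthdr (1 byte), saddr + daddr (32 bytes), then the
+	 * first 4 bytes of the transport header (source + dest ports).
+	 */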
+	tag = crc32(0, &ipv6hdr->nexthdr, 1);
+	tag = crc32(tag, (u8 *)&ipv6hdr->saddr, 32);
+	tag = crc32(tag, skb_transport_header(skb), 4);
+	return tag;
+}
+
+/** \brief Transmit network packets to the Octeon interface
+ * @param skb      skbuff struct to be transmitted
+ * @param netdev    pointer to network device
+ * @returns whether the packet was transmitted to the device okay or not
+ *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
+ */
+static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct lio *lio;
+	struct octnet_buf_free_info *finfo;
+	union octnic_cmd_setup cmdsetup;
+	struct octnic_data_pkt ndata;
+	struct octeon_device *oct;
+	struct oct_iq_stats *stats;
+	int cpu = 0, status = 0;
+	int q_idx = 0, iq_no = 0;
+	int xmit_more;
+	u32 tag = 0;
+
+	lio = GET_LIO(netdev);
+	oct = lio->oct_dev;
+
+	if (netif_is_multiqueue(netdev)) {
+		cpu = skb->queue_mapping;
+		q_idx = (cpu & (lio->linfo.num_txpciq - 1));
+		iq_no = lio->linfo.txpciq[q_idx];
+	} else {
+		iq_no = lio->txq;
+	}
+
+	stats = &oct->instr_queue[iq_no]->stats;
+
+	/* Check for all conditions in which the current packet cannot be
+	 * transmitted.
+	 */
+	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
+	    (!lio->linfo.link.s.status) ||
+	    (skb->len <= 0)) {
+		netif_info(lio, tx_err, lio->netdev,
+			   "Transmit failed link_status : %d\n",
+			   lio->linfo.link.s.status);
+		goto lio_xmit_failed;
+	}
+
+	/* Use space in skb->cb to store info used to unmap and
+	 * free the buffers.
+	 */
+	finfo = (struct octnet_buf_free_info *)skb->cb;
+	finfo->lio = lio;
+	finfo->skb = skb;
+	finfo->sc = NULL;
+
+	/* Prepare the attributes for the data to be passed to OSI. */
+	memset(&ndata, 0, sizeof(struct octnic_data_pkt));
+
+	ndata.buf = (void *)finfo;
+
+	ndata.q_no = iq_no;
+
+	if (netif_is_multiqueue(netdev)) {
+		if (octnet_iq_is_full(oct, ndata.q_no)) {
+			/* defer sending if queue is full */
+			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
+				   ndata.q_no);
+			stats->tx_iq_busy++;
+			return NETDEV_TX_BUSY;
+		}
+	} else {
+		if (octnet_iq_is_full(oct, lio->txq)) {
+			/* defer sending if queue is full */
+			stats->tx_iq_busy++;
+			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
+				   ndata.q_no);
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+	ndata.datasize = skb->len;
+
+	cmdsetup.u64 = 0;
+	cmdsetup.s.ifidx = lio->linfo.ifidx;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		if (is_ipv4(skb) && !is_ip_fragmented(skb) && is_tcpudp(skb)) {
+			tag = get_ipv4_5tuple_tag(skb);
+
+			cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1;
+
+			if (ip_hdr(skb)->ihl > 5)
+				cmdsetup.s.ipv4opts_ipv6exthdr =
+						OCT_PKT_PARAM_IPV4OPTS;
+
+		} else if (is_ipv6(skb)) {
+			tag = get_ipv6_5tuple_tag(skb);
+
+			cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1;
+
+			if (is_with_extn_hdr(skb))
+				cmdsetup.s.ipv4opts_ipv6exthdr =
+						OCT_PKT_PARAM_IPV6EXTHDR;
+
+		} else if (is_vlan(skb)) {
+			if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto
+				== htons(ETH_P_IP) &&
+				!is_ip_fragmented(skb) && is_tcpudp(skb)) {
+				tag = get_ipv4_5tuple_tag(skb);
+
+				cmdsetup.s.cksum_offset =
+					sizeof(struct vlan_ethhdr) + 1;
+
+				if (ip_hdr(skb)->ihl > 5)
+					cmdsetup.s.ipv4opts_ipv6exthdr =
+						OCT_PKT_PARAM_IPV4OPTS;
+
+			} else if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto
+				== htons(ETH_P_IPV6)) {
+				tag = get_ipv6_5tuple_tag(skb);
+
+				cmdsetup.s.cksum_offset =
+					sizeof(struct vlan_ethhdr) + 1;
+
+				if (is_with_extn_hdr(skb))
+					cmdsetup.s.ipv4opts_ipv6exthdr =
+						OCT_PKT_PARAM_IPV6EXTHDR;
+			}
+		}
+	}
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		cmdsetup.s.timestamp = 1;
+	}
+
+	if (skb_shinfo(skb)->nr_frags == 0) {
+		cmdsetup.s.u.datasize = skb->len;
+		octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag);
+		/* Offload checksum calculation for TCP/UDP packets */
+		ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev,
+						skb->data,
+						skb->len,
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) {
+			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
+				__func__);
+			return NETDEV_TX_BUSY;
+		}
+
+		finfo->dptr = ndata.cmd.dptr;
+
+		ndata.reqtype = REQTYPE_NORESP_NET;
+
+	} else {
+		int i, frags;
+		struct skb_frag_struct *frag;
+		struct octnic_gather *g;
+
+		spin_lock(&lio->lock);
+		g = (struct octnic_gather *)list_delete_head(&lio->glist);
+		spin_unlock(&lio->lock);
+
+		if (!g) {
+			netif_info(lio, tx_err, lio->netdev,
+				   "Transmit scatter gather: glist null!\n");
+			goto lio_xmit_failed;
+		}
+
+		cmdsetup.s.gather = 1;
+		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
+		octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag);
+
+		memset(g->sg, 0, g->sg_size);
+
+		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
+						 skb->data,
+						 (skb->len - skb->data_len),
+						 DMA_TO_DEVICE);
+		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
+			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
+				__func__);
+			return NETDEV_TX_BUSY;
+		}
+		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
+
+		frags = skb_shinfo(skb)->nr_frags;
+		i = 1;
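+		/* Four pointers per SG entry: entry i >> 2, slot i & 3.
+		 * Slot 0 of entry 0 already holds the linear data.
+		 */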
+		while (frags--) {
+			frag = &skb_shinfo(skb)->frags[i - 1];
+
+			g->sg[(i >> 2)].ptr[(i & 3)] =
+				dma_map_page(&oct->pci_dev->dev,
+					     frag->page.p,
+					     frag->page_offset,
+					     frag->size,
+					     DMA_TO_DEVICE);
+
+			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
+			i++;
+		}
+
+		ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev,
+						g->sg, g->sg_size,
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) {
+			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
+				__func__);
+			dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
+					 skb->len - skb->data_len,
+					 DMA_TO_DEVICE);
+			return NETDEV_TX_BUSY;
+		}
+
+		finfo->dptr = ndata.cmd.dptr;
+		finfo->g = g;
+
+		ndata.reqtype = REQTYPE_NORESP_NET_SG;
+	}
+
+	if (skb_shinfo(skb)->gso_size) {
+		struct octeon_instr_irh *irh =
+			(struct octeon_instr_irh *)&ndata.cmd.irh;
+		union tx_info *tx_info = (union tx_info *)&ndata.cmd.ossp[0];
+
+		irh->len = 1;   /* to indicate that ossp[0] contains tx_info */
+		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
+		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
+	}
+
+	xmit_more = skb->xmit_more;
+
+	if (unlikely(cmdsetup.s.timestamp))
+		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
+	else
+		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
+	if (status == IQ_SEND_FAILED)
+		goto lio_xmit_failed;
+
+	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
+
+	if (status == IQ_SEND_STOP)
+		stop_q(lio->netdev, q_idx);
+
+	netdev->trans_start = jiffies;
+
+	stats->tx_done++;
+	stats->tx_tot_bytes += skb->len;
+
+	return NETDEV_TX_OK;
+
+lio_xmit_failed:
+	stats->tx_dropped++;
+	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
+		   iq_no, stats->tx_dropped);
+	dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
+			 ndata.datasize, DMA_TO_DEVICE);
+	recv_buffer_free(skb);
+	return NETDEV_TX_OK;
+}
+
+/** \brief Network device Tx timeout
+ * @param netdev    pointer to network device
+ */
+static void liquidio_tx_timeout(struct net_device *netdev)
+{
+	struct lio *lio;
+
+	lio = GET_LIO(netdev);
+
+	netif_info(lio, tx_err, lio->netdev,
+		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
+		   netdev->stats.tx_dropped);
+	netdev->trans_start = jiffies;
+	txqs_wake(netdev);
+}
+
+int liquidio_set_feature(struct net_device *netdev, int cmd)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	struct octnic_ctrl_pkt nctrl;
+	struct octnic_ctrl_params nparams;
+	int ret = 0;
+
+	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+	nctrl.ncmd.u64 = 0;
+	nctrl.ncmd.s.cmd = cmd;
+	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+	nctrl.ncmd.s.param2 = OCTNIC_LROIPV4 | OCTNIC_LROIPV6;
+	nctrl.wait_time = 100;
+	nctrl.netpndev = (u64)netdev;
+	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+	nparams.resp_order = OCTEON_RESP_NORESPONSE;
+
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+	if (ret < 0) {
+		dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
+			ret);
+	}
+	return ret;
+}
+
+/** \brief Net device fix features
+ * @param netdev  pointer to network device
+ * @param request features requested
+ * @returns updated features list
+ */
+static netdev_features_t liquidio_fix_features(struct net_device *netdev,
+					       netdev_features_t request)
+{
+	struct lio *lio = netdev_priv(netdev);
+
+	if ((request & NETIF_F_RXCSUM) &&
+	    !(lio->dev_capability & NETIF_F_RXCSUM))
+		request &= ~NETIF_F_RXCSUM;
+
+	if ((request & NETIF_F_HW_CSUM) &&
+	    !(lio->dev_capability & NETIF_F_HW_CSUM))
+		request &= ~NETIF_F_HW_CSUM;
+
+	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
+		request &= ~NETIF_F_TSO;
+
+	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
+		request &= ~NETIF_F_TSO6;
+
+	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
+		request &= ~NETIF_F_LRO;
+
+	/* Disable LRO if RXCSUM is off */
+	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
+	    (lio->dev_capability & NETIF_F_LRO))
+		request &= ~NETIF_F_LRO;
+
+	return request;
+}
+
+/** \brief Net device set features
+ * @param netdev  pointer to network device
+ * @param features features to enable/disable
+ */
+static int liquidio_set_features(struct net_device *netdev,
+				 netdev_features_t features)
+{
+	struct lio *lio = netdev_priv(netdev);
+
+	if (!((netdev->features ^ features) & NETIF_F_LRO))
+		return 0;
+
+	if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
+		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+	else if (!(features & NETIF_F_LRO) &&
+		 (lio->dev_capability & NETIF_F_LRO))
+		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE);
+
+	return 0;
+}
+
+static struct net_device_ops lionetdevops = {
+	.ndo_open		= liquidio_open,
+	.ndo_stop		= liquidio_stop,
+	.ndo_start_xmit		= liquidio_xmit,
+	.ndo_get_stats		= liquidio_get_stats,
+	.ndo_set_mac_address	= liquidio_set_mac,
+	.ndo_set_rx_mode	= liquidio_set_mcast_list,
+	.ndo_tx_timeout		= liquidio_tx_timeout,
+	.ndo_change_mtu		= liquidio_change_mtu,
+	.ndo_do_ioctl		= liquidio_ioctl,
+	.ndo_fix_features	= liquidio_fix_features,
+	.ndo_set_features	= liquidio_set_features,
+};
+
+/** \brief Entry point for the liquidio module
+ */
+static int __init liquidio_init(void)
+{
+	int i;
+	struct handshake *hs;
+
+	init_completion(&first_stage);
+
+	octeon_init_device_list(conf_type);
+
+	if (liquidio_init_pci())
+		return -EINVAL;
+
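+	/* Wait briefly for the first device to probe, then block on each
+	 * probed device's init and start handshakes in turn.
+	 */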
+	wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
+
+	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
+		hs = &handshake[i];
+		if (hs->pci_dev) {
+			wait_for_completion(&hs->init);
+			if (!hs->init_ok) {
+				/* init handshake failed */
+				dev_err(&hs->pci_dev->dev,
+					"Failed to init device\n");
+				liquidio_deinit_pci();
+				return -EIO;
+			}
+		}
+	}
+
+	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
+		hs = &handshake[i];
+		if (hs->pci_dev) {
+			wait_for_completion_timeout(&hs->started,
+						    msecs_to_jiffies(30000));
+			if (!hs->started_ok) {
+				/* starter handshake failed */
+				dev_err(&hs->pci_dev->dev,
+					"Firmware failed to start\n");
+				liquidio_deinit_pci();
+				return -EIO;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
+{
+	struct octeon_device *oct = (struct octeon_device *)buf;
+	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
+	int ifidx = 0;
+	union oct_link_status *ls;
+	int i;
+
+	if ((recv_pkt->buffer_size[0] != sizeof(*ls)) ||
+	    (recv_pkt->rh.r_nic_info.ifidx > oct->ifcount)) {
+		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
+			recv_pkt->buffer_size[0],
+			recv_pkt->rh.r_nic_info.ifidx);
+		goto nic_info_err;
+	}
+
+	ifidx = recv_pkt->rh.r_nic_info.ifidx;
+	ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
+
+	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
+
+	update_link_status(oct->props[ifidx].netdev, ls);
+
+nic_info_err:
+	for (i = 0; i < recv_pkt->buffer_count; i++)
+		recv_buffer_free(recv_pkt->buffer_ptr[i]);
+	octeon_free_recv_info(recv_info);
+	return 0;
+}
+
+/**
+ * \brief Setup network interfaces
+ * @param octeon_dev  octeon device
+ *
+ * Called during init time for each device. It assumes the NIC
+ * is already up and running.  The link information for each
+ * interface is passed in link_info.
+ */
+static int setup_nic_devices(struct octeon_device *octeon_dev)
+{
+	struct lio *lio = NULL;
+	struct net_device *netdev;
+	u8 mac[6], i, j;
+	struct octeon_soft_command *sc;
+	struct liquidio_if_cfg_context *ctx;
+	struct liquidio_if_cfg_resp *resp;
+	struct octdev_props *props;
+	int retval, num_iqueues, num_oqueues, q_no;
+	u64 q_mask;
+	int num_cpus = num_online_cpus();
+	union oct_nic_if_cfg if_cfg;
+	unsigned int base_queue;
+	unsigned int gmx_port_id;
+	u32 resp_size, ctx_size;
+
+	/* This is to handle link status changes */
+	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
+				    OPCODE_NIC_INFO,
+				    lio_nic_info, octeon_dev);
+
+	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
+	 * They are handled directly.
+	 */
+	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
+					free_netbuf);
+
+	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
+					free_netsgbuf);
+
+	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
+					free_netsgbuf_with_resp);
+
+	for (i = 0; i < octeon_dev->ifcount; i++) {
+		resp_size = sizeof(struct liquidio_if_cfg_resp);
+		ctx_size = sizeof(struct liquidio_if_cfg_context);
+		sc = (struct octeon_soft_command *)
+			octeon_alloc_soft_command(octeon_dev, 0,
+						  resp_size, ctx_size);
+		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+		ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
+
+		num_iqueues =
+			CFG_GET_NUM_TXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
+		num_oqueues =
+			CFG_GET_NUM_RXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
+		base_queue =
+			CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i);
+		gmx_port_id =
+			CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i);
+		if (num_iqueues > num_cpus)
+			num_iqueues = num_cpus;
+		if (num_oqueues > num_cpus)
+			num_oqueues = num_cpus;
+		dev_dbg(&octeon_dev->pci_dev->dev,
+			"requesting config for interface %d, iqs %d, oqs %d\n",
+			i, num_iqueues, num_oqueues);
+		ACCESS_ONCE(ctx->cond) = 0;
+		ctx->octeon_id = lio_get_device_id(octeon_dev);
+		init_waitqueue_head(&ctx->wc);
+
+		if_cfg.u64 = 0;
+		if_cfg.s.num_iqueues = num_iqueues;
+		if_cfg.s.num_oqueues = num_oqueues;
+		if_cfg.s.base_queue = base_queue;
+		if_cfg.s.gmx_port_id = gmx_port_id;
+		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
+					    OPCODE_NIC_IF_CFG, i,
+					    if_cfg.u64, 0);
+
+		sc->callback = if_cfg_callback;
+		sc->callback_arg = sc;
+		sc->wait_time = 1000;
+
+		retval = octeon_send_soft_command(octeon_dev, sc);
+		if (retval) {
+			dev_err(&octeon_dev->pci_dev->dev,
+				"iq/oq config failed status: %x\n",
+				retval);
+			/* Soft instr is freed by driver in case of failure. */
+			goto setup_nic_dev_fail;
+		}
+
+		/* Sleep on a wait queue till the cond flag indicates that the
+		 * response arrived or timed-out.
+		 */
+		sleep_cond(&ctx->wc, &ctx->cond);
+		retval = resp->status;
+		if (retval) {
+			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
+			goto setup_nic_dev_fail;
+		}
+
+		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
+				    (sizeof(struct liquidio_if_cfg_info)) >> 3);
+
+		num_iqueues = hweight64(resp->cfg_info.iqmask);
+		num_oqueues = hweight64(resp->cfg_info.oqmask);
+
+		if (!num_iqueues || !num_oqueues) {
+			dev_err(&octeon_dev->pci_dev->dev,
+				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
+				resp->cfg_info.iqmask,
+				resp->cfg_info.oqmask);
+			goto setup_nic_dev_fail;
+		}
+		dev_dbg(&octeon_dev->pci_dev->dev,
+			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
+			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
+			num_iqueues, num_oqueues);
+		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
+
+		if (!netdev) {
+			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
+			goto setup_nic_dev_fail;
+		}
+
+		props = &octeon_dev->props[i];
+		props->netdev = netdev;
+
+		if (num_iqueues > 1)
+			lionetdevops.ndo_select_queue = select_q;
+
+		/* Associate the routines that will handle different
+		 * netdev tasks.
+		 */
+		netdev->netdev_ops = &lionetdevops;
+
+		lio = GET_LIO(netdev);
+
+		memset(lio, 0, sizeof(struct lio));
+
+		lio->linfo.ifidx = resp->cfg_info.ifidx;
+		lio->ifidx = resp->cfg_info.ifidx;
+
+		lio->linfo.num_rxpciq = num_oqueues;
+		lio->linfo.num_txpciq = num_iqueues;
+		q_mask = resp->cfg_info.oqmask;
+		/* q_mask is 0-based and already verified mask is nonzero */
+		for (j = 0; j < num_oqueues; j++) {
+			q_no = __ffs64(q_mask);
+			q_mask &= (~(1UL << q_no));
+			lio->linfo.rxpciq[j] = q_no;
+		}
+		q_mask = resp->cfg_info.iqmask;
+		for (j = 0; j < num_iqueues; j++) {
+			q_no = __ffs64(q_mask);
+			q_mask &= (~(1UL << q_no));
+			lio->linfo.txpciq[j] = q_no;
+		}
+		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
+		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
+		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
+
+		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
+		lio->dev_capability = NETIF_F_HIGHDMA
+				      | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+				      | NETIF_F_SG | NETIF_F_RXCSUM
+				      | NETIF_F_TSO | NETIF_F_TSO6
+				      | NETIF_F_LRO;
+		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
+
+		netdev->features = lio->dev_capability;
+		netdev->vlan_features = lio->dev_capability;
+
+		netdev->hw_features = lio->dev_capability;
+
+		/* Point to the  properties for octeon device to which this
+		 * interface belongs.
+		 */
+		lio->oct_dev = octeon_dev;
+		lio->octprops = props;
+		lio->netdev = netdev;
+		spin_lock_init(&lio->lock);
+
+		dev_dbg(&octeon_dev->pci_dev->dev,
+			"if%d gmx: %d hw_addr: 0x%llx\n", i,
+			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
+
+		/* 64-bit swap required on LE machines */
+		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
+		for (j = 0; j < 6; j++)
+			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
+
+		/* Copy MAC Address to OS network device structure */
+
+		ether_addr_copy(netdev->dev_addr, mac);
+
+		if (setup_io_queues(octeon_dev, netdev)) {
+			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
+			goto setup_nic_dev_fail;
+		}
+
+		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
+
+		/* By default, an interface uses the first of the tx and rx
+		 * queues assigned to it.
+		 */
+		lio->txq = lio->linfo.txpciq[0];
+		lio->rxq = lio->linfo.rxpciq[0];
+
+		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
+		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
+
+		if (setup_glist(lio)) {
+			dev_err(&octeon_dev->pci_dev->dev,
+				"Gather list allocation failed\n");
+			goto setup_nic_dev_fail;
+		}
+
+		/* Register ethtool support */
+		liquidio_set_ethtool_ops(netdev);
+
+		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+
+		if ((debug != -1) && (debug & NETIF_MSG_HW))
+			liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE);
+
+		/* Register the network device with the OS */
+		if (register_netdev(netdev)) {
+			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
+			goto setup_nic_dev_fail;
+		}
+
+		dev_dbg(&octeon_dev->pci_dev->dev,
+			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
+			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+		netif_carrier_off(netdev);
+
+		if (lio->linfo.link.s.status) {
+			netif_carrier_on(netdev);
+			start_txq(netdev);
+		} else {
+			netif_carrier_off(netdev);
+		}
+
+		ifstate_set(lio, LIO_IFSTATE_REGISTERED);
+
+		dev_dbg(&octeon_dev->pci_dev->dev,
+			"NIC ifidx:%d Setup successful\n", i);
+
+		octeon_free_soft_command(octeon_dev, sc);
+	}
+
+	return 0;
+
+setup_nic_dev_fail:
+
+	octeon_free_soft_command(octeon_dev, sc);
+
+	while (i--) {
+		dev_err(&octeon_dev->pci_dev->dev,
+			"NIC ifidx:%d Setup failed\n", i);
+		liquidio_destroy_nic_device(octeon_dev, i);
+	}
+	return -ENODEV;
+}
+
+/**
+ * \brief initialize the NIC
+ * @param oct octeon device
+ *
+ * This initialization routine is called once the Octeon device application is
+ * up and running
+ */
+static int liquidio_init_nic_module(struct octeon_device *oct)
+{
+	struct oct_intrmod_cfg *intrmod_cfg;
+	int retval = 0;
+	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
+
+	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
+
+	/* Only the default IQ and OQ were initialized earlier; initialize
+	 * the rest as well and run the port_config command for each port.
+	 */
+	oct->ifcount = num_nic_ports;
+
+	memset(oct->props, 0,
+	       sizeof(struct octdev_props) * num_nic_ports);
+
+	retval = setup_nic_devices(oct);
+	if (retval) {
+		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
+		goto octnet_init_failure;
+	}
+
+	liquidio_ptp_init(oct);
+
+	/* Initialize interrupt moderation params */
+	intrmod_cfg = &oct->intrmod;
+	intrmod_cfg->intrmod_enable = 1;
+	intrmod_cfg->intrmod_check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
+	intrmod_cfg->intrmod_maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
+	intrmod_cfg->intrmod_minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
+	intrmod_cfg->intrmod_maxcnt_trigger = LIO_INTRMOD_MAXCNT_TRIGGER;
+	intrmod_cfg->intrmod_maxtmr_trigger = LIO_INTRMOD_MAXTMR_TRIGGER;
+	intrmod_cfg->intrmod_mintmr_trigger = LIO_INTRMOD_MINTMR_TRIGGER;
+	intrmod_cfg->intrmod_mincnt_trigger = LIO_INTRMOD_MINCNT_TRIGGER;
+
+	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
+
+	return retval;
+
+octnet_init_failure:
+
+	oct->ifcount = 0;
+
+	return retval;
+}
+
+/**
+ * \brief starter callback that invokes the remaining initialization work after
+ * the NIC is up and running.
+ * @param work  kernel work_struct (embedded in a struct cavium_wk)
+ */
+static void nic_starter(struct work_struct *work)
+{
+	struct octeon_device *oct;
+	struct cavium_wk *wk = (struct cavium_wk *)work;
+
+	oct = (struct octeon_device *)wk->ctxptr;
+
+	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
+		return;
+
+	/* If the status of the device is CORE_OK, the core
+	 * application has reported its application type. Call
+	 * any registered handlers now and move to the RUNNING
+	 * state.
+	 */
+	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
+		schedule_delayed_work(&oct->nic_poll_work.work,
+				      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
+		return;
+	}
+
+	atomic_set(&oct->status, OCT_DEV_RUNNING);
+
+	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
+		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
+
+		if (liquidio_init_nic_module(oct))
+			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
+		else
+			handshake[oct->octeon_id].started_ok = 1;
+	} else {
+		dev_err(&oct->pci_dev->dev,
+			"Unexpected application running on NIC (%d). Check firmware.\n",
+			oct->app_mode);
+	}
+
+	complete(&handshake[oct->octeon_id].started);
+}
+
+/**
+ * \brief Device initialization for each Octeon device that is probed
+ * @param octeon_dev  octeon device
+ */
+static int octeon_device_init(struct octeon_device *octeon_dev)
+{
+	int j, ret;
+	struct octeon_device_priv *oct_priv =
+		(struct octeon_device_priv *)octeon_dev->priv;
+	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
+
+	/* Enable access to the octeon device and make its DMA capability
+	 * known to the OS.
+	 */
+	if (octeon_pci_os_setup(octeon_dev))
+		return 1;
+
+	/* Identify the Octeon type and map the BAR address space. */
+	if (octeon_chip_specific_setup(octeon_dev)) {
+		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
+		return 1;
+	}
+
+	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
+
+	octeon_dev->app_mode = CVM_DRV_INVALID_APP;
+
+	/* Do a soft reset of the Octeon device. */
+	if (octeon_dev->fn_list.soft_reset(octeon_dev))
+		return 1;
+
+	/* Initialize the dispatch mechanism used to push packets arriving on
+	 * Octeon Output queues.
+	 */
+	if (octeon_init_dispatch_list(octeon_dev))
+		return 1;
+
+	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
+				    OPCODE_NIC_CORE_DRV_ACTIVE,
+				    octeon_core_drv_init,
+				    octeon_dev);
+
+	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
+	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
+	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
+			      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
+
+	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
+
+	octeon_set_io_queues_off(octeon_dev);
+
+	/*  Setup the data structures that manage this Octeon's Input queues. */
+	if (octeon_setup_instr_queues(octeon_dev)) {
+		dev_err(&octeon_dev->pci_dev->dev,
+			"instruction queue initialization failed\n");
+		/* On error, release any previously allocated queues */
+		for (j = 0; j < octeon_dev->num_iqs; j++)
+			octeon_delete_instr_queue(octeon_dev, j);
+		return 1;
+	}
+	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
+
+	/* Initialize soft command buffer pool
+	 */
+	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
+		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
+		return 1;
+	}
+	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
+
+	/* Initialize lists to manage the requests of different types that
+	 * arrive from user & kernel applications for this octeon device.
+	 */
+	if (octeon_setup_response_list(octeon_dev)) {
+		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
+		return 1;
+	}
+	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
+
+	if (octeon_setup_output_queues(octeon_dev)) {
+		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
+		/* Release any previously allocated queues */
+		for (j = 0; j < octeon_dev->num_oqs; j++)
+			octeon_delete_droq(octeon_dev, j);
+		return 1;
+	}
+
+	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
+
+	/* The input and output queue registers were setup earlier (the queues
+	 * were not enabled). Any additional registers that need to be
+	 * programmed should be done now.
+	 */
+	ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
+	if (ret) {
+		dev_err(&octeon_dev->pci_dev->dev,
+			"Failed to configure device registers\n");
+		return ret;
+	}
+
+	/* Initialize the tasklet that handles output queue packet processing.*/
+	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
+	tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
+		     (unsigned long)octeon_dev);
+
+	/* Setup the interrupt handler and record the INT SUM register address
+	 */
+	octeon_setup_interrupt(octeon_dev);
+
+	/* Enable Octeon device interrupts */
+	octeon_dev->fn_list.enable_interrupt(octeon_dev->chip);
+
+	/* Enable the input and output queues for this Octeon device */
+	octeon_dev->fn_list.enable_io_queues(octeon_dev);
+
+	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
+
+	dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
+
+	if (ddr_timeout == 0) {
+		dev_info(&octeon_dev->pci_dev->dev,
+			 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
+	}
+
+	schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
+
+	/* Wait for the octeon to initialize DDR after the soft-reset. */
+	ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
+	if (ret) {
+		dev_err(&octeon_dev->pci_dev->dev,
+			"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
+			ret);
+		return 1;
+	}
+
+	if (octeon_wait_for_bootloader(octeon_dev, 1000) != 0) {
+		dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
+		return 1;
+	}
+
+	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
+	ret = octeon_init_consoles(octeon_dev);
+	if (ret) {
+		dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
+		return 1;
+	}
+	ret = octeon_add_console(octeon_dev, 0);
+	if (ret) {
+		dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
+		return 1;
+	}
+
+	atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
+
+	dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
+	ret = load_firmware(octeon_dev);
+	if (ret) {
+		dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
+		return 1;
+	}
+
+	handshake[octeon_dev->octeon_id].init_ok = 1;
+	complete(&handshake[octeon_dev->octeon_id].init);
+
+	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
+
+	/* Send Credit for Octeon Output queues. Credits are always sent after
+	 * the output queue is enabled.
+	 */
+	for (j = 0; j < octeon_dev->num_oqs; j++)
+		writel(octeon_dev->droq[j]->max_count,
+		       octeon_dev->droq[j]->pkts_credit_reg);
+
+	/* Packets can start arriving on the output queues from this point. */
+
+	return 0;
+}
+
+/**
+ * \brief Exits the module
+ */
+static void __exit liquidio_exit(void)
+{
+	liquidio_deinit_pci();
+
+	pr_info("LiquidIO network module is now unloaded\n");
+}
+
+module_init(liquidio_init);
+module_exit(liquidio_exit);
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
new file mode 100644
index 0000000..0ac347c
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -0,0 +1,673 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*!  \file  liquidio_common.h
+ *   \brief Common: Structures and macros used in PCI-NIC package by core and
+ *   host driver.
+ */
+
+#ifndef __LIQUIDIO_COMMON_H__
+#define __LIQUIDIO_COMMON_H__
+
+#include "octeon_config.h"
+
+#define LIQUIDIO_VERSION        "1.1.9"
+#define LIQUIDIO_MAJOR_VERSION  1
+#define LIQUIDIO_MINOR_VERSION  1
+#define LIQUIDIO_MICRO_VERSION  9
+
+#define CONTROL_IQ 0
+/** Tag types used by Octeon cores in its work. */
+enum octeon_tag_type {
+	ORDERED_TAG = 0,
+	ATOMIC_TAG = 1,
+	NULL_TAG = 2,
+	NULL_NULL_TAG = 3
+};
+
+/* pre-defined host->NIC tag values */
+#define LIO_CONTROL  (0x11111110)
+#define LIO_DATA(i)  (0x11111111 + (i))
+
+/* Opcodes used by host driver/apps to perform operations on the core.
+ * These are used to identify the major subsystem that the operation
+ * is for.
+ */
+#define OPCODE_CORE 0           /* used for generic core operations */
+#define OPCODE_NIC  1           /* used for NIC operations */
+#define OPCODE_LAST OPCODE_NIC
+
+/* Subcodes are used by host driver/apps to identify the sub-operation
+ * for the core. They only need to be unique for a given subsystem.
+ */
+#define OPCODE_SUBCODE(op, sub)       (((op & 0x0f) << 8) | ((sub) & 0x7f))
+
+/** OPCODE_CORE subcodes. For future use. */
+
+/** OPCODE_NIC subcodes */
+
+/* This subcode is sent by core PCI driver to indicate cores are ready. */
+#define OPCODE_NIC_CORE_DRV_ACTIVE     0x01
+#define OPCODE_NIC_NW_DATA             0x02     /* network packet data */
+#define OPCODE_NIC_CMD                 0x03
+#define OPCODE_NIC_INFO                0x04
+#define OPCODE_NIC_PORT_STATS          0x05
+#define OPCODE_NIC_MDIO45              0x06
+#define OPCODE_NIC_TIMESTAMP           0x07
+#define OPCODE_NIC_INTRMOD_CFG         0x08
+#define OPCODE_NIC_IF_CFG              0x09
+
+#define CORE_DRV_TEST_SCATTER_OP    0xFFF5
+
+#define OPCODE_SLOW_PATH(rh)  \
+	(OPCODE_SUBCODE(rh->r.opcode, rh->r.subcode) != \
+		OPCODE_SUBCODE(OPCODE_NIC, OPCODE_NIC_NW_DATA))
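As a worked illustration of the encoding above: the fast-path pair packs to
((OPCODE_NIC & 0x0f) << 8) | (OPCODE_NIC_NW_DATA & 0x7f) = (1 << 8) | 0x02 = 0x102,
and OPCODE_SLOW_PATH() simply tests a receive header against that value:

	/* Illustrative only: compute the fast-path opcode/subcode value. */
	u16 fastpath = OPCODE_SUBCODE(OPCODE_NIC, OPCODE_NIC_NW_DATA); /* 0x102 */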
+
+/* Application codes advertised by the core driver initialization packet. */
+#define CVM_DRV_APP_START           0x0
+#define CVM_DRV_NO_APP              0
+#define CVM_DRV_APP_COUNT           0x2
+#define CVM_DRV_BASE_APP            (CVM_DRV_APP_START + 0x0)
+#define CVM_DRV_NIC_APP             (CVM_DRV_APP_START + 0x1)
+#define CVM_DRV_INVALID_APP         (CVM_DRV_APP_START + 0x2)
+#define CVM_DRV_APP_END             (CVM_DRV_INVALID_APP - 1)
+
+/* Macro to increment an index.
+ * The index is incremented by count; if the sum exceeds
+ * max, the index wraps around to the start.
+ */
+#define INCR_INDEX(index, count, max)                \
+do {                                                 \
+	if (((index) + (count)) >= (max))            \
+		index = ((index) + (count)) - (max); \
+	else                                         \
+		index += (count);                    \
+} while (0)
+
+#define INCR_INDEX_BY1(index, max)	\
+do {                                    \
+	if ((++(index)) == (max))       \
+		index = 0;	        \
+} while (0)
+
+#define DECR_INDEX(index, count, max)                  \
+do {						       \
+	if ((count) > (index))                         \
+		index = ((max) - ((count - index)));   \
+	else                                           \
+		index -= count;			       \
+} while (0)
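A minimal sketch of how these helpers behave; wr_idx and ring_size are
hypothetical names used for illustration, not part of the driver:

	u32 wr_idx = 30, ring_size = 32;

	INCR_INDEX(wr_idx, 4, ring_size);  /* 30 + 4 exceeds 32, wraps to 2 */
	INCR_INDEX_BY1(wr_idx, ring_size); /* advances to 3 */
	DECR_INDEX(wr_idx, 5, ring_size);  /* 5 > 3, wraps back to 30 */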
+
+#define OCT_BOARD_NAME 32
+#define OCT_SERIAL_LEN 64
+
+/* Structure used by core driver to send indication that the Octeon
+ * application is ready.
+ */
+struct octeon_core_setup {
+	u64 corefreq;
+
+	char boardname[OCT_BOARD_NAME];
+
+	char board_serial_number[OCT_SERIAL_LEN];
+
+	u64 board_rev_major;
+
+	u64 board_rev_minor;
+
+};
+
+/*---------------------------  SCATTER GATHER ENTRY  -----------------------*/
+
+/* The Scatter-Gather List Entry. The scatter or gather component used with
+ * an Octeon input instruction has this format.
+ */
+struct octeon_sg_entry {
+	/** The first 64 bit gives the size of data in each dptr.*/
+	union {
+		u16 size[4];
+		u64 size64;
+	} u;
+
+	/** The 4 dptr pointers for this entry. */
+	u64 ptr[4];
+
+};
+
+#define OCT_SG_ENTRY_SIZE    (sizeof(struct octeon_sg_entry))
+
+/**
+ * \brief Add size to gather list
+ * @param sg_entry scatter/gather entry
+ * @param size size to add
+ * @param pos position at which to add it
+ */
+static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
+			       u16 size,
+			       u32 pos)
+{
+#ifdef __BIG_ENDIAN_BITFIELD
+	sg_entry->u.size[pos] = size;
+#else
+	sg_entry->u.size[3 - pos] = size;
+#endif
+}
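A minimal sketch of filling one entry, assuming dma0/dma1 and len0/len1 are
already DMA-mapped buffer addresses and lengths (illustrative names only):

	struct octeon_sg_entry sg;

	memset(&sg, 0, sizeof(sg));
	sg.ptr[0] = dma0;
	add_sg_size(&sg, len0, 0);
	sg.ptr[1] = dma1;
	add_sg_size(&sg, len1, 1);

add_sg_size() hides the endian-dependent placement of the four 16-bit lengths
inside size64.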
+
+/*------------------------- End Scatter/Gather ---------------------------*/
+
+#define   OCTNET_FRM_PTP_HEADER_SIZE  8
+#define   OCTNET_FRM_HEADER_SIZE     30 /* PTP timestamp + VLAN + Ethernet */
+
+#define   OCTNET_MIN_FRM_SIZE        (64  + OCTNET_FRM_PTP_HEADER_SIZE)
+#define   OCTNET_MAX_FRM_SIZE        (16000 + OCTNET_FRM_HEADER_SIZE)
+
+#define   OCTNET_DEFAULT_FRM_SIZE    (1500 + OCTNET_FRM_HEADER_SIZE)
+
+/** NIC Commands are sent using this Octeon Input Queue */
+#define   OCTNET_CMD_Q                0
+
+/* NIC Command types */
+#define   OCTNET_CMD_CHANGE_MTU       0x1
+#define   OCTNET_CMD_CHANGE_MACADDR   0x2
+#define   OCTNET_CMD_CHANGE_DEVFLAGS  0x3
+#define   OCTNET_CMD_RX_CTL           0x4
+
+#define	  OCTNET_CMD_SET_MULTI_LIST   0x5
+#define   OCTNET_CMD_CLEAR_STATS      0x6
+
+/* command for setting the speed, duplex & autoneg */
+#define   OCTNET_CMD_SET_SETTINGS     0x7
+#define   OCTNET_CMD_SET_FLOW_CTL     0x8
+
+#define   OCTNET_CMD_MDIO_READ_WRITE  0x9
+#define   OCTNET_CMD_GPIO_ACCESS      0xA
+#define   OCTNET_CMD_LRO_ENABLE       0xB
+#define   OCTNET_CMD_LRO_DISABLE      0xC
+#define   OCTNET_CMD_SET_RSS          0xD
+#define   OCTNET_CMD_WRITE_SA         0xE
+#define   OCTNET_CMD_DELETE_SA        0xF
+#define   OCTNET_CMD_UPDATE_SA        0x12
+
+#define   OCTNET_CMD_TNL_RX_CSUM_CTL 0x10
+#define   OCTNET_CMD_TNL_TX_CSUM_CTL 0x11
+#define   OCTNET_CMD_IPSECV2_AH_ESP_CTL 0x13
+#define   OCTNET_CMD_VERBOSE_ENABLE   0x14
+#define   OCTNET_CMD_VERBOSE_DISABLE  0x15
+
+/* RX(packets coming from wire) Checksum verification flags */
+/* TCP/UDP csum */
+#define   CNNIC_L4SUM_VERIFIED             0x1
+#define   CNNIC_IPSUM_VERIFIED             0x2
+#define   CNNIC_TUN_CSUM_VERIFIED          0x4
+#define   CNNIC_CSUM_VERIFIED (CNNIC_IPSUM_VERIFIED | CNNIC_L4SUM_VERIFIED)
+
+/*LROIPV4 and LROIPV6 Flags*/
+#define   OCTNIC_LROIPV4    0x1
+#define   OCTNIC_LROIPV6    0x2
+
+/* Interface flags communicated between host driver and core app. */
+enum octnet_ifflags {
+	OCTNET_IFFLAG_PROMISC   = 0x01,
+	OCTNET_IFFLAG_ALLMULTI  = 0x02,
+	OCTNET_IFFLAG_MULTICAST = 0x04,
+	OCTNET_IFFLAG_BROADCAST = 0x08,
+	OCTNET_IFFLAG_UNICAST   = 0x10
+};
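For illustration, an interface that accepts its own unicast traffic plus
broadcast and the programmed multicast list could be described as:

	u32 ifflags = OCTNET_IFFLAG_UNICAST |
		      OCTNET_IFFLAG_BROADCAST |
		      OCTNET_IFFLAG_MULTICAST;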
+
+/*   wqe
+ *  ---------------  0
+ * |  wqe  word0-3 |
+ *  ---------------  32
+ * |    PCI IH     |
+ *  ---------------  40
+ * |     RPTR      |
+ *  ---------------  48
+ * |    PCI IRH    |
+ *  ---------------  56
+ * |  OCT_NET_CMD  |
+ *  ---------------  64
+ * | Addtl 8-BData |
+ * |               |
+ *  ---------------
+ */
+
+union octnet_cmd {
+	u64 u64;
+
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		u64 cmd:5;
+
+		u64 more:6; /* How many udd words follow the command */
+
+		u64 param1:29;
+
+		u64 param2:16;
+
+		u64 param3:8;
+
+#else
+
+		u64 param3:8;
+
+		u64 param2:16;
+
+		u64 param1:29;
+
+		u64 more:6;
+
+		u64 cmd:5;
+
+#endif
+	} s;
+
+};
+
+#define   OCTNET_CMD_SIZE     (sizeof(union octnet_cmd))
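A hedged sketch of building one of these commands; which param slot carries
the interface index and which the new MTU is an assumption made here for
illustration, not something this header confirms:

	union octnet_cmd nc;

	nc.u64 = 0;
	nc.s.cmd = OCTNET_CMD_CHANGE_MTU;
	nc.s.param1 = ifidx;   /* assumed: interface index */
	nc.s.param2 = new_mtu; /* assumed: new MTU value */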
+
+/** Instruction Header */
+struct octeon_instr_ih {
+#ifdef __BIG_ENDIAN_BITFIELD
+	/** Raw mode indicator 1 = RAW */
+	u64 raw:1;
+
+	/** Gather indicator 1=gather*/
+	u64 gather:1;
+
+	/** Data length OR no. of entries in gather list */
+	u64 dlengsz:14;
+
+	/** Front Data size */
+	u64 fsz:6;
+
+	/** Packet Order / Work Unit selection (1 of 8)*/
+	u64 qos:3;
+
+	/** Core group selection (1 of 16) */
+	u64 grp:4;
+
+	/** Short Raw Packet Indicator 1=short raw pkt */
+	u64 rs:1;
+
+	/** Tag type */
+	u64 tagtype:2;
+
+	/** Tag Value */
+	u64 tag:32;
+#else
+	/** Tag Value */
+	u64 tag:32;
+
+	/** Tag type */
+	u64 tagtype:2;
+
+	/** Short Raw Packet Indicator 1=short raw pkt */
+	u64 rs:1;
+
+	/** Core group selection (1 of 16) */
+	u64 grp:4;
+
+	/** Packet Order / Work Unit selection (1 of 8)*/
+	u64 qos:3;
+
+	/** Front Data size */
+	u64 fsz:6;
+
+	/** Data length OR no. of entries in gather list */
+	u64 dlengsz:14;
+
+	/** Gather indicator 1=gather*/
+	u64 gather:1;
+
+	/** Raw mode indicator 1 = RAW */
+	u64 raw:1;
+#endif
+};
+
+/** Input Request Header */
+struct octeon_instr_irh {
+#ifdef __BIG_ENDIAN_BITFIELD
+	u64 opcode:4;
+	u64 rflag:1;
+	u64 subcode:7;
+	u64 len:3;
+	u64 rid:13;
+	u64 reserved:4;
+	u64 ossp:32;             /* opcode/subcode specific parameters */
+#else
+	u64 ossp:32;             /* opcode/subcode specific parameters */
+	u64 reserved:4;
+	u64 rid:13;
+	u64 len:3;
+	u64 subcode:7;
+	u64 rflag:1;
+	u64 opcode:4;
+#endif
+};
+
+/** Return Data Parameters */
+struct octeon_instr_rdp {
+#ifdef __BIG_ENDIAN_BITFIELD
+	u64 reserved:49;
+	u64 pcie_port:3;
+	u64 rlen:12;
+#else
+	u64 rlen:12;
+	u64 pcie_port:3;
+	u64 reserved:49;
+#endif
+};
+
+/** Receive Header */
+union octeon_rh {
+#ifdef __BIG_ENDIAN_BITFIELD
+	u64 u64;
+	struct {
+		u64 opcode:4;
+		u64 subcode:8;
+		u64 len:3;       /** additional 64-bit words */
+		u64 rid:13;      /** request id in response to pkt sent by host */
+		u64 reserved:4;
+		u64 ossp:32;     /** opcode/subcode specific parameters */
+	} r;
+	struct {
+		u64 opcode:4;
+		u64 subcode:8;
+		u64 len:3;       /** additional 64-bit words */
+		u64 rid:13;      /** request id in response to pkt sent by host */
+		u64 extra:24;
+		u64 link:8;
+		u64 csum_verified:3;     /** checksum verified. */
+		u64 has_hwtstamp:1;      /** Has hardware timestamp. 1 = yes. */
+	} r_dh;
+	struct {
+		u64 opcode:4;
+		u64 subcode:8;
+		u64 len:3;       /** additional 64-bit words */
+		u64 rid:13;      /** request id in response to pkt sent by host */
+		u64 num_gmx_ports:8;
+		u64 max_nic_ports:8;
+		u64 app_cap_flags:4;
+		u64 app_mode:16;
+	} r_core_drv_init;
+	struct {
+		u64 opcode:4;
+		u64 subcode:8;
+		u64 len:3;       /** additional 64-bit words */
+		u64 rid:13;
+		u64 reserved:4;
+		u64 extra:25;
+		u64 ifidx:7;
+	} r_nic_info;
+#else
+	u64 u64;
+	struct {
+		u64 ossp:32;  /** opcode/subcode specific parameters */
+		u64 reserved:4;
+		u64 rid:13;   /** req id in response to pkt sent by host */
+		u64 len:3;    /** additional 64-bit words */
+		u64 subcode:8;
+		u64 opcode:4;
+	} r;
+	struct {
+		u64 has_hwtstamp:1;      /** 1 = has hwtstamp */
+		u64 csum_verified:3;     /** checksum verified. */
+		u64 link:8;
+		u64 extra:24;
+		u64 rid:13;   /** req id in response to pkt sent by host */
+		u64 len:3;    /** additional 64-bit words */
+		u64 subcode:8;
+		u64 opcode:4;
+	} r_dh;
+	struct {
+		u64 app_mode:16;
+		u64 app_cap_flags:4;
+		u64 max_nic_ports:8;
+		u64 num_gmx_ports:8;
+		u64 rid:13;
+		u64 len:3;       /** additional 64-bit words */
+		u64 subcode:8;
+		u64 opcode:4;
+	} r_core_drv_init;
+	struct {
+		u64 ifidx:7;
+		u64 extra:25;
+		u64 reserved:4;
+		u64 rid:13;
+		u64 len:3;       /** additional 64-bit words */
+		u64 subcode:8;
+		u64 opcode:4;
+	} r_nic_info;
+#endif
+};
+
+#define  OCT_RH_SIZE   (sizeof(union  octeon_rh))
+
+#define OCT_PKT_PARAM_IPV4OPTS   1
+#define OCT_PKT_PARAM_IPV6EXTHDR 2
+
+union octnic_packet_params {
+	u32 u32;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		u32 reserved:6;
+		u32 tnl_csum:1;
+		u32 ip_csum:1;
+		u32 ipv4opts_ipv6exthdr:2;
+		u32 ipsec_ops:4;
+		u32 tsflag:1;
+		u32 csoffset:9;
+		u32 ifidx:8;
+#else
+		u32 ifidx:8;
+		u32 csoffset:9;
+		u32 tsflag:1;
+		u32 ipsec_ops:4;
+		u32 ipv4opts_ipv6exthdr:2;
+		u32 ip_csum:1;
+		u32 tnl_csum:1;
+		u32 reserved:6;
+#endif
+	} s;
+};
+
+/** Status of an RGMII link on Octeon as seen by the core driver. */
+union oct_link_status {
+	u64 u64;
+
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		u64 duplex:8;
+		u64 status:8;
+		u64 mtu:16;
+		u64 speed:16;
+		u64 autoneg:1;
+		u64 interface:4;
+		u64 pause:1;
+		u64 reserved:10;
+#else
+		u64 reserved:10;
+		u64 pause:1;
+		u64 interface:4;
+		u64 autoneg:1;
+		u64 speed:16;
+		u64 mtu:16;
+		u64 status:8;
+		u64 duplex:8;
+#endif
+	} s;
+};
+
+/** Information for an OCTEON ethernet interface shared between core & host. */
+struct oct_link_info {
+	union oct_link_status link;
+	u64 hw_addr;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	u16 gmxport;
+	u8 rsvd[3];
+	u8 num_txpciq;
+	u8 num_rxpciq;
+	u8 ifidx;
+#else
+	u8 ifidx;
+	u8 num_rxpciq;
+	u8 num_txpciq;
+	u8 rsvd[3];
+	u16 gmxport;
+#endif
+
+	u8 txpciq[MAX_IOQS_PER_NICIF];
+	u8 rxpciq[MAX_IOQS_PER_NICIF];
+};
+
+#define OCT_LINK_INFO_SIZE   (sizeof(struct oct_link_info))
+
+struct liquidio_if_cfg_info {
+	u64 ifidx;
+	u64 iqmask; /** mask for IQs enabled for  the port */
+	u64 oqmask; /** mask for OQs enabled for the port */
+	struct oct_link_info linfo; /** initial link information */
+};
+
+/** Stats for each NIC port in RX direction. */
+struct nic_rx_stats {
+	/* link-level stats */
+	u64 total_rcvd;
+	u64 bytes_rcvd;
+	u64 total_bcst;
+	u64 total_mcst;
+	u64 runts;
+	u64 ctl_rcvd;
+	u64 fifo_err;      /* Accounts for over/under-run of buffers */
+	u64 dmac_drop;
+	u64 fcs_err;
+	u64 jabber_err;
+	u64 l2_err;
+	u64 frame_err;
+
+	/* firmware stats */
+	u64 fw_total_rcvd;
+	u64 fw_total_fwd;
+	u64 fw_err_pko;
+	u64 fw_err_link;
+	u64 fw_err_drop;
+	u64 fw_lro_pkts;   /* Number of packets that are LROed      */
+	u64 fw_lro_octs;   /* Number of octets that are LROed       */
+	u64 fw_total_lro;  /* Number of LRO packets formed          */
+	u64 fw_lro_aborts; /* Number of times LRO of packet aborted */
+	/* intrmod: packet forward rate */
+	u64 fwd_rate;
+};
+
+/** Stats for each NIC port in TX direction. */
+struct nic_tx_stats {
+	/* link-level stats */
+	u64 total_pkts_sent;
+	u64 total_bytes_sent;
+	u64 mcast_pkts_sent;
+	u64 bcast_pkts_sent;
+	u64 ctl_sent;
+	u64 one_collision_sent;   /* Packets sent after one collision */
+	u64 multi_collision_sent; /* Packets sent after multiple collisions */
+	u64 max_collision_fail;   /* Packets not sent due to max collisions */
+	u64 max_deferral_fail;    /* Packets not sent due to max deferrals */
+	u64 fifo_err;       /* Accounts for over/under-run of buffers */
+	u64 runts;
+	u64 total_collisions; /* Total number of collisions detected */
+
+	/* firmware stats */
+	u64 fw_total_sent;
+	u64 fw_total_fwd;
+	u64 fw_err_pko;
+	u64 fw_err_link;
+	u64 fw_err_drop;
+};
+
+struct oct_link_stats {
+	struct nic_rx_stats fromwire;
+	struct nic_tx_stats fromhost;
+
+};
+
+#define LIO68XX_LED_CTRL_ADDR     0x3501
+#define LIO68XX_LED_CTRL_CFGON    0x1f
+#define LIO68XX_LED_CTRL_CFGOFF   0x100
+#define LIO68XX_LED_BEACON_ADDR   0x3508
+#define LIO68XX_LED_BEACON_CFGON  0x47fd
+#define LIO68XX_LED_BEACON_CFGOFF 0x11fc
+#define VITESSE_PHY_GPIO_DRIVEON  0x1
+#define VITESSE_PHY_GPIO_CFG      0x8
+#define VITESSE_PHY_GPIO_DRIVEOFF 0x4
+#define VITESSE_PHY_GPIO_HIGH     0x2
+#define VITESSE_PHY_GPIO_LOW      0x3
+
+struct oct_mdio_cmd {
+	u64 op;
+	u64 mdio_addr;
+	u64 value1;
+	u64 value2;
+	u64 value3;
+};
+
+#define OCT_LINK_STATS_SIZE   (sizeof(struct oct_link_stats))
+
+#define LIO_INTRMOD_CHECK_INTERVAL  1
+#define LIO_INTRMOD_MAXPKT_RATETHR  196608 /* max pkt rate threshold */
+#define LIO_INTRMOD_MINPKT_RATETHR  9216   /* min pkt rate threshold */
+#define LIO_INTRMOD_MAXCNT_TRIGGER  384    /* max pkts to trigger interrupt */
+#define LIO_INTRMOD_MINCNT_TRIGGER  1      /* min pkts to trigger interrupt */
+#define LIO_INTRMOD_MAXTMR_TRIGGER  128    /* max time to trigger interrupt */
+#define LIO_INTRMOD_MINTMR_TRIGGER  32     /* min time to trigger interrupt */
+
+struct oct_intrmod_cfg {
+	u64 intrmod_enable;
+	u64 intrmod_check_intrvl;
+	u64 intrmod_maxpkt_ratethr;
+	u64 intrmod_minpkt_ratethr;
+	u64 intrmod_maxcnt_trigger;
+	u64 intrmod_maxtmr_trigger;
+	u64 intrmod_mincnt_trigger;
+	u64 intrmod_mintmr_trigger;
+};
+
+#define BASE_QUEUE_NOT_REQUESTED 65535
+
+union oct_nic_if_cfg {
+	u64 u64;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		u64 base_queue:16;
+		u64 num_iqueues:16;
+		u64 num_oqueues:16;
+		u64 gmx_port_id:8;
+		u64 reserved:8;
+#else
+		u64 reserved:8;
+		u64 gmx_port_id:8;
+		u64 num_oqueues:16;
+		u64 num_iqueues:16;
+		u64 base_queue:16;
+#endif
+	} s;
+};
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_image.h b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h
new file mode 100644
index 0000000..93819bd
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h
@@ -0,0 +1,57 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#ifndef _LIQUIDIO_IMAGE_H_
+#define _LIQUIDIO_IMAGE_H_
+
+#define LIO_MAX_FW_TYPE_LEN     (8)
+#define LIO_MAX_FW_FILENAME_LEN (256)
+#define LIO_FW_DIR              "liquidio/"
+#define LIO_FW_BASE_NAME        "lio_"
+#define LIO_FW_NAME_SUFFIX      ".bin"
+#define LIO_FW_NAME_TYPE_NIC    "nic"
+#define LIO_FW_NAME_TYPE_NONE   "none"
+#define LIO_MAX_FIRMWARE_VERSION_LEN 16
+
+#define LIO_MAX_BOOTCMD_LEN 1024
+#define LIO_MAX_IMAGES 16
+#define LIO_NIC_MAGIC 0x434E4943     /* "CNIC" */
+struct octeon_firmware_desc {
+	__be64 addr;
+	__be32 len;
+	__be32 crc32;         /* crc32 of image */
+};
+
+/* Following the header is a list of 64-bit aligned binary images,
+ * as described by the desc field.
+ * Numeric fields are in network byte order.
+ */
+struct octeon_firmware_file_header {
+	__be32 magic;
+	char version[LIO_MAX_FIRMWARE_VERSION_LEN];
+	char bootcmd[LIO_MAX_BOOTCMD_LEN];
+	__be32 num_images;
+	struct octeon_firmware_desc desc[LIO_MAX_IMAGES];
+	__be32 pad;
+	__be32 crc32;         /* header checksum */
+};
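A minimal sketch of the sanity checks a loader might perform on this header,
using the kernel's be32_to_cpu() since the numeric fields are big endian
(check_fw_header is a hypothetical helper, not part of this patch):

	static int check_fw_header(const struct octeon_firmware_file_header *h)
	{
		if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC)
			return -EINVAL;		/* not a "CNIC" image */
		if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES)
			return -EINVAL;		/* descriptor table overrun */
		return 0;
	}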
+
+#endif /* _LIQUIDIO_IMAGE_H_ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
new file mode 100644
index 0000000..62a8dd5
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -0,0 +1,424 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file  octeon_config.h
+ *  \brief Host Driver: Configuration data structures for the host driver.
+ */
+
+#ifndef __OCTEON_CONFIG_H__
+#define __OCTEON_CONFIG_H__
+
+/*--------------------------CONFIG VALUES------------------------*/
+
+/* The following macros affect the way the driver data structures
+ * are generated for Octeon devices.
+ * They can be modified.
+ */
+
+/* Maximum octeon devices defined as MAX_OCTEON_NICIF to support
+ * multiple (<= MAX_OCTEON_NICIF) miniports
+ */
+#define   MAX_OCTEON_NICIF             32
+#define   MAX_OCTEON_DEVICES           MAX_OCTEON_NICIF
+#define   MAX_OCTEON_LINKS	       MAX_OCTEON_NICIF
+#define   MAX_OCTEON_MULTICAST_ADDR    32
+
+/* CN6xxx IQ configuration macros */
+#define   CN6XXX_MAX_INPUT_QUEUES      32
+#define   CN6XXX_MAX_IQ_DESCRIPTORS    2048
+#define   CN6XXX_DB_MIN                1
+#define   CN6XXX_DB_MAX                8
+#define   CN6XXX_DB_TIMEOUT            1
+
+/* CN6xxx OQ configuration macros */
+#define   CN6XXX_MAX_OUTPUT_QUEUES     32
+#define   CN6XXX_MAX_OQ_DESCRIPTORS    2048
+#define   CN6XXX_OQ_BUF_SIZE           1536
+#define   CN6XXX_OQ_PKTSPER_INTR       ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? \
+					(CN6XXX_MAX_OQ_DESCRIPTORS / 4) : 128)
+#define   CN6XXX_OQ_REFIL_THRESHOLD    ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? \
+					(CN6XXX_MAX_OQ_DESCRIPTORS / 4) : 128)
+
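With CN6XXX_MAX_OQ_DESCRIPTORS at 2048, the conditional in both of the macros
above takes its second branch, so the packets-per-interrupt count and the
refill threshold each evaluate to 128.
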
+#define   CN6XXX_OQ_INTR_PKT           64
+#define   CN6XXX_OQ_INTR_TIME          100
+#define   DEFAULT_NUM_NIC_PORTS_66XX   2
+#define   DEFAULT_NUM_NIC_PORTS_68XX   4
+#define   DEFAULT_NUM_NIC_PORTS_68XX_210NV  2
+
+/* common OCTEON configuration macros */
+#define   CN6XXX_CFG_IO_QUEUES         32
+#define   OCTEON_32BYTE_INSTR          32
+#define   OCTEON_64BYTE_INSTR          64
+#define   OCTEON_MAX_BASE_IOQ          4
+#define   OCTEON_OQ_BUFPTR_MODE        0
+#define   OCTEON_OQ_INFOPTR_MODE       1
+
+#define   OCTEON_DMA_INTR_PKT          64
+#define   OCTEON_DMA_INTR_TIME         1000
+
+#define MAX_TXQS_PER_INTF  8
+#define MAX_RXQS_PER_INTF  8
+#define DEF_TXQS_PER_INTF  4
+#define DEF_RXQS_PER_INTF  4
+
+#define INVALID_IOQ_NO          0xff
+
+#define   DEFAULT_POW_GRP       0
+
+/* Macros to get octeon config params */
+#define CFG_GET_IQ_CFG(cfg)                      ((cfg)->iq)
+#define CFG_GET_IQ_MAX_Q(cfg)                    ((cfg)->iq.max_iqs)
+#define CFG_GET_IQ_PENDING_LIST_SIZE(cfg)        ((cfg)->iq.pending_list_size)
+#define CFG_GET_IQ_INSTR_TYPE(cfg)               ((cfg)->iq.instr_type)
+#define CFG_GET_IQ_DB_MIN(cfg)                   ((cfg)->iq.db_min)
+#define CFG_GET_IQ_DB_TIMEOUT(cfg)               ((cfg)->iq.db_timeout)
+
+#define CFG_GET_OQ_MAX_Q(cfg)                    ((cfg)->oq.max_oqs)
+#define CFG_GET_OQ_INFO_PTR(cfg)                 ((cfg)->oq.info_ptr)
+#define CFG_GET_OQ_PKTS_PER_INTR(cfg)            ((cfg)->oq.pkts_per_intr)
+#define CFG_GET_OQ_REFILL_THRESHOLD(cfg)         ((cfg)->oq.refill_threshold)
+#define CFG_GET_OQ_INTR_PKT(cfg)                 ((cfg)->oq.oq_intr_pkt)
+#define CFG_GET_OQ_INTR_TIME(cfg)                ((cfg)->oq.oq_intr_time)
+#define CFG_SET_OQ_INTR_PKT(cfg, val)            (cfg)->oq.oq_intr_pkt = val
+#define CFG_SET_OQ_INTR_TIME(cfg, val)           (cfg)->oq.oq_intr_time = val
+
+#define CFG_GET_DMA_INTR_PKT(cfg)                ((cfg)->dma.dma_intr_pkt)
+#define CFG_GET_DMA_INTR_TIME(cfg)               ((cfg)->dma.dma_intr_time)
+#define CFG_GET_NUM_NIC_PORTS(cfg)               ((cfg)->num_nic_ports)
+#define CFG_GET_NUM_DEF_TX_DESCS(cfg)            ((cfg)->num_def_tx_descs)
+#define CFG_GET_NUM_DEF_RX_DESCS(cfg)            ((cfg)->num_def_rx_descs)
+#define CFG_GET_DEF_RX_BUF_SIZE(cfg)             ((cfg)->def_rx_buf_size)
+
+#define CFG_GET_MAX_TXQS_NIC_IF(cfg, idx) \
+				((cfg)->nic_if_cfg[idx].max_txqs)
+#define CFG_GET_NUM_TXQS_NIC_IF(cfg, idx) \
+				((cfg)->nic_if_cfg[idx].num_txqs)
+#define CFG_GET_MAX_RXQS_NIC_IF(cfg, idx) \
+				((cfg)->nic_if_cfg[idx].max_rxqs)
+#define CFG_GET_NUM_RXQS_NIC_IF(cfg, idx) \
+				((cfg)->nic_if_cfg[idx].num_rxqs)
+#define CFG_GET_NUM_RX_DESCS_NIC_IF(cfg, idx) \
+				((cfg)->nic_if_cfg[idx].num_rx_descs)
+#define CFG_GET_NUM_TX_DESCS_NIC_IF(cfg, idx) \
+				((cfg)->nic_if_cfg[idx].num_tx_descs)
+#define CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(cfg, idx) \
+				((cfg)->nic_if_cfg[idx].rx_buf_size)
+#define CFG_GET_BASE_QUE_NIC_IF(cfg, idx) \
+				((cfg)->nic_if_cfg[idx].base_queue)
+#define CFG_GET_GMXID_NIC_IF(cfg, idx) \
+				((cfg)->nic_if_cfg[idx].gmx_port_id)
+
+#define CFG_GET_CTRL_Q_GRP(cfg)                  ((cfg)->misc.ctrlq_grp)
+#define CFG_GET_HOST_LINK_QUERY_INTERVAL(cfg) \
+				((cfg)->misc.host_link_query_interval)
+#define CFG_GET_OCT_LINK_QUERY_INTERVAL(cfg) \
+				((cfg)->misc.oct_link_query_interval)
+#define CFG_GET_IS_SLI_BP_ON(cfg)                ((cfg)->misc.enable_sli_oq_bp)
+
+/* Max IOQs per OCTEON Link */
+#define MAX_IOQS_PER_NICIF              32
+
+enum lio_card_type {
+	LIO_210SV = 0, /* Two port, 66xx */
+	LIO_210NV,     /* Two port, 68xx */
+	LIO_410NV      /* Four port, 68xx */
+};
+
+#define LIO_210SV_NAME "210sv"
+#define LIO_210NV_NAME "210nv"
+#define LIO_410NV_NAME "410nv"
+
+/** Structure to define the configuration attributes for each Input queue.
+ *  Applicable to all Octeon processors
+ **/
+struct octeon_iq_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+	u64 reserved:32;
+
+	/** Minimum ticks to wait before checking for pending instructions. */
+	u64 db_timeout:16;
+
+	/** Minimum number of commands pending to be posted to Octeon
+	 *  before driver hits the Input queue doorbell.
+	 */
+	u64 db_min:8;
+
+	/** Command size - 32 or 64 bytes */
+	u64 instr_type:32;
+
+	/** Pending list size (usually set to the sum of the size of all Input
+	 *  queues)
+	 */
+	u64 pending_list_size:32;
+
+	/* Max number of IQs available */
+	u64 max_iqs:8;
+#else
+	/* Max number of IQs available */
+	u64 max_iqs:8;
+
+	/** Pending list size (usually set to the sum of the size of all Input
+	 *  queues)
+	 */
+	u64 pending_list_size:32;
+
+	/** Command size - 32 or 64 bytes */
+	u64 instr_type:32;
+
+	/** Minimum number of commands pending to be posted to Octeon
+	 *  before driver hits the Input queue doorbell.
+	 */
+	u64 db_min:8;
+
+	/** Minimum ticks to wait before checking for pending instructions. */
+	u64 db_timeout:16;
+
+	u64 reserved:32;
+#endif
+};
+
+/** Structure to define the configuration attributes for each Output queue.
+ *  Applicable to all Octeon processors
+ **/
+struct octeon_oq_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+	u64 reserved:16;
+
+	u64 pkts_per_intr:16;
+
+	/** Interrupt Coalescing (Time Interval). Octeon will interrupt the
+	 *  host if at least one packet was sent in the time interval specified
+	 *  by this field. The driver uses time interval interrupt coalescing
+	 *  by default. The time is specified in microseconds.
+	 */
+	u64 oq_intr_time:16;
+
+	/** Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+	 *  only if it sent as many packets as specified by this field. The
+	 *  driver usually does not use packet count interrupt coalescing.
+	 */
+	u64 oq_intr_pkt:16;
+
+	/** The number of buffers that were consumed during packet processing by
+	 *   the driver on this Output queue before the driver attempts to
+	 *   replenish the descriptor ring with new buffers.
+	 */
+	u64 refill_threshold:16;
+
+	/** If set, the Output queue uses info-pointer mode. (Default: 1) */
+	u64 info_ptr:32;
+
+	/* Max number of OQs available */
+	u64 max_oqs:8;
+
+#else
+	/* Max number of OQs available */
+	u64 max_oqs:8;
+
+	/** If set, the Output queue uses info-pointer mode. (Default: 1) */
+	u64 info_ptr:32;
+
+	/** The number of buffers that were consumed during packet processing by
+	 *   the driver on this Output queue before the driver attempts to
+	 *   replenish the descriptor ring with new buffers.
+	 */
+	u64 refill_threshold:16;
+
+	/** Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+	 *  only if it sent as many packets as specified by this field. The
+	 *  driver usually does not use packet count interrupt coalescing.
+	 */
+	u64 oq_intr_pkt:16;
+
+	/** Interrupt Coalescing (Time Interval). Octeon will interrupt the
+	 *  host if at least one packet was sent in the time interval specified
+	 *  by this field. The driver uses time interval interrupt coalescing
+	 *  by default. The time is specified in microseconds.
+	 */
+	u64 oq_intr_time:16;
+
+	u64 pkts_per_intr:16;
+
+	u64 reserved:16;
+#endif
+
+};
+
+/** This structure contains the NIC link configuration attributes,
+ *  common to all the OCTEON models.
+ */
+struct octeon_nic_if_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+	u64 reserved:56;
+
+	u64 base_queue:16;
+
+	u64 gmx_port_id:8;
+
+	/* SKB size. The buffer size need not change even for jumbo frames,
+	 * since Octeon can send a jumbo frame in 4 consecutive descriptors.
+	 */
+	u64 rx_buf_size:16;
+
+	/* Num of desc for tx rings */
+	u64 num_tx_descs:16;
+
+	/* Num of desc for rx rings */
+	u64 num_rx_descs:16;
+
+	/* Actual configured value. Range could be: 1...max_rxqs */
+	u64 num_rxqs:16;
+
+	/* Max Rxqs: Half for each of the two ports :max_oq/2  */
+	u64 max_rxqs:16;
+
+	/* Actual configured value. Range could be: 1...max_txqs */
+	u64 num_txqs:16;
+
+	/* Max Txqs: Half for each of the two ports :max_iq/2 */
+	u64 max_txqs:16;
+#else
+	/* Max Txqs: Half for each of the two ports :max_iq/2 */
+	u64 max_txqs:16;
+
+	/* Actual configured value. Range could be: 1...max_txqs */
+	u64 num_txqs:16;
+
+	/* Max Rxqs: Half for each of the two ports :max_oq/2  */
+	u64 max_rxqs:16;
+
+	/* Actual configured value. Range could be: 1...max_rxqs */
+	u64 num_rxqs:16;
+
+	/* Num of desc for rx rings */
+	u64 num_rx_descs:16;
+
+	/* Num of desc for tx rings */
+	u64 num_tx_descs:16;
+
+	/* SKB size. The buffer size need not change even for jumbo frames,
+	 * since Octeon can send a jumbo frame in 4 consecutive descriptors.
+	 */
+	u64 rx_buf_size:16;
+
+	u64 gmx_port_id:8;
+
+	u64 base_queue:16;
+
+	u64 reserved:56;
+#endif
+
+};
+
+/** Structure to define the configuration attributes for meta data.
+ *  Applicable to all Octeon processors.
+ */
+
+struct octeon_misc_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+	/** Host link status polling period */
+	u64 host_link_query_interval:32;
+	/** Oct link status polling period */
+	u64 oct_link_query_interval:32;
+
+	u64 enable_sli_oq_bp:1;
+	/** Control IQ Group */
+	u64 ctrlq_grp:4;
+#else
+	/** Control IQ Group */
+	u64 ctrlq_grp:4;
+	/** BP for SLI OQ */
+	u64 enable_sli_oq_bp:1;
+	/** Oct link status polling period */
+	u64 oct_link_query_interval:32;
+	/** Host link status polling period */
+	u64 host_link_query_interval:32;
+#endif
+};
+
+/** Structure to define the configuration for all OCTEON processors. */
+struct octeon_config {
+	u16 card_type;
+	char *card_name;
+
+	/** Input Queue attributes. */
+	struct octeon_iq_config iq;
+
+	/** Output Queue attributes. */
+	struct octeon_oq_config oq;
+
+	/** NIC Port Configuration */
+	struct octeon_nic_if_config nic_if_cfg[MAX_OCTEON_NICIF];
+
+	/** Miscellaneous attributes */
+	struct octeon_misc_config misc;
+
+	int num_nic_ports;
+
+	int num_def_tx_descs;
+
+	/* Num of desc for rx rings */
+	int num_def_rx_descs;
+
+	int def_rx_buf_size;
+
+};
+
+/* The following config values are fixed and should not be modified. */
+
+/* Maximum address space to be mapped for Octeon's BAR1 index-based access. */
+#define  MAX_BAR1_MAP_INDEX                     2
+#define  OCTEON_BAR1_ENTRY_SIZE         (4 * 1024 * 1024)
+
+/* BAR1 Index 0 to (MAX_BAR1_MAP_INDEX - 1) for normal mapped memory access.
+ * Bar1 register at MAX_BAR1_MAP_INDEX used by driver for dynamic access.
+ */
+#define  MAX_BAR1_IOREMAP_SIZE  ((MAX_BAR1_MAP_INDEX + 1) * \
+				 OCTEON_BAR1_ENTRY_SIZE)
+
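With MAX_BAR1_MAP_INDEX set to 2 and 4 MB entries, MAX_BAR1_IOREMAP_SIZE works
out to (2 + 1) * 4 MB = 12 MB of mapped BAR1 space.
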
+/* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking
+ * NoResponse Lists are now maintained with each IQ. (Dec' 2007).
+ */
+#define MAX_RESPONSE_LISTS           4
+
+/* Opcode hash bits. The opcode is hashed on its lower 6 bits to look up the
+ * dispatch table.
+ */
+ */
+#define OPCODE_MASK_BITS             6
+
+/* Mask for the 6-bit lookup hash */
+#define OCTEON_OPCODE_MASK           0x3f
+
+/* Size of the dispatch table. The 6-bit hash can index into 2^6 entries */
+#define DISPATCH_LIST_SIZE                      BIT(OPCODE_MASK_BITS)
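A one-line sketch of the lookup implied by these constants (opcode is a
hypothetical variable holding the received opcode):

	u32 idx = opcode & OCTEON_OPCODE_MASK; /* 0 .. DISPATCH_LIST_SIZE - 1 */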
+
+/* Maximum number of Octeon Instruction (command) queues */
+#define MAX_OCTEON_INSTR_QUEUES         CN6XXX_MAX_INPUT_QUEUES
+
+/* Maximum number of Octeon Output queues */
+#define MAX_OCTEON_OUTPUT_QUEUES        CN6XXX_MAX_OUTPUT_QUEUES
+
+#endif /* __OCTEON_CONFIG_H__  */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
new file mode 100644
index 0000000..466147e
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -0,0 +1,723 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/**
+ * @file octeon_console.c
+ */
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+static void octeon_remote_lock(void);
+static void octeon_remote_unlock(void);
+static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
+					     const char *name,
+					     u32 flags);
+
+#define MIN(a, b) min((a), (b))
+#define CAST_ULL(v) ((u64)(v))
+
+#define BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR    0x0006c008
+#define BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR     0x0006c004
+#define BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR   0x0006c000
+#define BOOTLOADER_PCI_READ_DESC_ADDR           0x0006c100
+#define BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN     248
+
+#define OCTEON_PCI_IO_BUF_OWNER_OCTEON    0x00000001
+#define OCTEON_PCI_IO_BUF_OWNER_HOST      0x00000002
+
+/** Can change without breaking ABI */
+#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64
+
+/** minimum alignment of bootmem allocated blocks */
+#define CVMX_BOOTMEM_ALIGNMENT_SIZE     (16ull)
+
+/** CVMX bootmem descriptor major version */
+#define CVMX_BOOTMEM_DESC_MAJ_VER   3
+/* CVMX bootmem descriptor minor version */
+#define CVMX_BOOTMEM_DESC_MIN_VER   0
+
+/* Current versions */
+#define OCTEON_PCI_CONSOLE_MAJOR_VERSION    1
+#define OCTEON_PCI_CONSOLE_MINOR_VERSION    0
+#define OCTEON_PCI_CONSOLE_BLOCK_NAME   "__pci_console"
+#define OCTEON_CONSOLE_POLL_INTERVAL_MS  100    /* 10 times per second */
+
+/* The first three members of cvmx_bootmem_desc are left in their original
+ * positions for backwards compatibility.
+ * Assumes a big endian target.
+ */
+struct cvmx_bootmem_desc {
+	/** spinlock to control access to list */
+	u32 lock;
+
+	/** flags for indicating various conditions */
+	u32 flags;
+
+	u64 head_addr;
+
+	/** incremented when incompatible changes are made */
+	u32 major_version;
+
+	/** incremented when compatible changes are made,
+	 *  reset to zero when major_version is incremented
+	 */
+	u32 minor_version;
+
+	u64 app_data_addr;
+	u64 app_data_size;
+
+	/** number of elements in named blocks array */
+	u32 nb_num_blocks;
+
+	/** length of name array in bootmem blocks */
+	u32 named_block_name_len;
+
+	/** address of named memory block descriptors */
+	u64 named_block_array_addr;
+};
+
+/* Structure that defines a single console.
+ *
+ * Note: when read_index == write_index, the buffer is empty.
+ * The actual usable size of each console buffer is buf_size - 1.
+ */
+struct octeon_pci_console {
+	u64 input_base_addr;
+	u32 input_read_index;
+	u32 input_write_index;
+	u64 output_base_addr;
+	u32 output_read_index;
+	u32 output_write_index;
+	u32 lock;
+	u32 buf_size;
+};
+
+/* This is the main container structure that contains all the information
+ * about all PCI consoles. The address of this structure is passed to various
+ * routines that operate on PCI consoles.
+ */
+struct octeon_pci_console_desc {
+	u32 major_version;
+	u32 minor_version;
+	u32 lock;
+	u32 flags;
+	u32 num_consoles;
+	u32 pad;
+	/* must be 64 bit aligned here... */
+	/* Array of addresses of octeon_pci_console structures */
+	u64 console_addr_array[0];
+	/* Implicit storage for console_addr_array */
+};
+
+/**
+ * This macro returns the size of a member of a structure.
+ * Logically it is the same as "sizeof(s::field)" in C++, but
+ * C lacks the "::" operator.
+ */
+#define SIZEOF_FIELD(s, field) sizeof(((s *)NULL)->field)
+
+/**
+ * This macro returns a member of the cvmx_bootmem_desc
+ * structure. These members can't be directly addressed as
+ * they might be in memory not directly reachable. In the case
+ * where bootmem is compiled with LINUX_HOST, the structure
+ * itself might be located on a remote Octeon. The argument
+ * "field" is the member name of the cvmx_bootmem_desc to read.
+ * Regardless of the type of the field, the return type is always
+ * a u64.
+ */
+#define CVMX_BOOTMEM_DESC_GET_FIELD(oct, field)                              \
+	__cvmx_bootmem_desc_get(oct, oct->bootmem_desc_addr,                 \
+				offsetof(struct cvmx_bootmem_desc, field),   \
+				SIZEOF_FIELD(struct cvmx_bootmem_desc, field))
+
+#define __cvmx_bootmem_lock(flags)
+#define __cvmx_bootmem_unlock(flags)
+
+/**
+ * This macro returns a member of the
+ * cvmx_bootmem_named_block_desc structure. These members can't
+ * be directly addressed as they might be in memory not directly
+ * reachable. In the case where bootmem is compiled with
+ * LINUX_HOST, the structure itself might be located on a remote
+ * Octeon. The argument "field" is the member name of the
+ * cvmx_bootmem_named_block_desc to read. Regardless of the type
+ * of the field, the return type is always a u64. The "addr"
+ * parameter is the physical address of the structure.
+ */
+#define CVMX_BOOTMEM_NAMED_GET_FIELD(oct, addr, field)                   \
+	__cvmx_bootmem_desc_get(oct, addr,                               \
+		offsetof(struct cvmx_bootmem_named_block_desc, field),   \
+		SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc, field))
+
+/**
+ * This function is the implementation of the get macros defined
+ * for individual structure members. The arguments are generated
+ * by the macros in order to read only the needed memory.
+ *
+ * @param oct    Pointer to current octeon device
+ * @param base   64bit physical address of the complete structure
+ * @param offset Offset from the beginning of the structure to the member being
+ *               accessed.
+ * @param size   Size of the structure member.
+ *
+ * @return Value of the structure member promoted into a u64.
+ */
+static inline u64 __cvmx_bootmem_desc_get(struct octeon_device *oct,
+					  u64 base,
+					  u32 offset,
+					  u32 size)
+{
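+	/* Setting bit 63 forms the 64-bit direct-mapped view of the
+	 * physical address that octeon_read_device_mem32/64() expect.
+	 */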
+	base = (1ull << 63) | (base + offset);
+	switch (size) {
+	case 4:
+		return octeon_read_device_mem32(oct, base);
+	case 8:
+		return octeon_read_device_mem64(oct, base);
+	default:
+		return 0;
+	}
+}
+
+/**
+ * This function retrieves the string name of a named block. It is
+ * more complicated than a simple memcpy() since the named block
+ * descriptor may not be directly accessible.
+ *
+ * @param addr   Physical address of the named block descriptor
+ * @param str    String to receive the named block string name
+ * @param len    Length of the block name; the buffer must hold len + 1
+ *               bytes and len must match the name length stored in the
+ *               bootmem descriptor.
+ */
+static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct,
+					u64 addr,
+					char *str,
+					u32 len)
+{
+	addr += offsetof(struct cvmx_bootmem_named_block_desc, name);
+	octeon_pci_read_core_mem(oct, addr, str, len);
+	str[len] = 0;
+}
+
+/* See header file for descriptions of functions */
+
+/**
+ * Check the version information on the bootmem descriptor
+ *
+ * @param exact_match
+ *               Exact major version to check against. A zero means
+ *               check that the version supports named blocks.
+ *
+ * @return Zero if the version is correct. Negative if the version is
+ *         incorrect. Failures also cause a message to be displayed.
+ */
+static int __cvmx_bootmem_check_version(struct octeon_device *oct,
+					u32 exact_match)
+{
+	u32 major_version;
+	u32 minor_version;
+
+	if (!oct->bootmem_desc_addr)
+		oct->bootmem_desc_addr =
+			octeon_read_device_mem64(oct,
+						 BOOTLOADER_PCI_READ_DESC_ADDR);
+	major_version =
+		(u32)CVMX_BOOTMEM_DESC_GET_FIELD(oct, major_version);
+	minor_version =
+		(u32)CVMX_BOOTMEM_DESC_GET_FIELD(oct, minor_version);
+	dev_dbg(&oct->pci_dev->dev, "%s: major_version=%d\n", __func__,
+		major_version);
+	if ((major_version > 3) ||
+	    (exact_match && major_version != exact_match)) {
+		dev_err(&oct->pci_dev->dev, "bootmem ver mismatch %d.%d addr:0x%llx\n",
+			major_version, minor_version,
+			CAST_ULL(oct->bootmem_desc_addr));
+		return -1;
+	} else {
+		return 0;
+	}
+}
+
+static const struct cvmx_bootmem_named_block_desc
+*__cvmx_bootmem_find_named_block_flags(struct octeon_device *oct,
+					const char *name, u32 flags)
+{
+	struct cvmx_bootmem_named_block_desc *desc =
+		&oct->bootmem_named_block_desc;
+	u64 named_addr = cvmx_bootmem_phy_named_block_find(oct, name, flags);
+
+	if (named_addr) {
+		desc->base_addr = CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr,
+							       base_addr);
+		desc->size =
+			CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr, size);
+		strncpy(desc->name, name, sizeof(desc->name));
+		desc->name[sizeof(desc->name) - 1] = 0;
+		return &oct->bootmem_named_block_desc;
+	} else {
+		return NULL;
+	}
+}
+
+static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
+					     const char *name,
+					     u32 flags)
+{
+	u64 result = 0;
+
+	__cvmx_bootmem_lock(flags);
+	if (!__cvmx_bootmem_check_version(oct, 3)) {
+		u32 i;
+		u64 named_block_array_addr =
+			CVMX_BOOTMEM_DESC_GET_FIELD(oct,
+						    named_block_array_addr);
+		u32 num_blocks = (u32)
+			CVMX_BOOTMEM_DESC_GET_FIELD(oct, nb_num_blocks);
+		u32 name_length = (u32)
+			CVMX_BOOTMEM_DESC_GET_FIELD(oct, named_block_name_len);
+		u64 named_addr = named_block_array_addr;
+
+		for (i = 0; i < num_blocks; i++) {
+			u64 named_size =
+				CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr,
+							     size);
+			if (name && named_size) {
+				char *name_tmp =
+					kmalloc(name_length + 1, GFP_KERNEL);
+				CVMX_BOOTMEM_NAMED_GET_NAME(oct, named_addr,
+							    name_tmp,
+							    name_length);
+				if (!strncmp(name, name_tmp, name_length)) {
+					result = named_addr;
+					kfree(name_tmp);
+					break;
+				}
+				kfree(name_tmp);
+			} else if (!name && !named_size) {
+				result = named_addr;
+				break;
+			}
+
+			named_addr +=
+				sizeof(struct cvmx_bootmem_named_block_desc);
+		}
+	}
+	__cvmx_bootmem_unlock(flags);
+	return result;
+}
+
+/**
+ * Find a named block on the remote Octeon
+ *
+ * @param name      Name of block to find
+ * @param base_addr Address the block is at (OUTPUT)
+ * @param size      The size of the block (OUTPUT)
+ *
+ * @return Zero on success, One on failure.
+ */
+static int octeon_named_block_find(struct octeon_device *oct, const char *name,
+				   u64 *base_addr, u64 *size)
+{
+	const struct cvmx_bootmem_named_block_desc *named_block;
+
+	octeon_remote_lock();
+	named_block = __cvmx_bootmem_find_named_block_flags(oct, name, 0);
+	octeon_remote_unlock();
+	if (named_block) {
+		*base_addr = named_block->base_addr;
+		*size = named_block->size;
+		return 0;
+	}
+	return 1;
+}
+
+static void octeon_remote_lock(void)
+{
+	/* fill this in if any sharing is needed */
+}
+
+static void octeon_remote_unlock(void)
+{
+	/* fill this in if any sharing is needed */
+}
+
+int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str,
+			    u32 wait_hundredths)
+{
+	u32 len = strlen(cmd_str);
+
+	dev_dbg(&oct->pci_dev->dev, "sending \"%s\" to bootloader\n", cmd_str);
+
+	if (len > BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN - 1) {
+		dev_err(&oct->pci_dev->dev, "Command string too long, max length is: %d\n",
+			BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN - 1);
+		return -1;
+	}
+
+	if (octeon_wait_for_bootloader(oct, wait_hundredths) != 0) {
+		dev_err(&oct->pci_dev->dev, "Bootloader not ready for command.\n");
+		return -1;
+	}
+
+	/* Write command to bootloader */
+	octeon_remote_lock();
+	octeon_pci_write_core_mem(oct, BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR,
+				  (u8 *)cmd_str, len);
+	octeon_write_device_mem32(oct, BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR,
+				  len);
+	octeon_write_device_mem32(oct, BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR,
+				  OCTEON_PCI_IO_BUF_OWNER_OCTEON);
+
+	/* Bootloader should accept command very quickly
+	 * if it really was ready
+	 */
+	if (octeon_wait_for_bootloader(oct, 200) != 0) {
+		octeon_remote_unlock();
+		dev_err(&oct->pci_dev->dev, "Bootloader did not accept command.\n");
+		return -1;
+	}
+	octeon_remote_unlock();
+	return 0;
+}
+
+int octeon_wait_for_bootloader(struct octeon_device *oct,
+			       u32 wait_time_hundredths)
+{
+	dev_dbg(&oct->pci_dev->dev, "waiting %d ms for bootloader\n",
+		wait_time_hundredths * 10);
+
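+	/* octeon_mem_access_ok() returns non-zero while DDR is still
+	 * inaccessible; polling the bootloader is pointless in that case.
+	 */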
+	if (octeon_mem_access_ok(oct))
+		return -1;
+
+	while (wait_time_hundredths > 0 &&
+	       octeon_read_device_mem32(oct,
+					BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR)
+	       != OCTEON_PCI_IO_BUF_OWNER_HOST) {
+		if (--wait_time_hundredths <= 0)
+			return -1;
+		schedule_timeout_uninterruptible(HZ / 100);
+	}
+	return 0;
+}
+
+static void octeon_console_handle_result(struct octeon_device *oct,
+					 size_t console_num,
+					 char *buffer, s32 bytes_read)
+{
+	struct octeon_console *console;
+
+	console = &oct->console[console_num];
+
+	console->waiting = 0;
+}
+
+static char console_buffer[OCTEON_CONSOLE_MAX_READ_BYTES];
+
+static void output_console_line(struct octeon_device *oct,
+				struct octeon_console *console,
+				size_t console_num,
+				char *console_buffer,
+				s32 bytes_read)
+{
+	char *line;
+	s32 i;
+
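+	/* Emit complete lines prefixed with the console number; anything
+	 * after the last newline is stashed in console->leftover for the
+	 * next poll.
+	 */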
+	line = console_buffer;
+	for (i = 0; i < bytes_read; i++) {
+		/* Output a line at a time, prefixed */
+		if (console_buffer[i] == '\n') {
+			console_buffer[i] = '\0';
+			if (console->leftover[0]) {
+				dev_info(&oct->pci_dev->dev, "%zu: %s%s\n",
+					 console_num, console->leftover,
+					 line);
+				console->leftover[0] = '\0';
+			} else {
+				dev_info(&oct->pci_dev->dev, "%zu: %s\n",
+					 console_num, line);
+			}
+			line = &console_buffer[i + 1];
+		}
+	}
+
+	/* Save off any leftovers */
+	if (line != &console_buffer[bytes_read]) {
+		console_buffer[bytes_read] = '\0';
+		strcpy(console->leftover, line);
+	}
+}
+
+static void check_console(struct work_struct *work)
+{
+	s32 bytes_read, tries, total_read;
+	struct octeon_console *console;
+	struct cavium_wk *wk = (struct cavium_wk *)work;
+	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
+	size_t console_num = wk->ctxul;
+	u32 delay;
+
+	console = &oct->console[console_num];
+	tries = 0;
+	total_read = 0;
+
+	do {
+		/* Take console output regardless of whether it will
+		 * be logged
+		 */
+		bytes_read =
+			octeon_console_read(oct, console_num, console_buffer,
+					    sizeof(console_buffer) - 1, 0);
+		if (bytes_read > 0) {
+			total_read += bytes_read;
+			if (console->waiting) {
+				octeon_console_handle_result(oct, console_num,
+							     console_buffer,
+							     bytes_read);
+			}
+			if (octeon_console_debug_enabled(console_num)) {
+				output_console_line(oct, console, console_num,
+						    console_buffer, bytes_read);
+			}
+		} else if (bytes_read < 0) {
+			dev_err(&oct->pci_dev->dev, "Error reading console %zu, ret=%d\n",
+				console_num, bytes_read);
+		}
+
+		tries++;
+	} while ((bytes_read > 0) && (tries < 16));
+
+	/* If nothing is read after polling the console,
+	 * output any leftovers if any
+	 */
+	if (octeon_console_debug_enabled(console_num) &&
+	    (total_read == 0) && (console->leftover[0])) {
+		dev_info(&oct->pci_dev->dev, "%zu: %s\n",
+			 console_num, console->leftover);
+		console->leftover[0] = '\0';
+	}
+
+	delay = OCTEON_CONSOLE_POLL_INTERVAL_MS;
+
+	schedule_delayed_work(&wk->work, msecs_to_jiffies(delay));
+}
+
+int octeon_init_consoles(struct octeon_device *oct)
+{
+	int ret = 0;
+	u64 addr, size;
+
+	ret = octeon_mem_access_ok(oct);
+	if (ret) {
+		dev_err(&oct->pci_dev->dev, "Memory access not okay\n");
+		return ret;
+	}
+
+	ret = octeon_named_block_find(oct, OCTEON_PCI_CONSOLE_BLOCK_NAME, &addr,
+				      &size);
+	if (ret) {
+		dev_err(&oct->pci_dev->dev, "Could not find console '%s'\n",
+			OCTEON_PCI_CONSOLE_BLOCK_NAME);
+		return ret;
+	}
+
+	/* A num_consoles value greater than 0 indicates that the consoles
+	 * are accessible.
+	 */
+	oct->num_consoles = octeon_read_device_mem32(oct,
+		addr + offsetof(struct octeon_pci_console_desc,
+			num_consoles));
+	oct->console_desc_addr = addr;
+
+	dev_dbg(&oct->pci_dev->dev, "Initialized consoles. %d available\n",
+		oct->num_consoles);
+
+	return ret;
+}
+
+int octeon_add_console(struct octeon_device *oct, u32 console_num)
+{
+	int ret = 0;
+	u32 delay;
+	u64 coreaddr;
+	struct delayed_work *work;
+	struct octeon_console *console;
+
+	if (console_num >= oct->num_consoles) {
+		dev_err(&oct->pci_dev->dev,
+			"trying to read from console number %d when only 0 to %d exist\n",
+			console_num, oct->num_consoles - 1);
+	} else {
+		console = &oct->console[console_num];
+
+		console->waiting = 0;
+
+		coreaddr = oct->console_desc_addr + console_num * 8 +
+			offsetof(struct octeon_pci_console_desc,
+				 console_addr_array);
+		console->addr = octeon_read_device_mem64(oct, coreaddr);
+		coreaddr = console->addr + offsetof(struct octeon_pci_console,
+						    buf_size);
+		console->buffer_size = octeon_read_device_mem32(oct, coreaddr);
+		coreaddr = console->addr + offsetof(struct octeon_pci_console,
+						    input_base_addr);
+		console->input_base_addr =
+			octeon_read_device_mem64(oct, coreaddr);
+		coreaddr = console->addr + offsetof(struct octeon_pci_console,
+						    output_base_addr);
+		console->output_base_addr =
+			octeon_read_device_mem64(oct, coreaddr);
+		console->leftover[0] = '\0';
+
+		work = &oct->console_poll_work[console_num].work;
+
+		INIT_DELAYED_WORK(work, check_console);
+		oct->console_poll_work[console_num].ctxptr = (void *)oct;
+		oct->console_poll_work[console_num].ctxul = console_num;
+		delay = OCTEON_CONSOLE_POLL_INTERVAL_MS;
+		schedule_delayed_work(work, msecs_to_jiffies(delay));
+
+		if (octeon_console_debug_enabled(console_num)) {
+			ret = octeon_console_send_cmd(oct,
+						      "setenv pci_console_active 1",
+						      2000);
+		}
+
+		console->active = 1;
+	}
+
+	return ret;
+}
+
+/**
+ * Removes all consoles
+ *
+ * @param oct         octeon device
+ */
+void octeon_remove_consoles(struct octeon_device *oct)
+{
+	u32 i;
+	struct octeon_console *console;
+
+	for (i = 0; i < oct->num_consoles; i++) {
+		console = &oct->console[i];
+
+		if (!console->active)
+			continue;
+
+		cancel_delayed_work_sync(&oct->console_poll_work[i].work);
+		console->addr = 0;
+		console->buffer_size = 0;
+		console->input_base_addr = 0;
+		console->output_base_addr = 0;
+	}
+
+	oct->num_consoles = 0;
+}
+
+static inline int octeon_console_free_bytes(u32 buffer_size,
+					    u32 wr_idx,
+					    u32 rd_idx)
+{
+	if (rd_idx >= buffer_size || wr_idx >= buffer_size)
+		return -1;
+
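+	/* Classic ring buffer arithmetic: one slot is kept empty so that a
+	 * full buffer can be told apart from an empty one, hence the
+	 * buffer_size - 1 term; the modulo handles index wrap-around.
+	 */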
+	return ((buffer_size - 1) - (wr_idx - rd_idx)) % buffer_size;
+}
+
+static inline int octeon_console_avail_bytes(u32 buffer_size,
+					     u32 wr_idx,
+					     u32 rd_idx)
+{
+	if (rd_idx >= buffer_size || wr_idx >= buffer_size)
+		return -1;
+
+	return buffer_size - 1 -
+	       octeon_console_free_bytes(buffer_size, wr_idx, rd_idx);
+}
+
+int octeon_console_read(struct octeon_device *oct, u32 console_num,
+			char *buffer, u32 buf_size, u32 flags)
+{
+	int bytes_to_read;
+	u32 rd_idx, wr_idx;
+	struct octeon_console *console;
+
+	if (console_num >= oct->num_consoles) {
+		dev_err(&oct->pci_dev->dev, "Attempted to read from disabled console %d\n",
+			console_num);
+		return 0;
+	}
+
+	console = &oct->console[console_num];
+
+	/* Check to see if any data is available.
+	 * Maybe optimize this with 64-bit read.
+	 */
+	rd_idx = octeon_read_device_mem32(oct, console->addr +
+		offsetof(struct octeon_pci_console, output_read_index));
+	wr_idx = octeon_read_device_mem32(oct, console->addr +
+		offsetof(struct octeon_pci_console, output_write_index));
+
+	bytes_to_read = octeon_console_avail_bytes(console->buffer_size,
+						   wr_idx, rd_idx);
+	if (bytes_to_read <= 0)
+		return bytes_to_read;
+
+	bytes_to_read = MIN(bytes_to_read, (s32)buf_size);
+
+	/* Check to see if what we want to read is not contiguous, and limit
+	 * ourselves to the contiguous block
+	 */
+	if (rd_idx + bytes_to_read >= console->buffer_size)
+		bytes_to_read = console->buffer_size - rd_idx;
+
+	octeon_pci_read_core_mem(oct, console->output_base_addr + rd_idx,
+				 buffer, bytes_to_read);
+	octeon_write_device_mem32(oct, console->addr +
+				  offsetof(struct octeon_pci_console,
+					   output_read_index),
+				  (rd_idx + bytes_to_read) %
+				  console->buffer_size);
+
+	return bytes_to_read;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
new file mode 100644
index 0000000..0d3106b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -0,0 +1,1309 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/crc32.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+/** Default configuration
+ *  for CN66XX OCTEON Models.
+ */
+static struct octeon_config default_cn66xx_conf = {
+	.card_type                              = LIO_210SV,
+	.card_name                              = LIO_210SV_NAME,
+
+	/** IQ attributes */
+	.iq					= {
+		.max_iqs			= CN6XXX_CFG_IO_QUEUES,
+		.pending_list_size		=
+			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
+		.instr_type			= OCTEON_64BYTE_INSTR,
+		.db_min				= CN6XXX_DB_MIN,
+		.db_timeout			= CN6XXX_DB_TIMEOUT,
+	},
+
+	/** OQ attributes */
+	.oq					= {
+		.max_oqs			= CN6XXX_CFG_IO_QUEUES,
+		.info_ptr			= OCTEON_OQ_INFOPTR_MODE,
+		.refill_threshold		= CN6XXX_OQ_REFIL_THRESHOLD,
+		.oq_intr_pkt			= CN6XXX_OQ_INTR_PKT,
+		.oq_intr_time			= CN6XXX_OQ_INTR_TIME,
+		.pkts_per_intr			= CN6XXX_OQ_PKTSPER_INTR,
+	},
+
+	.num_nic_ports				= DEFAULT_NUM_NIC_PORTS_66XX,
+	.num_def_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,
+	.num_def_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,
+	.def_rx_buf_size			= CN6XXX_OQ_BUF_SIZE,
+
+	/* For ethernet interface 0:  Port cfg Attributes */
+	.nic_if_cfg[0] = {
+		/* Max Txqs: Half for each of the two ports :max_iq/2 */
+		.max_txqs			= MAX_TXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_txqs */
+		.num_txqs			= DEF_TXQS_PER_INTF,
+
+		/* Max Rxqs: Half for each of the two ports :max_oq/2  */
+		.max_rxqs			= MAX_RXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_rxqs */
+		.num_rxqs			= DEF_RXQS_PER_INTF,
+
+		/* Num of desc for rx rings */
+		.num_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,
+
+		/* Num of desc for tx rings */
+		.num_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,
+
+		/* SKB size; we need not change the buffer size even for
+		 * jumbo frames: Octeon can send a jumbo frame in 4
+		 * consecutive descriptors.
+		 */
+		.rx_buf_size			= CN6XXX_OQ_BUF_SIZE,
+
+		.base_queue			= BASE_QUEUE_NOT_REQUESTED,
+
+		.gmx_port_id			= 0,
+	},
+
+	.nic_if_cfg[1] = {
+		/* Max Txqs: Half for each of the two ports :max_iq/2 */
+		.max_txqs			= MAX_TXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_txqs */
+		.num_txqs			= DEF_TXQS_PER_INTF,
+
+		/* Max Rxqs: Half for each of the two ports :max_oq/2  */
+		.max_rxqs			= MAX_RXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_rxqs */
+		.num_rxqs			= DEF_RXQS_PER_INTF,
+
+		/* Num of desc for rx rings */
+		.num_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,
+
+		/* Num of desc for tx rings */
+		.num_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,
+
+		/* SKB size; we need not change the buffer size even for
+		 * jumbo frames: Octeon can send a jumbo frame in 4
+		 * consecutive descriptors.
+		 */
+		.rx_buf_size			= CN6XXX_OQ_BUF_SIZE,
+
+		.base_queue			= BASE_QUEUE_NOT_REQUESTED,
+
+		.gmx_port_id			= 1,
+	},
+
+	/** Miscellaneous attributes */
+	.misc					= {
+		/* Host driver link query interval */
+		.oct_link_query_interval	= 100,
+
+		/* Octeon link query interval */
+		.host_link_query_interval	= 500,
+
+		.enable_sli_oq_bp		= 0,
+
+		/* Control queue group */
+		.ctrlq_grp			= 1,
+	},
+};
+
+/** Default configuration
+ *  for CN68XX OCTEON Model.
+ */
+
+static struct octeon_config default_cn68xx_conf = {
+	.card_type                              = LIO_410NV,
+	.card_name                              = LIO_410NV_NAME,
+
+	/** IQ attributes */
+	.iq					= {
+		.max_iqs			= CN6XXX_CFG_IO_QUEUES,
+		.pending_list_size		=
+			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
+		.instr_type			= OCTEON_64BYTE_INSTR,
+		.db_min				= CN6XXX_DB_MIN,
+		.db_timeout			= CN6XXX_DB_TIMEOUT,
+	},
+
+	/** OQ attributes */
+	.oq					= {
+		.max_oqs			= CN6XXX_CFG_IO_QUEUES,
+		.info_ptr			= OCTEON_OQ_INFOPTR_MODE,
+		.refill_threshold		= CN6XXX_OQ_REFIL_THRESHOLD,
+		.oq_intr_pkt			= CN6XXX_OQ_INTR_PKT,
+		.oq_intr_time			= CN6XXX_OQ_INTR_TIME,
+		.pkts_per_intr			= CN6XXX_OQ_PKTSPER_INTR,
+	},
+
+	.num_nic_ports				= DEFAULT_NUM_NIC_PORTS_68XX,
+	.num_def_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,
+	.num_def_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,
+	.def_rx_buf_size			= CN6XXX_OQ_BUF_SIZE,
+
+	.nic_if_cfg[0] = {
+		/* Max Txqs: Half for each of the two ports :max_iq/2 */
+		.max_txqs			= MAX_TXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_txqs */
+		.num_txqs			= DEF_TXQS_PER_INTF,
+
+		/* Max Rxqs: Half for each of the two ports :max_oq/2  */
+		.max_rxqs			= MAX_RXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_rxqs */
+		.num_rxqs			= DEF_RXQS_PER_INTF,
+
+		/* Num of desc for rx rings */
+		.num_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,
+
+		/* Num of desc for tx rings */
+		.num_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,
+
+		/* SKB size; we need not change the buffer size even for
+		 * jumbo frames: Octeon can send a jumbo frame in 4
+		 * consecutive descriptors.
+		 */
+		.rx_buf_size			= CN6XXX_OQ_BUF_SIZE,
+
+		.base_queue			= BASE_QUEUE_NOT_REQUESTED,
+
+		.gmx_port_id			= 0,
+	},
+
+	.nic_if_cfg[1] = {
+		/* Max Txqs: Half for each of the two ports :max_iq/2 */
+		.max_txqs			= MAX_TXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_txqs */
+		.num_txqs			= DEF_TXQS_PER_INTF,
+
+		/* Max Rxqs: Half for each of the two ports :max_oq/2  */
+		.max_rxqs			= MAX_RXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_rxqs */
+		.num_rxqs			= DEF_RXQS_PER_INTF,
+
+		/* Num of desc for rx rings */
+		.num_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,
+
+		/* Num of desc for tx rings */
+		.num_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,
+
+		/* SKB size; we need not change the buffer size even for
+		 * jumbo frames: Octeon can send a jumbo frame in 4
+		 * consecutive descriptors.
+		 */
+		.rx_buf_size			= CN6XXX_OQ_BUF_SIZE,
+
+		.base_queue			= BASE_QUEUE_NOT_REQUESTED,
+
+		.gmx_port_id			= 1,
+	},
+
+	.nic_if_cfg[2] = {
+		/* Max Txqs: Half for each of the two ports :max_iq/2 */
+		.max_txqs			= MAX_TXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_txqs */
+		.num_txqs			= DEF_TXQS_PER_INTF,
+
+		/* Max Rxqs: Half for each of the two ports :max_oq/2  */
+		.max_rxqs			= MAX_RXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_rxqs */
+		.num_rxqs			= DEF_RXQS_PER_INTF,
+
+		/* Num of desc for rx rings */
+		.num_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,
+
+		/* Num of desc for tx rings */
+		.num_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,
+
+		/* SKB size; we need not change the buffer size even for
+		 * jumbo frames: Octeon can send a jumbo frame in 4
+		 * consecutive descriptors.
+		 */
+		.rx_buf_size			= CN6XXX_OQ_BUF_SIZE,
+
+		.base_queue			= BASE_QUEUE_NOT_REQUESTED,
+
+		.gmx_port_id			= 2,
+	},
+
+	.nic_if_cfg[3] = {
+		/* Max Txqs: Half for each of the two ports :max_iq/2 */
+		.max_txqs			= MAX_TXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_txqs */
+		.num_txqs			= DEF_TXQS_PER_INTF,
+
+		/* Max Rxqs: Half for each of the two ports :max_oq/2  */
+		.max_rxqs			= MAX_RXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_rxqs */
+		.num_rxqs			= DEF_RXQS_PER_INTF,
+
+		/* Num of desc for rx rings */
+		.num_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,
+
+		/* Num of desc for tx rings */
+		.num_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,
+
+		/* SKB size; we need not change the buffer size even for
+		 * jumbo frames: Octeon can send a jumbo frame in 4
+		 * consecutive descriptors.
+		 */
+		.rx_buf_size			= CN6XXX_OQ_BUF_SIZE,
+
+		.base_queue			= BASE_QUEUE_NOT_REQUESTED,
+
+		.gmx_port_id			= 3,
+	},
+
+	/** Miscellaneous attributes */
+	.misc					= {
+		/* Host driver link query interval */
+		.oct_link_query_interval	= 100,
+
+		/* Octeon link query interval */
+		.host_link_query_interval	= 500,
+
+		.enable_sli_oq_bp		= 0,
+
+		/* Control queue group */
+		.ctrlq_grp			= 1,
+	},
+};
+
+/** Default configuration
+ *  for the CN68XX OCTEON Model (210NV card).
+ */
+static struct octeon_config default_cn68xx_210nv_conf = {
+	.card_type                              = LIO_210NV,
+	.card_name                              = LIO_210NV_NAME,
+
+	/** IQ attributes */
+
+	.iq					= {
+		.max_iqs			= CN6XXX_CFG_IO_QUEUES,
+		.pending_list_size		=
+			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
+		.instr_type			= OCTEON_64BYTE_INSTR,
+		.db_min				= CN6XXX_DB_MIN,
+		.db_timeout			= CN6XXX_DB_TIMEOUT,
+	},
+
+	/** OQ attributes */
+	.oq					= {
+		.max_oqs			= CN6XXX_CFG_IO_QUEUES,
+		.info_ptr			= OCTEON_OQ_INFOPTR_MODE,
+		.refill_threshold		= CN6XXX_OQ_REFIL_THRESHOLD,
+		.oq_intr_pkt			= CN6XXX_OQ_INTR_PKT,
+		.oq_intr_time			= CN6XXX_OQ_INTR_TIME,
+		.pkts_per_intr			= CN6XXX_OQ_PKTSPER_INTR,
+	},
+
+	.num_nic_ports			= DEFAULT_NUM_NIC_PORTS_68XX_210NV,
+	.num_def_rx_descs		= CN6XXX_MAX_OQ_DESCRIPTORS,
+	.num_def_tx_descs		= CN6XXX_MAX_IQ_DESCRIPTORS,
+	.def_rx_buf_size		= CN6XXX_OQ_BUF_SIZE,
+
+	.nic_if_cfg[0] = {
+		/* Max Txqs: Half for each of the two ports :max_iq/2 */
+		.max_txqs			= MAX_TXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_txqs */
+		.num_txqs			= DEF_TXQS_PER_INTF,
+
+		/* Max Rxqs: Half for each of the two ports :max_oq/2  */
+		.max_rxqs			= MAX_RXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_rxqs */
+		.num_rxqs			= DEF_RXQS_PER_INTF,
+
+		/* Num of desc for rx rings */
+		.num_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,
+
+		/* Num of desc for tx rings */
+		.num_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,
+
+		/* SKB size; we need not change the buffer size even for
+		 * jumbo frames: Octeon can send a jumbo frame in 4
+		 * consecutive descriptors.
+		 */
+		.rx_buf_size			= CN6XXX_OQ_BUF_SIZE,
+
+		.base_queue			= BASE_QUEUE_NOT_REQUESTED,
+
+		.gmx_port_id			= 0,
+	},
+
+	.nic_if_cfg[1] = {
+		/* Max Txqs: Half for each of the two ports :max_iq/2 */
+		.max_txqs			= MAX_TXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_txqs */
+		.num_txqs			= DEF_TXQS_PER_INTF,
+
+		/* Max Rxqs: Half for each of the two ports :max_oq/2  */
+		.max_rxqs			= MAX_RXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_rxqs */
+		.num_rxqs			= DEF_RXQS_PER_INTF,
+
+		/* Num of desc for rx rings */
+		.num_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,
+
+		/* Num of desc for tx rings */
+		.num_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,
+
+		/* SKB size; we need not change the buffer size even for
+		 * jumbo frames: Octeon can send a jumbo frame in 4
+		 * consecutive descriptors.
+		 */
+		.rx_buf_size			= CN6XXX_OQ_BUF_SIZE,
+
+		.base_queue			= BASE_QUEUE_NOT_REQUESTED,
+
+		.gmx_port_id			= 1,
+	},
+
+	/** Miscellaneous attributes */
+	.misc					= {
+		/* Host driver link query interval */
+		.oct_link_query_interval	= 100,
+
+		/* Octeon link query interval */
+		.host_link_query_interval	= 500,
+
+		.enable_sli_oq_bp		= 0,
+
+		/* Control queue group */
+		.ctrlq_grp			= 1,
+	},
+};
+
+enum {
+	OCTEON_CONFIG_TYPE_DEFAULT = 0,
+	NUM_OCTEON_CONFS,
+};
+
+static struct octeon_config_ptr {
+	u32 conf_type;
+} oct_conf_info[MAX_OCTEON_DEVICES] = {
+	{
+		OCTEON_CONFIG_TYPE_DEFAULT,
+	}, {
+		OCTEON_CONFIG_TYPE_DEFAULT,
+	}, {
+		OCTEON_CONFIG_TYPE_DEFAULT,
+	}, {
+		OCTEON_CONFIG_TYPE_DEFAULT,
+	},
+};
+
+static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
+	"BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
+	"IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
+	"DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
+	"HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
+	"INVALID"
+};
+
+static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = {
+	"BASE", "NIC", "UNKNOWN"};
+
+static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES];
+static u32 octeon_device_count;
+
+static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
+
+static void oct_set_config_info(int oct_id, int conf_type)
+{
+	if (conf_type < 0 || conf_type > (NUM_OCTEON_CONFS - 1))
+		conf_type = OCTEON_CONFIG_TYPE_DEFAULT;
+	oct_conf_info[oct_id].conf_type = conf_type;
+}
+
+void octeon_init_device_list(int conf_type)
+{
+	int i;
+
+	memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
+	for (i = 0; i <  MAX_OCTEON_DEVICES; i++)
+		oct_set_config_info(i, conf_type);
+}
+
+static void *__retrieve_octeon_config_info(struct octeon_device *oct,
+					   u16 card_type)
+{
+	u32 oct_id = oct->octeon_id;
+	void *ret = NULL;
+
+	switch (oct_conf_info[oct_id].conf_type) {
+	case OCTEON_CONFIG_TYPE_DEFAULT:
+		if (oct->chip_id == OCTEON_CN66XX) {
+			ret = (void *)&default_cn66xx_conf;
+		} else if ((oct->chip_id == OCTEON_CN68XX) &&
+			   (card_type == LIO_210NV)) {
+			ret =  (void *)&default_cn68xx_210nv_conf;
+		} else if ((oct->chip_id == OCTEON_CN68XX) &&
+			   (card_type == LIO_410NV)) {
+			ret =  (void *)&default_cn68xx_conf;
+		}
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
+{
+	switch (oct->chip_id) {
+	case OCTEON_CN66XX:
+	case OCTEON_CN68XX:
+		return lio_validate_cn6xxx_config_info(oct, conf);
+
+	default:
+		break;
+	}
+
+	return 1;
+}
+
+void *oct_get_config_info(struct octeon_device *oct, u16 card_type)
+{
+	void *conf = NULL;
+
+	conf = __retrieve_octeon_config_info(oct, card_type);
+	if (!conf)
+		return NULL;
+
+	if (__verify_octeon_config_info(oct, conf)) {
+		dev_err(&oct->pci_dev->dev, "Configuration verification failed\n");
+		return NULL;
+	}
+
+	return conf;
+}
+
+char *lio_get_state_string(atomic_t *state_ptr)
+{
+	s32 istate = (s32)atomic_read(state_ptr);
+
+	if (istate > OCT_DEV_STATES || istate < 0)
+		return oct_dev_state_str[OCT_DEV_STATE_INVALID];
+	return oct_dev_state_str[istate];
+}
+
+static char *get_oct_app_string(u32 app_mode)
+{
+	if (app_mode >= CVM_DRV_APP_START && app_mode <= CVM_DRV_APP_END)
+		return oct_dev_app_str[app_mode - CVM_DRV_APP_START];
+	return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
+}
+
+int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
+			     size_t size)
+{
+	int ret = 0;
+	u8 *p;
+	u8 *buffer;
+	u32 crc32_result;
+	u64 load_addr;
+	u32 image_len;
+	struct octeon_firmware_file_header *h;
+	u32 i;
+
+	if (size < sizeof(struct octeon_firmware_file_header)) {
+		dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
+			(u32)size,
+			(u32)sizeof(struct octeon_firmware_file_header));
+		return -EINVAL;
+	}
+
+	h = (struct octeon_firmware_file_header *)data;
+
+	if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC) {
+		dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n");
+		return -EINVAL;
+	}
+
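+	/* Standard CRC-32 (all-ones seed, result inverted) over the header,
+	 * excluding the trailing u32 that holds the expected CRC itself.
+	 */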
+	crc32_result =
+		crc32(~0, data,
+		      sizeof(struct octeon_firmware_file_header) -
+		      sizeof(u32)) ^ ~0U;
+	if (crc32_result != be32_to_cpu(h->crc32)) {
+		dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
+			crc32_result, be32_to_cpu(h->crc32));
+		return -EINVAL;
+	}
+
+	if (memcmp(LIQUIDIO_VERSION, h->version, strlen(LIQUIDIO_VERSION))) {
+		dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s, got %s.\n",
+			LIQUIDIO_VERSION, h->version);
+		return -EINVAL;
+	}
+
+	if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) {
+		dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n",
+			be32_to_cpu(h->num_images));
+		return -EINVAL;
+	}
+
+	dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version);
+	snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
+		 h->version);
+
+	buffer = kmalloc(size, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	memcpy(buffer, data, size);
+
+	p = buffer + sizeof(struct octeon_firmware_file_header);
+
+	/* load all images */
+	for (i = 0; i < be32_to_cpu(h->num_images); i++) {
+		load_addr = be64_to_cpu(h->desc[i].addr);
+		image_len = be32_to_cpu(h->desc[i].len);
+
+		/* validate the image */
+		crc32_result = crc32(~0, p, image_len) ^ ~0U;
+		if (crc32_result != be32_to_cpu(h->desc[i].crc32)) {
+			dev_err(&oct->pci_dev->dev,
+				"Firmware CRC mismatch in image %d (0x%08x != 0x%08x).\n",
+				i, crc32_result,
+				be32_to_cpu(h->desc[i].crc32));
+			ret = -EINVAL;
+			goto done_downloading;
+		}
+
+		/* download the image */
+		octeon_pci_write_core_mem(oct, load_addr, p, image_len);
+
+		p += image_len;
+		dev_dbg(&oct->pci_dev->dev,
+			"Downloaded image %d (%d bytes) to address 0x%016llx\n",
+			i, image_len, load_addr);
+	}
+
+	/* Invoke the bootcmd */
+	ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
+
+done_downloading:
+	kfree(buffer);
+
+	return ret;
+}
+
+void octeon_free_device_mem(struct octeon_device *oct)
+{
+	u32 i;
+
+	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+		/* could check  mask as well */
+		if (oct->droq[i])
+			vfree(oct->droq[i]);
+	}
+
+	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+		/* could check mask as well */
+		if (oct->instr_queue[i])
+			vfree(oct->instr_queue[i]);
+	}
+
+	i = oct->octeon_id;
+	vfree(oct);
+
+	octeon_device[i] = NULL;
+	octeon_device_count--;
+}
+
+static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
+							u32 priv_size)
+{
+	struct octeon_device *oct;
+	u8 *buf = NULL;
+	u32 octdevsize = 0, configsize = 0, size;
+
+	switch (pci_id) {
+	case OCTEON_CN68XX:
+	case OCTEON_CN66XX:
+		configsize = sizeof(struct octeon_cn6xxx);
+		break;
+
+	default:
+		pr_err("%s: Unknown PCI Device: 0x%x\n",
+		       __func__,
+		       pci_id);
+		return NULL;
+	}
+
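+	/* Round each sub-region up to an 8-byte boundary so the structures
+	 * carved out of the single allocation below stay naturally aligned.
+	 */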
+	if (configsize & 0x7)
+		configsize += (8 - (configsize & 0x7));
+
+	octdevsize = sizeof(struct octeon_device);
+	if (octdevsize & 0x7)
+		octdevsize += (8 - (octdevsize & 0x7));
+
+	if (priv_size & 0x7)
+		priv_size += (8 - (priv_size & 0x7));
+
+	size = octdevsize + priv_size + configsize +
+		(sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);
+
+	buf = vzalloc(size);
+	if (!buf)
+		return NULL;
+
+	oct = (struct octeon_device *)buf;
+	oct->priv = (void *)(buf + octdevsize);
+	oct->chip = (void *)(buf + octdevsize + priv_size);
+	oct->dispatch.dlist = (struct octeon_dispatch *)
+		(buf + octdevsize + priv_size + configsize);
+
+	return oct;
+}
+
+struct octeon_device *octeon_allocate_device(u32 pci_id,
+					     u32 priv_size)
+{
+	u32 oct_idx = 0;
+	struct octeon_device *oct = NULL;
+
+	for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++)
+		if (!octeon_device[oct_idx])
+			break;
+
+	if (oct_idx == MAX_OCTEON_DEVICES)
+		return NULL;
+
+	oct = octeon_allocate_device_mem(pci_id, priv_size);
+	if (!oct)
+		return NULL;
+
+	spin_lock_init(&oct->pci_win_lock);
+	spin_lock_init(&oct->mem_access_lock);
+
+	octeon_device_count++;
+	octeon_device[oct_idx] = oct;
+
+	oct->octeon_id = oct_idx;
+	snprintf(oct->device_name, sizeof(oct->device_name),
+		 "LiquidIO%d", oct->octeon_id);
+
+	return oct;
+}
+
+int octeon_setup_instr_queues(struct octeon_device *oct)
+{
+	u32 i, num_iqs = 0;
+	u32 num_descs = 0;
+
+	/* this causes queue 0 to be default queue */
+	if (OCTEON_CN6XXX(oct)) {
+		num_iqs = 1;
+		num_descs =
+			CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
+	}
+
+	oct->num_iqs = 0;
+
+	for (i = 0; i < num_iqs; i++) {
+		oct->instr_queue[i] =
+			vzalloc(sizeof(struct octeon_instr_queue));
+		if (!oct->instr_queue[i])
+			return 1;
+
+		oct->instr_queue[i]->app_ctx = (void *)(size_t)i;
+		if (octeon_init_instr_queue(oct, i, num_descs))
+			return 1;
+
+		oct->num_iqs++;
+	}
+
+	return 0;
+}
+
+int octeon_setup_output_queues(struct octeon_device *oct)
+{
+	u32 i, num_oqs = 0;
+	u32 num_descs = 0;
+	u32 desc_size = 0;
+
+	/* this causes queue 0 to be default queue */
+	if (OCTEON_CN6XXX(oct)) {
+		/* CFG_GET_OQ_MAX_BASE_Q(CHIP_FIELD(oct, cn6xxx, conf)); */
+		num_oqs = 1;
+		num_descs =
+			CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
+		desc_size =
+			CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn6xxx, conf));
+	}
+
+	oct->num_oqs = 0;
+
+	for (i = 0; i < num_oqs; i++) {
+		oct->droq[i] = vzalloc(sizeof(*oct->droq[i]));
+		if (!oct->droq[i])
+			return 1;
+
+		if (octeon_init_droq(oct, i, num_descs, desc_size, NULL))
+			return 1;
+
+		oct->num_oqs++;
+	}
+
+	return 0;
+}
+
+void octeon_set_io_queues_off(struct octeon_device *oct)
+{
+	/* Disable the input and output queues for this Octeon. */
+
+	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
+	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
+}
+
+void octeon_set_droq_pkt_op(struct octeon_device *oct,
+			    u32 q_no,
+			    u32 enable)
+{
+	u32 reg_val = 0;
+
+	/* Read the current output queue enable bits, then set or clear
+	 * the bit for this queue.
+	 */
+	reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+
+	if (enable)
+		reg_val |= (1 << q_no);
+	else
+		reg_val &= ~(1 << q_no);
+
+	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
+}
+
+int octeon_init_dispatch_list(struct octeon_device *oct)
+{
+	u32 i;
+
+	oct->dispatch.count = 0;
+
+	for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
+		oct->dispatch.dlist[i].opcode = 0;
+		INIT_LIST_HEAD(&oct->dispatch.dlist[i].list);
+	}
+
+	for (i = 0; i <= REQTYPE_LAST; i++)
+		octeon_register_reqtype_free_fn(oct, i, NULL);
+
+	spin_lock_init(&oct->dispatch.lock);
+
+	return 0;
+}
+
+void octeon_delete_dispatch_list(struct octeon_device *oct)
+{
+	u32 i;
+	struct list_head freelist, *temp, *tmp2;
+
+	INIT_LIST_HEAD(&freelist);
+
+	spin_lock_bh(&oct->dispatch.lock);
+
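+	/* Move every chained entry onto a private freelist so that vfree()
+	 * can be called after the dispatch lock has been dropped.
+	 */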
+	for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
+		struct list_head *dispatch;
+
+		dispatch = &oct->dispatch.dlist[i].list;
+		while (dispatch->next != dispatch) {
+			temp = dispatch->next;
+			list_del(temp);
+			list_add_tail(temp, &freelist);
+		}
+
+		oct->dispatch.dlist[i].opcode = 0;
+	}
+
+	oct->dispatch.count = 0;
+
+	spin_unlock_bh(&oct->dispatch.lock);
+
+	list_for_each_safe(temp, tmp2, &freelist) {
+		list_del(temp);
+		vfree(temp);
+	}
+}
+
+octeon_dispatch_fn_t
+octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
+		    u16 subcode)
+{
+	u32 idx;
+	struct list_head *dispatch;
+	octeon_dispatch_fn_t fn = NULL;
+	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
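+	/* The combined opcode is hashed by masking; entries that collide
+	 * are chained on the list hanging off each first-level slot.
+	 */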
+	idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+	spin_lock_bh(&octeon_dev->dispatch.lock);
+
+	if (octeon_dev->dispatch.count == 0) {
+		spin_unlock_bh(&octeon_dev->dispatch.lock);
+		return NULL;
+	}
+
+	if (!(octeon_dev->dispatch.dlist[idx].opcode)) {
+		spin_unlock_bh(&octeon_dev->dispatch.lock);
+		return NULL;
+	}
+
+	if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
+		fn = octeon_dev->dispatch.dlist[idx].dispatch_fn;
+	} else {
+		list_for_each(dispatch,
+			      &octeon_dev->dispatch.dlist[idx].list) {
+			if (((struct octeon_dispatch *)dispatch)->opcode ==
+			    combined_opcode) {
+				fn = ((struct octeon_dispatch *)
+				      dispatch)->dispatch_fn;
+				break;
+			}
+		}
+	}
+
+	spin_unlock_bh(&octeon_dev->dispatch.lock);
+	return fn;
+}
+
+/* octeon_register_dispatch_fn
+ * Parameters:
+ *   octeon_id - id of the octeon device.
+ *   opcode    - opcode for which driver should call the registered function
+ *   subcode   - subcode for which driver should call the registered function
+ *   fn        - The function to call when a packet with "opcode" arrives in
+ *		  octeon output queues.
+ *   fn_arg    - The argument to be passed when calling function "fn".
+ * Description:
+ *   Registers a function and its argument to be called when a packet
+ *   arrives in Octeon output queues with "opcode".
+ * Returns:
+ *   Success: 0
+ *   Failure: 1
+ * Locks:
+ *   No locks are held.
+ */
+int
+octeon_register_dispatch_fn(struct octeon_device *oct,
+			    u16 opcode,
+			    u16 subcode,
+			    octeon_dispatch_fn_t fn, void *fn_arg)
+{
+	u32 idx;
+	octeon_dispatch_fn_t pfn;
+	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
+	idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+	spin_lock_bh(&oct->dispatch.lock);
+	/* Add dispatch function to first level of lookup table */
+	if (oct->dispatch.dlist[idx].opcode == 0) {
+		oct->dispatch.dlist[idx].opcode = combined_opcode;
+		oct->dispatch.dlist[idx].dispatch_fn = fn;
+		oct->dispatch.dlist[idx].arg = fn_arg;
+		oct->dispatch.count++;
+		spin_unlock_bh(&oct->dispatch.lock);
+		return 0;
+	}
+
+	spin_unlock_bh(&oct->dispatch.lock);
+
+	/* Check if there was a function already registered for this
+	 * opcode/subcode.
+	 */
+	pfn = octeon_get_dispatch(oct, opcode, subcode);
+	if (!pfn) {
+		struct octeon_dispatch *dispatch;
+
+		dev_dbg(&oct->pci_dev->dev,
+			"Adding opcode to dispatch list linked list\n");
+		dispatch = vmalloc(sizeof(struct octeon_dispatch));
+		if (!dispatch) {
+			dev_err(&oct->pci_dev->dev,
+				"No memory to add dispatch function\n");
+			return 1;
+		}
+		dispatch->opcode = combined_opcode;
+		dispatch->dispatch_fn = fn;
+		dispatch->arg = fn_arg;
+
+		/* Add dispatch function to linked list of fn ptrs
+		 * at the hashed index.
+		 */
+		spin_lock_bh(&oct->dispatch.lock);
+		list_add(&dispatch->list, &oct->dispatch.dlist[idx].list);
+		oct->dispatch.count++;
+		spin_unlock_bh(&oct->dispatch.lock);
+
+	} else {
+		dev_err(&oct->pci_dev->dev,
+			"Found previously registered dispatch fn for opcode/subcode: %x/%x\n",
+			opcode, subcode);
+		return 1;
+	}
+
+	return 0;
+}
+
+/* octeon_unregister_dispatch_fn
+ * Parameters:
+ *   oct       - octeon device
+ *   opcode    - driver should unregister the function for this opcode
+ *   subcode   - driver should unregister the function for this subcode
+ * Description:
+ *   Unregister the function set for this opcode+subcode.
+ * Returns:
+ *   Success: 0
+ *   Failure: 1
+ * Locks:
+ *   No locks are held.
+ */
+int
+octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode,
+			      u16 subcode)
+{
+	int retval = 0;
+	u32 idx;
+	struct list_head *dispatch, *dfree = NULL, *tmp2;
+	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
+	idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+	spin_lock_bh(&oct->dispatch.lock);
+
+	if (oct->dispatch.count == 0) {
+		spin_unlock_bh(&oct->dispatch.lock);
+		dev_err(&oct->pci_dev->dev,
+			"No dispatch functions registered for this device\n");
+		return 1;
+	}
+
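+	/* If the first-level slot itself holds the opcode, promote the first
+	 * chained entry (if any) into the slot; otherwise search the chain.
+	 */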
+	if (oct->dispatch.dlist[idx].opcode == combined_opcode) {
+		dispatch = &oct->dispatch.dlist[idx].list;
+		if (dispatch->next != dispatch) {
+			dispatch = dispatch->next;
+			oct->dispatch.dlist[idx].opcode =
+				((struct octeon_dispatch *)dispatch)->opcode;
+			oct->dispatch.dlist[idx].dispatch_fn =
+				((struct octeon_dispatch *)
+				 dispatch)->dispatch_fn;
+			oct->dispatch.dlist[idx].arg =
+				((struct octeon_dispatch *)dispatch)->arg;
+			list_del(dispatch);
+			dfree = dispatch;
+		} else {
+			oct->dispatch.dlist[idx].opcode = 0;
+			oct->dispatch.dlist[idx].dispatch_fn = NULL;
+			oct->dispatch.dlist[idx].arg = NULL;
+		}
+	} else {
+		retval = 1;
+		list_for_each_safe(dispatch, tmp2,
+				   &oct->dispatch.dlist[idx].list) {
+			if (((struct octeon_dispatch *)dispatch)->opcode ==
+			    combined_opcode) {
+				list_del(dispatch);
+				dfree = dispatch;
+				retval = 0;
+			}
+		}
+	}
+
+	if (!retval)
+		oct->dispatch.count--;
+
+	spin_unlock_bh(&oct->dispatch.lock);
+
+	if (dfree)
+		vfree(dfree);
+
+	return retval;
+}
+
+int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
+{
+	u32 i;
+	char app_name[16];
+	struct octeon_device *oct = (struct octeon_device *)buf;
+	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
+	struct octeon_core_setup *cs = NULL;
+	u32 num_nic_ports = 0;
+
+	if (OCTEON_CN6XXX(oct))
+		num_nic_ports =
+			CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn6xxx, conf));
+
+	if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
+		dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
+			atomic_read(&oct->status));
+		goto core_drv_init_err;
+	}
+
+	strncpy(app_name,
+		get_oct_app_string(
+		(u32)recv_pkt->rh.r_core_drv_init.app_mode),
+		sizeof(app_name) - 1);
+	app_name[sizeof(app_name) - 1] = '\0';
+	oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
+	if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) {
+		oct->fw_info.max_nic_ports =
+			(u32)recv_pkt->rh.r_core_drv_init.max_nic_ports;
+		oct->fw_info.num_gmx_ports =
+			(u32)recv_pkt->rh.r_core_drv_init.num_gmx_ports;
+	}
+
+	if (oct->fw_info.max_nic_ports < num_nic_ports) {
+		dev_err(&oct->pci_dev->dev,
+			"Config has more ports than firmware allows (%d > %d).\n",
+			num_nic_ports, oct->fw_info.max_nic_ports);
+		goto core_drv_init_err;
+	}
+	oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags;
+	oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
+
+	atomic_set(&oct->status, OCT_DEV_CORE_OK);
+
+	cs = &core_setup[oct->octeon_id];
+
+	if (recv_pkt->buffer_size[0] != sizeof(*cs)) {
+		dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n",
+			(u32)sizeof(*cs),
+			recv_pkt->buffer_size[0]);
+	}
+
+	memcpy(cs, get_rbd(recv_pkt->buffer_ptr[0]), sizeof(*cs));
+	strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
+	strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
+		OCT_SERIAL_LEN);
+
+	octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3));
+
+	oct->boardinfo.major = cs->board_rev_major;
+	oct->boardinfo.minor = cs->board_rev_minor;
+
+	dev_info(&oct->pci_dev->dev,
+		 "Running %s (%llu Hz)\n",
+		 app_name, CVM_CAST64(cs->corefreq));
+
+core_drv_init_err:
+	for (i = 0; i < recv_pkt->buffer_count; i++)
+		recv_buffer_free(recv_pkt->buffer_ptr[i]);
+	octeon_free_recv_info(recv_info);
+	return 0;
+}
+
+int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
+{
+	if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES) &&
+	    (oct->io_qmask.iq & (1UL << q_no)))
+		return oct->instr_queue[q_no]->max_count;
+
+	return -1;
+}
+
+int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
+{
+	if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES) &&
+	    (oct->io_qmask.oq & (1UL << q_no)))
+		return oct->droq[q_no]->max_count;
+	return -1;
+}
+
+/* Returns the OCTEON-specific configuration set up by the host/firmware handshake */
+struct octeon_config *octeon_get_conf(struct octeon_device *oct)
+{
+	struct octeon_config *default_oct_conf = NULL;
+
+	/* check the OCTEON Device model & return the corresponding octeon
+	 * configuration
+	 */
+
+	if (OCTEON_CN6XXX(oct)) {
+		default_oct_conf =
+			(struct octeon_config *)(CHIP_FIELD(oct, cn6xxx, conf));
+	}
+
+	return default_oct_conf;
+}
+
+/* The scratch register address is the same in all OCT-II and CN70XX models */
+#define CNXX_SLI_SCRATCH1   0x3C0
+
+/** Get the octeon device pointer.
+ *  @param octeon_id  - The id for which the octeon device pointer is required.
+ *  @return Success: Octeon device pointer.
+ *  @return Failure: NULL.
+ */
+struct octeon_device *lio_get_device(u32 octeon_id)
+{
+	if (octeon_id >= MAX_OCTEON_DEVICES)
+		return NULL;
+
+	return octeon_device[octeon_id];
+}
+
+u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
+{
+	u64 val64;
+	unsigned long flags;
+	u32 val32, addrhi;
+
+	spin_lock_irqsave(&oct->pci_win_lock, flags);
+
+	/* The windowed read happens when the LSB of the addr is written.
+	 * So write MSB first
+	 */
+	addrhi = (addr >> 32);
+	if ((oct->chip_id == OCTEON_CN66XX) || (oct->chip_id == OCTEON_CN68XX))
+		addrhi |= 0x00060000;
+	writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);
+
+	/* Read back to preserve ordering of writes */
+	val32 = readl(oct->reg_list.pci_win_rd_addr_hi);
+
+	writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
+	val32 = readl(oct->reg_list.pci_win_rd_addr_lo);
+
+	val64 = readq(oct->reg_list.pci_win_rd_data);
+
+	spin_unlock_irqrestore(&oct->pci_win_lock, flags);
+
+	return val64;
+}
+
+void lio_pci_writeq(struct octeon_device *oct,
+		    u64 val,
+		    u64 addr)
+{
+	u32 val32;
+	unsigned long flags;
+
+	spin_lock_irqsave(&oct->pci_win_lock, flags);
+
+	writeq(addr, oct->reg_list.pci_win_wr_addr);
+
+	/* The write happens when the LSB is written. So write MSB first. */
+	writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
+	/* Read the MSB to ensure ordering of writes. */
+	val32 = readl(oct->reg_list.pci_win_wr_data_hi);
+
+	writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);
+
+	spin_unlock_irqrestore(&oct->pci_win_lock, flags);
+}
+
+int octeon_mem_access_ok(struct octeon_device *oct)
+{
+	u64 access_okay = 0;
+
+	/* Check to make sure a DDR interface is enabled */
+	u64 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
+
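+	/* A non-zero DDR3RST field indicates the DDR3 interface is out of
+	 * reset, i.e. memory can be accessed.
+	 */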
+	access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
+
+	return access_okay ? 0 : 1;
+}
+
+int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
+{
+	int ret = 1;
+	u32 ms;
+
+	if (!timeout)
+		return ret;
+
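+	/* A timeout of 0 means: wait until the caller stores a non-zero
+	 * value before polling begins.
+	 */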
+	while (*timeout == 0)
+		schedule_timeout_uninterruptible(HZ / 10);
+
+	for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
+	     ms += 100) {
+		ret = octeon_mem_access_ok(oct);
+
+		/* wait 100 ms */
+		if (ret)
+			schedule_timeout_uninterruptible(HZ / 10);
+	}
+
+	return ret;
+}
+
+/** Get the octeon id assigned to the octeon device passed as argument.
+ *  This function is exported to other modules.
+ *  @param dev - octeon device pointer passed as a void *.
+ *  @return octeon device id
+ */
+int lio_get_device_id(void *dev)
+{
+	struct octeon_device *octeon_dev = (struct octeon_device *)dev;
+	u32 i;
+
+	for (i = 0; i < MAX_OCTEON_DEVICES; i++)
+		if (octeon_device[i] == octeon_dev)
+			return octeon_dev->octeon_id;
+	return -1;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
new file mode 100644
index 0000000..36e1f85
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -0,0 +1,649 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file octeon_device.h
+ *  \brief Host Driver: This file defines the octeon device structure.
+ */
+
+#ifndef _OCTEON_DEVICE_H_
+#define  _OCTEON_DEVICE_H_
+
+/** PCI VendorId Device Id */
+#define  OCTEON_CN68XX_PCIID          0x91177d
+#define  OCTEON_CN66XX_PCIID          0x92177d
+
+/** Driver identifies chips by these Ids, created by clubbing together
+ *  DeviceId+RevisionId; Where Revision Id is not used to distinguish
+ *  between chips, a value of 0 is used for revision id.
+ */
+#define  OCTEON_CN68XX                0x0091
+#define  OCTEON_CN66XX                0x0092
+
+/** Endian-swap modes supported by Octeon. */
+enum octeon_pci_swap_mode {
+	OCTEON_PCI_PASSTHROUGH = 0,
+	OCTEON_PCI_64BIT_SWAP = 1,
+	OCTEON_PCI_32BIT_BYTE_SWAP = 2,
+	OCTEON_PCI_32BIT_LW_SWAP = 3
+};
+
+/*---------------   PCI BAR1 index registers -------------*/
+
+/* BAR1 Mask */
+#define    PCI_BAR1_ENABLE_CA            1
+#define    PCI_BAR1_ENDIAN_MODE          OCTEON_PCI_64BIT_SWAP
+#define    PCI_BAR1_ENTRY_VALID          1
+#define    PCI_BAR1_MASK                 ((PCI_BAR1_ENABLE_CA << 3)   \
+					    | (PCI_BAR1_ENDIAN_MODE << 1) \
+					    | PCI_BAR1_ENTRY_VALID)
+
+/** Octeon Device state.
+ *  Each octeon device goes through each of these states
+ *  as it is initialized.
+ */
+#define    OCT_DEV_BEGIN_STATE            0x0
+#define    OCT_DEV_PCI_MAP_DONE           0x1
+#define    OCT_DEV_DISPATCH_INIT_DONE     0x2
+#define    OCT_DEV_INSTR_QUEUE_INIT_DONE  0x3
+#define    OCT_DEV_SC_BUFF_POOL_INIT_DONE 0x4
+#define    OCT_DEV_RESP_LIST_INIT_DONE    0x5
+#define    OCT_DEV_DROQ_INIT_DONE         0x6
+#define    OCT_DEV_IO_QUEUES_DONE         0x7
+#define    OCT_DEV_CONSOLE_INIT_DONE      0x8
+#define    OCT_DEV_HOST_OK                0x9
+#define    OCT_DEV_CORE_OK                0xa
+#define    OCT_DEV_RUNNING                0xb
+#define    OCT_DEV_IN_RESET               0xc
+#define    OCT_DEV_STATE_INVALID          0xd
+
+#define    OCT_DEV_STATES                 OCT_DEV_STATE_INVALID
+
+/** Octeon Device interrupts
+ *  These interrupt bits are set in the int_status field of
+ *  the octeon_device structure.
+ */
+#define	   OCT_DEV_INTR_DMA0_FORCE	  0x01
+#define	   OCT_DEV_INTR_DMA1_FORCE	  0x02
+#define	   OCT_DEV_INTR_PKT_DATA	  0x04
+
+#define LIO_RESET_SECS (3)
+
+/*---------------------------DISPATCH LIST-------------------------------*/
+
+/** The dispatch list entry.
+ *  The driver keeps a record of functions registered for each
+ *  response header opcode in this structure. Since the opcode is
+ *  hashed to index into the driver's list, more than one opcode
+ *  can hash to the same entry, in which case the list field points
+ *  to a linked list with the other entries.
+ */
+struct octeon_dispatch {
+	/** List head for this entry */
+	struct list_head list;
+
+	/** The opcode for which the dispatch function & arg should be used */
+	u16 opcode;
+
+	/** The function to be called for a packet received by the driver */
+	octeon_dispatch_fn_t dispatch_fn;
+
+	/* The application specified argument to be passed to the above
+	 * function along with the received packet
+	 */
+	void *arg;
+};
+
+/** The dispatch list structure. */
+struct octeon_dispatch_list {
+	/** access to dispatch list must be atomic */
+	spinlock_t lock;
+
+	/** Count of dispatch functions currently registered */
+	u32 count;
+
+	/** The list of dispatch functions */
+	struct octeon_dispatch *dlist;
+};
+
+/*-----------------------  THE OCTEON DEVICE  ---------------------------*/
+
+#define OCT_MEM_REGIONS     3
+/** PCI address space mapping information.
+ *  Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
+ *  Octeon gets mapped to different physical address spaces in
+ *  the kernel.
+ */
+struct octeon_mmio {
+	/** PCI address to which the BAR is mapped. */
+	u64 start;
+
+	/** Length of this PCI address space. */
+	u32 len;
+
+	/** Length that has been mapped to phys. address space. */
+	u32 mapped_len;
+
+	/** The physical address to which the PCI address space is mapped. */
+	u8 __iomem *hw_addr;
+
+	/** Flag indicating the mapping was successful. */
+	u32 done;
+};
+
+#define   MAX_OCTEON_MAPS    32
+
+struct octeon_io_enable {
+	u32 iq;
+	u32 oq;
+	u32 iq64B;
+};
+
+struct octeon_reg_list {
+	u32 __iomem *pci_win_wr_addr_hi;
+	u32 __iomem *pci_win_wr_addr_lo;
+	u64 __iomem *pci_win_wr_addr;
+
+	u32 __iomem *pci_win_rd_addr_hi;
+	u32 __iomem *pci_win_rd_addr_lo;
+	u64 __iomem *pci_win_rd_addr;
+
+	u32 __iomem *pci_win_wr_data_hi;
+	u32 __iomem *pci_win_wr_data_lo;
+	u64 __iomem *pci_win_wr_data;
+
+	u32 __iomem *pci_win_rd_data_hi;
+	u32 __iomem *pci_win_rd_data_lo;
+	u64 __iomem *pci_win_rd_data;
+};
+
+#define OCTEON_CONSOLE_MAX_READ_BYTES 512
+struct octeon_console {
+	u32 active;
+	u32 waiting;
+	u64 addr;
+	u32 buffer_size;
+	u64 input_base_addr;
+	u64 output_base_addr;
+	char leftover[OCTEON_CONSOLE_MAX_READ_BYTES];
+};
+
+struct octeon_board_info {
+	char name[OCT_BOARD_NAME];
+	char serial_number[OCT_SERIAL_LEN];
+	u64 major;
+	u64 minor;
+};
+
+struct octeon_fn_list {
+	void (*setup_iq_regs)(struct octeon_device *, u32);
+	void (*setup_oq_regs)(struct octeon_device *, u32);
+
+	irqreturn_t (*process_interrupt_regs)(void *);
+	int (*soft_reset)(struct octeon_device *);
+	int (*setup_device_regs)(struct octeon_device *);
+	void (*reinit_regs)(struct octeon_device *);
+	void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int);
+	void (*bar1_idx_write)(struct octeon_device *, u32, u32);
+	u32 (*bar1_idx_read)(struct octeon_device *, u32);
+	u32 (*update_iq_read_idx)(struct octeon_device *,
+				  struct octeon_instr_queue *);
+
+	void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32);
+	void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32);
+
+	void (*enable_interrupt)(void *);
+	void (*disable_interrupt)(void *);
+
+	void (*enable_io_queues)(struct octeon_device *);
+	void (*disable_io_queues)(struct octeon_device *);
+};
+
+/* Must be multiple of 8, changing breaks ABI */
+#define CVMX_BOOTMEM_NAME_LEN 128
+
+/* Structure for named memory blocks.
+ * The number of descriptors available can be changed without affecting
+ * compatibility, but name length changes require a bump in the bootmem
+ * descriptor version.
+ * Note: This structure must be naturally 64 bit aligned, as a single
+ * memory image will be used by both 32 and 64 bit programs.
+ */
+struct cvmx_bootmem_named_block_desc {
+	/** Base address of named block */
+	u64 base_addr;
+
+	/** Size actually allocated for named block */
+	u64 size;
+
+	/** name of named block */
+	char name[CVMX_BOOTMEM_NAME_LEN];
+};
+
+struct oct_fw_info {
+	u32 max_nic_ports;      /** max nic ports for the device */
+	u32 num_gmx_ports;      /** num gmx ports */
+	u64 app_cap_flags;      /** firmware cap flags */
+
+	/** The core application is running in this mode.
+	 * See octeon-drv-opcodes.h for values.
+	 */
+	u32 app_mode;
+	char   liquidio_firmware_version[32];
+};
+
+/* wrappers around work structs */
+struct cavium_wk {
+	struct delayed_work work;
+	void *ctxptr;
+	size_t ctxul;
+};
+
+struct cavium_wq {
+	struct workqueue_struct *wq;
+	struct cavium_wk wk;
+};
+
+struct octdev_props {
+	/* Each interface in the Octeon device has a network
+	 * device pointer (used for OS specific calls).
+	 */
+	struct net_device *netdev;
+};
+
+/** The Octeon device.
+ *  Each Octeon device has this structure to represent all its
+ *  components.
+ */
+struct octeon_device {
+	/** Lock for PCI window configuration accesses */
+	spinlock_t pci_win_lock;
+
+	/** Lock for memory accesses */
+	spinlock_t mem_access_lock;
+
+	/** PCI device pointer */
+	struct pci_dev *pci_dev;
+
+	/** Chip specific information. */
+	void *chip;
+
+	/** Number of interfaces detected in this octeon device. */
+	u32 ifcount;
+
+	struct octdev_props props[MAX_OCTEON_LINKS];
+
+	/** Octeon Chip type. */
+	u16 chip_id;
+	u16 rev_id;
+
+	/** This device's id - set by the driver. */
+	u32 octeon_id;
+
+	/** This device's PCIe port used for traffic. */
+	u16 pcie_port;
+
+	u16 flags;
+#define LIO_FLAG_MSI_ENABLED                  (u32)(1 << 1)
+#define LIO_FLAG_MSIX_ENABLED                 (u32)(1 << 2)
+
+	/** The state of this device */
+	atomic_t status;
+
+	/** memory mapped io range */
+	struct octeon_mmio mmio[OCT_MEM_REGIONS];
+
+	struct octeon_reg_list reg_list;
+
+	struct octeon_fn_list fn_list;
+
+	struct octeon_board_info boardinfo;
+
+	u32 num_iqs;
+
+	/* The pool containing pre allocated buffers used for soft commands */
+	struct octeon_sc_buffer_pool	sc_buf_pool;
+
+	/** The input instruction queues */
+	struct octeon_instr_queue *instr_queue[MAX_OCTEON_INSTR_QUEUES];
+
+	/** The doubly-linked list of instruction response */
+	struct octeon_response_list response_list[MAX_RESPONSE_LISTS];
+
+	u32 num_oqs;
+
+	/** The DROQ output queues  */
+	struct octeon_droq *droq[MAX_OCTEON_OUTPUT_QUEUES];
+
+	struct octeon_io_enable io_qmask;
+
+	/** List of dispatch functions */
+	struct octeon_dispatch_list dispatch;
+
+	/* Interrupt Moderation */
+	struct oct_intrmod_cfg intrmod;
+
+	u32 int_status;
+
+	u64 droq_intr;
+
+	/** Physical location of the cvmx_bootmem_desc_t in octeon memory */
+	u64 bootmem_desc_addr;
+
+	/** Placeholder memory for named blocks.
+	 * Assumes single-threaded access
+	 */
+	struct cvmx_bootmem_named_block_desc bootmem_named_block_desc;
+
+	/** Address of consoles descriptor */
+	u64 console_desc_addr;
+
+	/** Number of consoles available. 0 means they are inaccessible */
+	u32 num_consoles;
+
+	/* Console caches */
+	struct octeon_console console[MAX_OCTEON_MAPS];
+
+	/* Coprocessor clock rate. */
+	u64 coproc_clock_rate;
+
+	/** The core application is running in this mode. See liquidio_common.h
+	 * for values.
+	 */
+	u32 app_mode;
+
+	struct oct_fw_info fw_info;
+
+	/** The name given to this device. */
+	char device_name[32];
+
+	/** Application Context */
+	void *app_ctx;
+
+	struct cavium_wq dma_comp_wq;
+
+	struct cavium_wq check_db_wq[MAX_OCTEON_INSTR_QUEUES];
+
+	struct cavium_wk nic_poll_work;
+
+	struct cavium_wk console_poll_work[MAX_OCTEON_MAPS];
+
+	void *priv;
+};
+
+#define  OCTEON_CN6XXX(oct)           (((oct)->chip_id == OCTEON_CN66XX) || \
+				       ((oct)->chip_id == OCTEON_CN68XX))
+#define CHIP_FIELD(oct, TYPE, field)             \
+	(((struct octeon_ ## TYPE  *)((oct)->chip))->field)
+
+struct oct_intrmod_cmd {
+	struct octeon_device *oct_dev;
+	struct octeon_soft_command *sc;
+	struct oct_intrmod_cfg *cfg;
+};
+
+/*------------------ Function Prototypes ----------------------*/
+
+/** Initialize device list memory */
+void octeon_init_device_list(int conf_type);
+
+/** Free memory for Input and Output queue structures for a octeon device */
+void octeon_free_device_mem(struct octeon_device *);
+
+/* Look up a free entry in the octeon_device table and allocate resources
+ * for the octeon_device structure for an octeon device. Called at init
+ * time.
+ */
+struct octeon_device *octeon_allocate_device(u32 pci_id,
+					     u32 priv_size);
+
+/**  Initialize the driver's dispatch list which is a mix of a hash table
+ *  and a linked list. This is done at driver load time.
+ *  @param octeon_dev - pointer to the octeon device structure.
+ *  @return 0 on success, else a negative error value
+ */
+int octeon_init_dispatch_list(struct octeon_device *octeon_dev);
+
+/**  Delete the driver's dispatch list and all registered entries.
+ * This is done at driver unload time.
+ *  @param octeon_dev - pointer to the octeon device structure.
+ */
+void octeon_delete_dispatch_list(struct octeon_device *octeon_dev);
+
+/** Initialize the core device fields with the info returned by the FW.
+ * @param recv_info - Receive info structure
+ * @param buf       - Receive buffer
+ */
+int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf);
+
+/** Gets the dispatch function registered to receive packets with a
+ *  given opcode/subcode.
+ *  @param  octeon_dev  - the octeon device pointer.
+ *  @param  opcode      - the opcode for which the dispatch function
+ *                        is to checked.
+ *  @param  subcode     - the subcode for which the dispatch function
+ *                        is to checked.
+ *
+ *  @return Success: octeon_dispatch_fn_t (dispatch function pointer)
+ *  @return Failure: NULL
+ *
+ *  Looks up the dispatch list to get the dispatch function for a
+ *  given opcode.
+ */
+octeon_dispatch_fn_t
+octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
+		    u16 subcode);
+
+/** Get the octeon device pointer.
+ *  @param octeon_id  - The id for which the octeon device pointer is required.
+ *  @return Success: Octeon device pointer.
+ *  @return Failure: NULL.
+ */
+struct octeon_device *lio_get_device(u32 octeon_id);
+
+/** Get the octeon id assigned to the octeon device passed as argument.
+ *  This function is exported to other modules.
+ *  @param dev - octeon device pointer passed as a void *.
+ *  @return octeon device id
+ */
+int lio_get_device_id(void *dev);
+
+static inline u16 OCTEON_MAJOR_REV(struct octeon_device *oct)
+{
+	u16 rev = (oct->rev_id & 0xC) >> 2;
+
+	return (rev == 0) ? 1 : rev;
+}
+
+static inline u16 OCTEON_MINOR_REV(struct octeon_device *oct)
+{
+	return oct->rev_id & 0x3;
+}
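+
+/* Example (value illustrative): rev_id 0x6 decodes as major
+ * ((0x6 & 0xC) >> 2) = 1 and minor (0x6 & 0x3) = 2, i.e. pass 1.2.
+ * A major field of 0 is reported as 1, since pass numbering starts at 1.
+ */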
+
+/** Read windowed register.
+ *  @param  oct   -  pointer to the Octeon device.
+ *  @param  addr  -  Address of the register to read.
+ *
+ *  This routine is called to read from the indirectly accessed
+ *  Octeon registers that are visible through a PCI BAR0 mapped window
+ *  register.
+ *  @return  - 64 bit value read from the register.
+ */
+u64 lio_pci_readq(struct octeon_device *oct, u64 addr);
+
+/** Write windowed register.
+ *  @param  oct  -  pointer to the Octeon device.
+ *  @param  val  -  Value to write
+ *  @param  addr -  Address of the register to write
+ *
+ *  This routine is called to write to the indirectly accessed
+ *  Octeon registers that are visible through a PCI BAR0 mapped window
+ *  register.
+ *  @return   Nothing.
+ */
+void lio_pci_writeq(struct octeon_device *oct, u64 val, u64 addr);
+
+/* Routines for reading and writing CSRs */
+#define   octeon_write_csr(oct_dev, reg_off, value) \
+		writel(value, oct_dev->mmio[0].hw_addr + reg_off)
+
+#define   octeon_write_csr64(oct_dev, reg_off, val64) \
+		writeq(val64, oct_dev->mmio[0].hw_addr + reg_off)
+
+#define   octeon_read_csr(oct_dev, reg_off)         \
+		readl(oct_dev->mmio[0].hw_addr + reg_off)
+
+#define   octeon_read_csr64(oct_dev, reg_off)         \
+		readq(oct_dev->mmio[0].hw_addr + reg_off)
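+
+/* Usage sketch (REG_OFF is a hypothetical register offset; assumes BAR0
+ * is already mapped): read-modify-write of a 32-bit CSR.
+ *
+ *	u32 v = octeon_read_csr(oct, REG_OFF);
+ *
+ *	octeon_write_csr(oct, REG_OFF, v | 0x1);
+ *
+ * The interrupt-enable path in octeon_process_droq_poll_cmd() follows
+ * this same pattern on CN6XXX_SLI_PKT_TIME_INT_ENB.
+ */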
+
+/**
+ * Checks if memory access is okay
+ *
+ * @param oct which octeon to check
+ * @return Zero on success, negative on failure.
+ */
+int octeon_mem_access_ok(struct octeon_device *oct);
+
+/**
+ * Waits for DDR initialization.
+ *
+ * @param oct which octeon to wait on
+ * @param timeout_in_ms pointer to how long to wait, in ms, until DDR is
+ *                      initialized. If contents are 0, it waits until
+ *                      contents are non-zero before starting to check.
+ * @return Zero on success, negative on failure.
+ */
+int octeon_wait_for_ddr_init(struct octeon_device *oct,
+			     u32 *timeout_in_ms);
+
+/**
+ * Wait for u-boot to boot and be waiting for a command.
+ *
+ * @param wait_time_hundredths
+ *               Maximum time to wait
+ *
+ * @return Zero on success, negative on failure.
+ */
+int octeon_wait_for_bootloader(struct octeon_device *oct,
+			       u32 wait_time_hundredths);
+
+/**
+ * Initialize console access
+ *
+ * @param oct which octeon to initialize
+ * @return Zero on success, negative on failure.
+ */
+int octeon_init_consoles(struct octeon_device *oct);
+
+/**
+ * Adds access to a console to the device.
+ *
+ * @param oct which octeon to add to
+ * @param console_num which console
+ * @return Zero on success, negative on failure.
+ */
+int octeon_add_console(struct octeon_device *oct, u32 console_num);
+
+/** write or read from a console */
+int octeon_console_write(struct octeon_device *oct, u32 console_num,
+			 char *buffer, u32 write_request_size, u32 flags);
+int octeon_console_write_avail(struct octeon_device *oct, u32 console_num);
+int octeon_console_read(struct octeon_device *oct, u32 console_num,
+			char *buffer, u32 buf_size, u32 flags);
+int octeon_console_read_avail(struct octeon_device *oct, u32 console_num);
+
+/** Removes all attached consoles. */
+void octeon_remove_consoles(struct octeon_device *oct);
+
+/**
+ * Send a string to u-boot on console 0 as a command.
+ *
+ * @param oct which octeon to send to
+ * @param cmd_str String to send
+ * @param wait_hundredths Time to wait for u-boot to accept the command.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str,
+			    u32 wait_hundredths);
+
+/** Parses, validates, and downloads firmware, then boots associated cores.
+ *  @param oct which octeon to download firmware to
+ *  @param data  - The complete firmware file image
+ *  @param size  - The size of the data
+ *
+ *  @return 0 if success.
+ *         -EINVAL if file is incompatible or badly formatted.
+ *         -ENODEV if no handler was found for the application type or an
+ *         invalid octeon id was passed.
+ */
+int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
+			     size_t size);
+
+char *lio_get_state_string(atomic_t *state_ptr);
+
+/** Sets up instruction queues for the device
+ *  @param oct which octeon to setup
+ *
+ *  @return 0 on success, 1 on failure
+ */
+int octeon_setup_instr_queues(struct octeon_device *oct);
+
+/** Sets up output queues for the device
+ *  @param oct which octeon to setup
+ *
+ *  @return 0 on success, 1 on failure
+ */
+int octeon_setup_output_queues(struct octeon_device *oct);
+
+int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no);
+
+int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no);
+
+/** Turns off the input and output queues for the device
+ *  @param oct which octeon to disable
+ */
+void octeon_set_io_queues_off(struct octeon_device *oct);
+
+/** Turns on or off the given output queue for the device
+ *  @param oct which octeon to change
+ *  @param q_no which queue
+ *  @param enable 1 to enable, 0 to disable
+ */
+void octeon_set_droq_pkt_op(struct octeon_device *oct, u32 q_no, u32 enable);
+
+/** Retrieve the config for the device
+ *  @param oct which octeon
+ *  @param card_type type of card
+ *
+ *  @returns pointer to configuration
+ */
+void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
+
+/** Gets the octeon device configuration
+ *  @return - pointer to the octeon configuration structure
+ */
+struct octeon_config *octeon_get_conf(struct octeon_device *oct);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
new file mode 100644
index 0000000..94b502a
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -0,0 +1,989 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+/* #define CAVIUM_ONLY_PERF_MODE */
+
+#define     CVM_MIN(d1, d2)           (((d1) < (d2)) ? (d1) : (d2))
+#define     CVM_MAX(d1, d2)           (((d1) > (d2)) ? (d1) : (d2))
+
+struct niclist {
+	struct list_head list;
+	void *ptr;
+};
+
+struct __dispatch {
+	struct list_head list;
+	struct octeon_recv_info *rinfo;
+	octeon_dispatch_fn_t disp_fn;
+};
+
+/** Get the argument that the user set when registering dispatch
+ *  function for a given opcode/subcode.
+ *  @param  octeon_dev - the octeon device pointer.
+ *  @param  opcode     - the opcode for which the dispatch argument
+ *                       is to be checked.
+ *  @param  subcode    - the subcode for which the dispatch argument
+ *                       is to be checked.
+ *  @return  Success: void * (argument to the dispatch function)
+ *  @return  Failure: NULL
+ *
+ */
+static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
+					    u16 opcode, u16 subcode)
+{
+	int idx;
+	struct list_head *dispatch;
+	void *fn_arg = NULL;
+	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
+	idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+	spin_lock_bh(&octeon_dev->dispatch.lock);
+
+	if (octeon_dev->dispatch.count == 0) {
+		spin_unlock_bh(&octeon_dev->dispatch.lock);
+		return NULL;
+	}
+
+	if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
+		fn_arg = octeon_dev->dispatch.dlist[idx].arg;
+	} else {
+		list_for_each(dispatch,
+			      &octeon_dev->dispatch.dlist[idx].list) {
+			if (((struct octeon_dispatch *)dispatch)->opcode ==
+			    combined_opcode) {
+				fn_arg = ((struct octeon_dispatch *)
+					  dispatch)->arg;
+				break;
+			}
+		}
+	}
+
+	spin_unlock_bh(&octeon_dev->dispatch.lock);
+	return fn_arg;
+}
+
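+/* Read the packets-sent count that Octeon posted for this DROQ and fold
+ * it into pkts_pending. Writing the count back to pkts_sent_reg
+ * acknowledges those packets in hardware (on these chips the register
+ * subtracts the value written), so later reads report only new arrivals.
+ */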
+u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
+				  struct octeon_droq *droq)
+{
+	u32 pkt_count = 0;
+
+	pkt_count = readl(droq->pkts_sent_reg);
+	if (pkt_count) {
+		atomic_add(pkt_count, &droq->pkts_pending);
+		writel(pkt_count, droq->pkts_sent_reg);
+	}
+
+	return pkt_count;
+}
+
+static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
+{
+	u32 count = 0;
+
+	/* max_empty_descs is the max. no. of descs that can have no buffers.
+	 * If the empty desc count goes beyond this value, we cannot safely
+	 * read in a 64K packet sent by Octeon
+	 * (64K is max pkt size from Octeon)
+	 */
+	droq->max_empty_descs = 0;
+
+	do {
+		droq->max_empty_descs++;
+		count += droq->buffer_size;
+	} while (count < (64 * 1024));
+
+	droq->max_empty_descs = droq->max_count - droq->max_empty_descs;
+}
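+
+/* Worked example (numbers illustrative): with buffer_size = 2048 the
+ * loop above runs 32 times before count reaches 64K, so a ring of
+ * max_count = 1024 descriptors ends up with max_empty_descs =
+ * 1024 - 32 = 992.
+ */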
+
+static void octeon_droq_reset_indices(struct octeon_droq *droq)
+{
+	droq->read_idx = 0;
+	droq->write_idx = 0;
+	droq->refill_idx = 0;
+	droq->refill_count = 0;
+	atomic_set(&droq->pkts_pending, 0);
+}
+
+static void
+octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
+				 struct octeon_droq *droq)
+{
+	u32 i;
+
+	for (i = 0; i < droq->max_count; i++) {
+		if (droq->recv_buf_list[i].buffer) {
+			if (droq->desc_ring) {
+				lio_unmap_ring_info(oct->pci_dev,
+						    (u64)droq->
+						    desc_ring[i].info_ptr,
+						    OCT_DROQ_INFO_SIZE);
+				lio_unmap_ring(oct->pci_dev,
+					       (u64)droq->desc_ring[i].
+					       buffer_ptr,
+					       droq->buffer_size);
+			}
+			recv_buffer_free(droq->recv_buf_list[i].buffer);
+			droq->recv_buf_list[i].buffer = NULL;
+		}
+	}
+
+	octeon_droq_reset_indices(droq);
+}
+
+static int
+octeon_droq_setup_ring_buffers(struct octeon_device *oct,
+			       struct octeon_droq *droq)
+{
+	u32 i;
+	void *buf;
+	struct octeon_droq_desc *desc_ring = droq->desc_ring;
+
+	for (i = 0; i < droq->max_count; i++) {
+		buf = recv_buffer_alloc(oct, droq->q_no, droq->buffer_size);
+
+		if (!buf) {
+			dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
+				__func__);
+			return -ENOMEM;
+		}
+
+		droq->recv_buf_list[i].buffer = buf;
+		droq->recv_buf_list[i].data = get_rbd(buf);
+
+		droq->info_list[i].length = 0;
+
+		/* map ring buffers into memory */
+		desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
+		desc_ring[i].buffer_ptr =
+			lio_map_ring(oct->pci_dev,
+				     droq->recv_buf_list[i].buffer,
+				     droq->buffer_size);
+	}
+
+	octeon_droq_reset_indices(droq);
+
+	octeon_droq_compute_max_packet_bufs(droq);
+
+	return 0;
+}
+
+int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
+{
+	struct octeon_droq *droq = oct->droq[q_no];
+
+	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
+
+	octeon_droq_destroy_ring_buffers(oct, droq);
+
+	vfree(droq->recv_buf_list);
+
+	if (droq->info_base_addr)
+		cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
+				       droq->info_alloc_size,
+				       droq->info_base_addr,
+				       droq->info_list_dma);
+
+	if (droq->desc_ring)
+		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
+			     droq->desc_ring, droq->desc_ring_dma);
+
+	memset(droq, 0, OCT_DROQ_SIZE);
+
+	return 0;
+}
+
+int octeon_init_droq(struct octeon_device *oct,
+		     u32 q_no,
+		     u32 num_descs,
+		     u32 desc_size,
+		     void *app_ctx)
+{
+	struct octeon_droq *droq;
+	u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
+	u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
+
+	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
+
+	droq = oct->droq[q_no];
+	memset(droq, 0, OCT_DROQ_SIZE);
+
+	droq->oct_dev = oct;
+	droq->q_no = q_no;
+	if (app_ctx)
+		droq->app_ctx = app_ctx;
+	else
+		droq->app_ctx = (void *)(size_t)q_no;
+
+	c_num_descs = num_descs;
+	c_buf_size = desc_size;
+	if (OCTEON_CN6XXX(oct)) {
+		struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
+
+		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
+		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+	}
+
+	droq->max_count = c_num_descs;
+	droq->buffer_size = c_buf_size;
+
+	desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
+	droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
+					(dma_addr_t *)&droq->desc_ring_dma);
+
+	if (!droq->desc_ring) {
+		dev_err(&oct->pci_dev->dev,
+			"Output queue %d ring alloc failed\n", q_no);
+		return 1;
+	}
+
+	dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
+		q_no, droq->desc_ring, droq->desc_ring_dma);
+	dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
+		droq->max_count);
+
+	droq->info_list =
+		cnnic_alloc_aligned_dma(oct->pci_dev,
+					(droq->max_count * OCT_DROQ_INFO_SIZE),
+					&droq->info_alloc_size,
+					&droq->info_base_addr,
+					&droq->info_list_dma);
+
+	if (!droq->info_list) {
+		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
+		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
+			     droq->desc_ring, droq->desc_ring_dma);
+		return 1;
+	}
+
+	droq->recv_buf_list = vmalloc(droq->max_count * OCT_DROQ_RECVBUF_SIZE);
+	if (!droq->recv_buf_list) {
+		dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
+		goto init_droq_fail;
+	}
+
+	if (octeon_droq_setup_ring_buffers(oct, droq))
+		goto init_droq_fail;
+
+	droq->pkts_per_intr = c_pkts_per_intr;
+	droq->refill_threshold = c_refill_threshold;
+
+	dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
+		droq->max_empty_descs);
+
+	spin_lock_init(&droq->lock);
+
+	INIT_LIST_HEAD(&droq->dispatch_list);
+
+	/* For 56xx Pass1, this function won't be called, so no checks. */
+	oct->fn_list.setup_oq_regs(oct, q_no);
+
+	oct->io_qmask.oq |= (1 << q_no);
+
+	return 0;
+
+init_droq_fail:
+	octeon_delete_droq(oct, q_no);
+	return 1;
+}
+
+/* octeon_create_recv_info
+ * Parameters:
+ *  octeon_dev - pointer to the octeon device structure
+ *  droq       - droq in which the packet arrived.
+ *  buf_cnt    - no. of buffers used by the packet.
+ *  idx        - index in the descriptor for the first buffer in the packet.
+ * Description:
+ *  Allocates a recv_info_t and copies the buffer addresses for packet data
+ *  into the recv_pkt space which starts at an 8B offset from recv_info_t.
+ *  Flags the descriptors for refill later. If available descriptors go
+ *  below the threshold to receive a 64K pkt, new buffers are first allocated
+ *  before the recv_pkt_t is created.
+ *  This routine will be called in interrupt context.
+ * Returns:
+ *  Success: Pointer to recv_info_t
+ *  Failure: NULL.
+ * Locks:
+ *  The droq->lock is held when this routine is called.
+ */
+static inline struct octeon_recv_info *octeon_create_recv_info(
+		struct octeon_device *octeon_dev,
+		struct octeon_droq *droq,
+		u32 buf_cnt,
+		u32 idx)
+{
+	struct octeon_droq_info *info;
+	struct octeon_recv_pkt *recv_pkt;
+	struct octeon_recv_info *recv_info;
+	u32 i, bytes_left;
+
+	info = &droq->info_list[idx];
+
+	recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
+	if (!recv_info)
+		return NULL;
+
+	recv_pkt = recv_info->recv_pkt;
+	recv_pkt->rh = info->rh;
+	recv_pkt->length = (u32)info->length;
+	recv_pkt->buffer_count = (u16)buf_cnt;
+	recv_pkt->octeon_id = (u16)octeon_dev->octeon_id;
+
+	i = 0;
+	bytes_left = (u32)info->length;
+
+	while (buf_cnt) {
+		lio_unmap_ring(octeon_dev->pci_dev,
+			       (u64)droq->desc_ring[idx].buffer_ptr,
+			       droq->buffer_size);
+
+		recv_pkt->buffer_size[i] =
+			(bytes_left >=
+			 droq->buffer_size) ? droq->buffer_size : bytes_left;
+
+		recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
+		droq->recv_buf_list[idx].buffer = NULL;
+
+		INCR_INDEX_BY1(idx, droq->max_count);
+		bytes_left -= droq->buffer_size;
+		i++;
+		buf_cnt--;
+	}
+
+	return recv_info;
+}
+
+/* If we were not able to refill all buffers, try to move around
+ * the buffers that were not dispatched.
+ */
+static inline u32
+octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
+				struct octeon_droq_desc *desc_ring)
+{
+	u32 desc_refilled = 0;
+
+	u32 refill_index = droq->refill_idx;
+
+	while (refill_index != droq->read_idx) {
+		if (droq->recv_buf_list[refill_index].buffer) {
+			droq->recv_buf_list[droq->refill_idx].buffer =
+				droq->recv_buf_list[refill_index].buffer;
+			droq->recv_buf_list[droq->refill_idx].data =
+				droq->recv_buf_list[refill_index].data;
+			desc_ring[droq->refill_idx].buffer_ptr =
+				desc_ring[refill_index].buffer_ptr;
+			droq->recv_buf_list[refill_index].buffer = NULL;
+			desc_ring[refill_index].buffer_ptr = 0;
+			do {
+				INCR_INDEX_BY1(droq->refill_idx,
+					       droq->max_count);
+				desc_refilled++;
+				droq->refill_count--;
+			} while (droq->recv_buf_list[droq->refill_idx].
+				 buffer);
+		}
+		INCR_INDEX_BY1(refill_index, droq->max_count);
+	}                       /* while */
+	return desc_refilled;
+}
+
+/* octeon_droq_refill
+ * Parameters:
+ *  droq       - droq in which descriptors require new buffers.
+ * Description:
+ *  Called during normal DROQ processing in interrupt mode or by the poll
+ *  thread to refill the descriptors from which buffers were dispatched
+ *  to upper layers. Attempts to allocate new buffers. If that fails, moves
+ *  up buffers (that were not dispatched) to form a contiguous ring.
+ * Returns:
+ *  No of descriptors refilled.
+ * Locks:
+ *  This routine is called with droq->lock held.
+ */
+static u32
+octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
+{
+	struct octeon_droq_desc *desc_ring;
+	void *buf = NULL;
+	u8 *data;
+	u32 desc_refilled = 0;
+
+	desc_ring = droq->desc_ring;
+
+	while (droq->refill_count && (desc_refilled < droq->max_count)) {
+		/* If a valid buffer exists (happens if there is no dispatch),
+		 * reuse the buffer, else allocate.
+		 */
+		if (!droq->recv_buf_list[droq->refill_idx].buffer) {
+			buf = recv_buffer_alloc(octeon_dev, droq->q_no,
+						droq->buffer_size);
+			/* If a buffer could not be allocated, no point in
+			 * continuing
+			 */
+			if (!buf)
+				break;
+			droq->recv_buf_list[droq->refill_idx].buffer =
+				buf;
+			data = get_rbd(buf);
+		} else {
+			data = get_rbd(droq->recv_buf_list
+				       [droq->refill_idx].buffer);
+		}
+
+		droq->recv_buf_list[droq->refill_idx].data = data;
+
+		desc_ring[droq->refill_idx].buffer_ptr =
+			lio_map_ring(octeon_dev->pci_dev,
+				     droq->recv_buf_list[droq->
+				     refill_idx].buffer,
+				     droq->buffer_size);
+
+		/* Reset any previous values in the length field. */
+		droq->info_list[droq->refill_idx].length = 0;
+
+		INCR_INDEX_BY1(droq->refill_idx, droq->max_count);
+		desc_refilled++;
+		droq->refill_count--;
+	}
+
+	if (droq->refill_count)
+		desc_refilled +=
+			octeon_droq_refill_pullup_descs(droq, desc_ring);
+
+	/* The refill count would not change in the pullup pass above. We
+	 * only moved buffers to close the gap in the ring, so the same
+	 * no. of buffers still remains to be refilled.
+	 */
+	return desc_refilled;
+}
+
+static inline u32
+octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
+{
+	u32 buf_cnt = 0;
+
+	while (total_len > (buf_size * buf_cnt))
+		buf_cnt++;
+	return buf_cnt;
+}
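+
+/* E.g. (sizes illustrative): buf_size = 2048 and total_len = 5000 give
+ * buf_cnt = 3, since two buffers cover only 4096 bytes.
+ */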
+
+static int
+octeon_droq_dispatch_pkt(struct octeon_device *oct,
+			 struct octeon_droq *droq,
+			 union octeon_rh *rh,
+			 struct octeon_droq_info *info)
+{
+	u32 cnt;
+	octeon_dispatch_fn_t disp_fn;
+	struct octeon_recv_info *rinfo;
+
+	cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length);
+
+	disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode,
+				      (u16)rh->r.subcode);
+	if (disp_fn) {
+		rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx);
+		if (rinfo) {
+			struct __dispatch *rdisp = rinfo->rsvd;
+
+			rdisp->rinfo = rinfo;
+			rdisp->disp_fn = disp_fn;
+			rinfo->recv_pkt->rh = *rh;
+			list_add_tail(&rdisp->list,
+				      &droq->dispatch_list);
+		} else {
+			droq->stats.dropped_nomem++;
+		}
+	} else {
+		dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function\n");
+		droq->stats.dropped_nodispatch++;
+	}                       /* else (dispatch_fn ... */
+
+	return cnt;
+}
+
+static inline void octeon_droq_drop_packets(struct octeon_device *oct,
+					    struct octeon_droq *droq,
+					    u32 cnt)
+{
+	u32 i = 0, buf_cnt;
+	struct octeon_droq_info *info;
+
+	for (i = 0; i < cnt; i++) {
+		info = &droq->info_list[droq->read_idx];
+		octeon_swap_8B_data((u64 *)info, 2);
+
+		if (info->length) {
+			info->length -= OCT_RH_SIZE;
+			droq->stats.bytes_received += info->length;
+			buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
+							   (u32)info->length);
+		} else {
+			dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n");
+			buf_cnt = 1;
+		}
+
+		INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count);
+		droq->refill_count += buf_cnt;
+	}
+}
+
+static u32
+octeon_droq_fast_process_packets(struct octeon_device *oct,
+				 struct octeon_droq *droq,
+				 u32 pkts_to_process)
+{
+	struct octeon_droq_info *info;
+	union octeon_rh *rh;
+	u32 pkt, total_len = 0, pkt_count;
+
+	pkt_count = pkts_to_process;
+
+	for (pkt = 0; pkt < pkt_count; pkt++) {
+		u32 pkt_len = 0;
+		struct sk_buff *nicbuf = NULL;
+
+		info = &droq->info_list[droq->read_idx];
+		octeon_swap_8B_data((u64 *)info, 2);
+
+		if (!info->length) {
+			dev_err(&oct->pci_dev->dev,
+				"DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
+				droq->q_no, droq->read_idx, pkt_count);
+			print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS,
+					     (u8 *)info,
+					     OCT_DROQ_INFO_SIZE);
+			break;
+		}
+
+		/* Len of resp hdr is included in the received data len. */
+		info->length -= OCT_RH_SIZE;
+		rh = &info->rh;
+
+		total_len += (u32)info->length;
+
+		if (OPCODE_SLOW_PATH(rh)) {
+			u32 buf_cnt;
+
+			buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info);
+			INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count);
+			droq->refill_count += buf_cnt;
+		} else {
+			if (info->length <= droq->buffer_size) {
+				lio_unmap_ring(oct->pci_dev,
+					       (u64)droq->desc_ring[
+					       droq->read_idx].buffer_ptr,
+					       droq->buffer_size);
+				pkt_len = (u32)info->length;
+				nicbuf = droq->recv_buf_list[
+					droq->read_idx].buffer;
+				droq->recv_buf_list[droq->read_idx].buffer =
+					NULL;
+				INCR_INDEX_BY1(droq->read_idx, droq->max_count);
+				skb_put(nicbuf, pkt_len);
+				droq->refill_count++;
+			} else {
+				nicbuf = octeon_fast_packet_alloc(oct, droq,
+								  droq->q_no,
+								  (u32)
+								  info->length);
+				pkt_len = 0;
+				/* nicbuf allocation can fail. We'll handle it
+				 * inside the loop.
+				 */
+				while (pkt_len < info->length) {
+					int cpy_len;
+
+					cpy_len = ((pkt_len +
+						droq->buffer_size) >
+						info->length) ?
+						((u32)info->length - pkt_len) :
+						droq->buffer_size;
+
+					if (nicbuf) {
+						lio_unmap_ring(oct->pci_dev,
+							       (u64)
+							       droq->desc_ring
+							       [droq->read_idx].
+							       buffer_ptr,
+							       droq->
+							       buffer_size);
+						octeon_fast_packet_next(droq,
+									nicbuf,
+									cpy_len,
+									droq->
+									read_idx
+									);
+					}
+
+					pkt_len += cpy_len;
+					INCR_INDEX_BY1(droq->read_idx,
+						       droq->max_count);
+					droq->refill_count++;
+				}
+			}
+
+			if (nicbuf) {
+				if (droq->ops.fptr)
+					droq->ops.fptr(oct->octeon_id,
+					nicbuf, pkt_len,
+					rh, &droq->napi);
+				else
+					recv_buffer_free(nicbuf);
+			}
+		}
+
+		if (droq->refill_count >= droq->refill_threshold) {
+			int desc_refilled = octeon_droq_refill(oct, droq);
+
+			/* Flush the droq descriptor data to memory to be sure
+			 * that when we update the credits the data in memory
+			 * is accurate.
+			 */
+			wmb();
+			writel((desc_refilled), droq->pkts_credit_reg);
+			/* make sure mmio write completes */
+			mmiowb();
+		}
+
+	}                       /* for ( each packet )... */
+
+	/* Update packet and byte counts received on this DROQ. */
+	droq->stats.pkts_received += pkt;
+	droq->stats.bytes_received += total_len;
+
+	if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
+		octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));
+
+		droq->stats.dropped_toomany += (pkts_to_process - pkt);
+		return pkts_to_process;
+	}
+
+	return pkt;
+}
+
+int
+octeon_droq_process_packets(struct octeon_device *oct,
+			    struct octeon_droq *droq,
+			    u32 budget)
+{
+	u32 pkt_count = 0, pkts_processed = 0;
+	struct list_head *tmp, *tmp2;
+
+	pkt_count = atomic_read(&droq->pkts_pending);
+	if (!pkt_count)
+		return 0;
+
+	if (pkt_count > budget)
+		pkt_count = budget;
+
+	/* Grab the lock */
+	spin_lock(&droq->lock);
+
+	pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
+
+	atomic_sub(pkts_processed, &droq->pkts_pending);
+
+	/* Release the spin lock */
+	spin_unlock(&droq->lock);
+
+	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
+		struct __dispatch *rdisp = (struct __dispatch *)tmp;
+
+		list_del(tmp);
+		rdisp->disp_fn(rdisp->rinfo,
+			       octeon_get_dispatch_arg
+			       (oct,
+				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
+				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
+	}
+
+	/* If there are packets pending, schedule tasklet again */
+	if (atomic_read(&droq->pkts_pending))
+		return 1;
+
+	return 0;
+}
+
+/**
+ * Utility function to poll for packets. octeon_droq_check_hw_for_pkts()
+ * must be called before calling this routine.
+ */
+static int
+octeon_droq_process_poll_pkts(struct octeon_device *oct,
+			      struct octeon_droq *droq, u32 budget)
+{
+	struct list_head *tmp, *tmp2;
+	u32 pkts_available = 0, pkts_processed = 0;
+	u32 total_pkts_processed = 0;
+
+	if (budget > droq->max_count)
+		budget = droq->max_count;
+
+	spin_lock(&droq->lock);
+
+	while (total_pkts_processed < budget) {
+		pkts_available =
+			CVM_MIN((budget - total_pkts_processed),
+				(u32)(atomic_read(&droq->pkts_pending)));
+
+		if (pkts_available == 0)
+			break;
+
+		pkts_processed =
+			octeon_droq_fast_process_packets(oct, droq,
+							 pkts_available);
+
+		atomic_sub(pkts_processed, &droq->pkts_pending);
+
+		total_pkts_processed += pkts_processed;
+
+		octeon_droq_check_hw_for_pkts(oct, droq);
+	}
+
+	spin_unlock(&droq->lock);
+
+	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
+		struct __dispatch *rdisp = (struct __dispatch *)tmp;
+
+		list_del(tmp);
+		rdisp->disp_fn(rdisp->rinfo,
+			       octeon_get_dispatch_arg
+			       (oct,
+				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
+				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
+	}
+
+	return total_pkts_processed;
+}
+
+int
+octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
+			     u32 arg)
+{
+	struct octeon_droq *droq;
+	struct octeon_config *oct_cfg = NULL;
+
+	oct_cfg = octeon_get_conf(oct);
+
+	if (!oct_cfg)
+		return -EINVAL;
+
+	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
+		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
+			__func__, q_no, (oct->num_oqs - 1));
+		return -EINVAL;
+	}
+
+	droq = oct->droq[q_no];
+
+	if (cmd == POLL_EVENT_PROCESS_PKTS)
+		return octeon_droq_process_poll_pkts(oct, droq, arg);
+
+	if (cmd == POLL_EVENT_PENDING_PKTS) {
+		u32 pkt_cnt = atomic_read(&droq->pkts_pending);
+
+		return  octeon_droq_process_packets(oct, droq, pkt_cnt);
+	}
+
+	if (cmd == POLL_EVENT_ENABLE_INTR) {
+		u32 value;
+		unsigned long flags;
+
+		/* Enable Pkt Interrupt */
+		switch (oct->chip_id) {
+		case OCTEON_CN66XX:
+		case OCTEON_CN68XX: {
+			struct octeon_cn6xxx *cn6xxx =
+				(struct octeon_cn6xxx *)oct->chip;
+			spin_lock_irqsave
+				(&cn6xxx->lock_for_droq_int_enb_reg, flags);
+			value =
+				octeon_read_csr(oct,
+						CN6XXX_SLI_PKT_TIME_INT_ENB);
+			value |= (1 << q_no);
+			octeon_write_csr(oct,
+					 CN6XXX_SLI_PKT_TIME_INT_ENB,
+					 value);
+			value =
+				octeon_read_csr(oct,
+						CN6XXX_SLI_PKT_CNT_INT_ENB);
+			value |= (1 << q_no);
+			octeon_write_csr(oct,
+					 CN6XXX_SLI_PKT_CNT_INT_ENB,
+					 value);
+
+			/* don't bother flushing the enables */
+
+			spin_unlock_irqrestore
+				(&cn6xxx->lock_for_droq_int_enb_reg, flags);
+			return 0;
+		}
+		break;
+		}
+
+		return 0;
+	}
+
+	dev_err(&oct->pci_dev->dev, "%s Unknown command: %d\n", __func__, cmd);
+	return -EINVAL;
+}
+
+int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
+			     struct octeon_droq_ops *ops)
+{
+	struct octeon_droq *droq;
+	unsigned long flags;
+	struct octeon_config *oct_cfg = NULL;
+
+	oct_cfg = octeon_get_conf(oct);
+
+	if (!oct_cfg)
+		return -EINVAL;
+
+	if (!(ops)) {
+		dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
+		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
+			__func__, q_no, (oct->num_oqs - 1));
+		return -EINVAL;
+	}
+
+	droq = oct->droq[q_no];
+
+	spin_lock_irqsave(&droq->lock, flags);
+
+	memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));
+
+	spin_unlock_irqrestore(&droq->lock, flags);
+
+	return 0;
+}
+
+int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
+{
+	unsigned long flags;
+	struct octeon_droq *droq;
+	struct octeon_config *oct_cfg = NULL;
+
+	oct_cfg = octeon_get_conf(oct);
+
+	if (!oct_cfg)
+		return -EINVAL;
+
+	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
+		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
+			__func__, q_no, oct->num_oqs - 1);
+		return -EINVAL;
+	}
+
+	droq = oct->droq[q_no];
+
+	if (!droq) {
+		dev_info(&oct->pci_dev->dev,
+			 "Droq id (%d) not available.\n", q_no);
+		return 0;
+	}
+
+	spin_lock_irqsave(&droq->lock, flags);
+
+	droq->ops.fptr = NULL;
+	droq->ops.drop_on_max = 0;
+
+	spin_unlock_irqrestore(&droq->lock, flags);
+
+	return 0;
+}
+
+int octeon_create_droq(struct octeon_device *oct,
+		       u32 q_no, u32 num_descs,
+		       u32 desc_size, void *app_ctx)
+{
+	struct octeon_droq *droq;
+
+	if (oct->droq[q_no]) {
+		dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
+			q_no);
+		return 1;
+	}
+
+	/* Allocate the DS for the new droq. */
+	droq = vzalloc(sizeof(*droq));
+	if (!droq)
+		goto create_droq_fail;
+
+	/*Disable the pkt o/p for this Q  */
+	octeon_set_droq_pkt_op(oct, q_no, 0);
+	oct->droq[q_no] = droq;
+
+	/* Initialize the Droq */
+	octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx);
+
+	oct->num_oqs++;
+
+	dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__,
+		oct->num_oqs);
+
+	/* Global Droq register settings */
+
+	/* As of now not required, as settings are done for all 32 Droqs at
+	 * the same time.
+	 */
+	return 0;
+
+create_droq_fail:
+	octeon_delete_droq(oct, q_no);
+	return -1;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
new file mode 100644
index 0000000..7940cce
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -0,0 +1,426 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ *          Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*!  \file  octeon_droq.h
+ *   \brief Implementation of Octeon Output queues. "Output" is with
+ *   respect to the Octeon device on the NIC. From this driver's point of
+ *   view they are ingress queues.
+ */
+
+#ifndef __OCTEON_DROQ_H__
+#define __OCTEON_DROQ_H__
+
+/* Maximum number of packets that will be processed in one iteration. */
+#define MAX_PACKET_BUDGET 0xFFFFFFFF
+
+/** Octeon descriptor format.
+ *  The descriptor ring is made of descriptors which have 2 64-bit values:
+ *  -# Physical (bus) address of the data buffer.
+ *  -# Physical (bus) address of an octeon_droq_info structure.
+ *  The Octeon device DMA's incoming packets and its information at the address
+ *  given by these descriptor fields.
+ */
+struct octeon_droq_desc {
+	/** The buffer pointer */
+	u64 buffer_ptr;
+
+	/** The Info pointer */
+	u64 info_ptr;
+};
+
+#define OCT_DROQ_DESC_SIZE    (sizeof(struct octeon_droq_desc))
+
+/** Information about packet DMA'ed by Octeon.
+ *  The format of the information available at Info Pointer after Octeon
+ *  has posted a packet. Not all descriptors have valid information. Only
+ *  the Info field of the first descriptor for a packet has information
+ *  about the packet.
+ */
+struct octeon_droq_info {
+	/** The Output Receive Header. */
+	union octeon_rh rh;
+
+	/** The Length of the packet. */
+	u64 length;
+};
+
+#define OCT_DROQ_INFO_SIZE   (sizeof(struct octeon_droq_info))
+
+/** Pointer to data buffer.
+ *  Driver keeps a pointer to the data buffer that it made available to
+ *  the Octeon device. Since the descriptor ring keeps physical (bus)
+ *  addresses, this field is required for the driver to keep track of
+ *  the virtual address pointers.
+ */
+struct octeon_recv_buffer {
+	/** Packet buffer, including metadata. */
+	void *buffer;
+
+	/** Data in the packet buffer.  */
+	u8 *data;
+};
+
+#define OCT_DROQ_RECVBUF_SIZE    (sizeof(struct octeon_recv_buffer))
+
+/** Output Queue statistics. Each output queue has eight stats fields. */
+struct oct_droq_stats {
+	/** Number of packets received in this queue. */
+	u64 pkts_received;
+
+	/** Bytes received by this queue. */
+	u64 bytes_received;
+
+	/** Packets dropped due to no dispatch function. */
+	u64 dropped_nodispatch;
+
+	/** Packets dropped due to no memory available. */
+	u64 dropped_nomem;
+
+	/** Packets dropped due to large number of pkts to process. */
+	u64 dropped_toomany;
+
+	/** Number of packets sent to stack from this queue. */
+	u64 rx_pkts_received;
+
+	/** Number of Bytes sent to stack from this queue. */
+	u64 rx_bytes_received;
+
+	/** Num of Packets dropped due to receive path failures. */
+	u64 rx_dropped;
+};
+
+#define POLL_EVENT_INTR_ARRIVED  1
+#define POLL_EVENT_PROCESS_PKTS  2
+#define POLL_EVENT_PENDING_PKTS  3
+#define POLL_EVENT_ENABLE_INTR   4
+
+/* The maximum number of buffers that can be dispatched from the
+ * output/dma queue. Set to 64 assuming 1K buffers in DROQ and the fact that
+ * max packet size from DROQ is 64K.
+ */
+#define    MAX_RECV_BUFS    64
+
+/** Receive Packet format used when dispatching output queue packets
+ *  with non-raw opcodes.
+ *  The received packet will be sent to the upper layers using this
+ *  structure which is passed as a parameter to the dispatch function
+ */
+struct octeon_recv_pkt {
+	/**  Number of buffers in this received packet */
+	u16 buffer_count;
+
+	/** Id of the device that is sending the packet up */
+	u16 octeon_id;
+
+	/** Length of data in the packet buffer */
+	u32 length;
+
+	/** The receive header */
+	union octeon_rh rh;
+
+	/** Pointer to the OS-specific packet buffer */
+	void *buffer_ptr[MAX_RECV_BUFS];
+
+	/** Size of the buffers pointed to by ptr's in buffer_ptr */
+	u32 buffer_size[MAX_RECV_BUFS];
+};
+
+#define OCT_RECV_PKT_SIZE    (sizeof(struct octeon_recv_pkt))
+
+/** The first parameter of a dispatch function.
+ *  For a raw mode opcode, the driver dispatches with the device
+ *  pointer in this structure.
+ *  For non-raw mode opcode, the driver dispatches the recv_pkt
+ *  created to contain the buffers with data received from Octeon.
+ *  ---------------------
+ *  |     *recv_pkt ----|---
+ *  |-------------------|   |
+ *  | 0 or more bytes   |   |
+ *  | reserved by driver|   |
+ *  |-------------------|<-/
+ *  | octeon_recv_pkt   |
+ *  |                   |
+ *  |___________________|
+ */
+struct octeon_recv_info {
+	void *rsvd;
+	struct octeon_recv_pkt *recv_pkt;
+};
+
+#define  OCT_RECV_INFO_SIZE    (sizeof(struct octeon_recv_info))
+
+/** Allocate a recv_info structure. The recv_pkt pointer in the recv_info
+ *  structure is filled in before this call returns.
+ *  @param extra_bytes - extra bytes to be allocated at the end of the recv info
+ *                       structure.
+ *  @return - pointer to a newly allocated recv_info structure.
+ */
+static inline struct octeon_recv_info *octeon_alloc_recv_info(int extra_bytes)
+{
+	struct octeon_recv_info *recv_info;
+	u8 *buf;
+
+	buf = kmalloc(OCT_RECV_PKT_SIZE + OCT_RECV_INFO_SIZE +
+		      extra_bytes, GFP_ATOMIC);
+	if (!buf)
+		return NULL;
+
+	recv_info = (struct octeon_recv_info *)buf;
+	recv_info->recv_pkt =
+		(struct octeon_recv_pkt *)(buf + OCT_RECV_INFO_SIZE);
+	recv_info->rsvd = NULL;
+	if (extra_bytes)
+		recv_info->rsvd = buf + OCT_RECV_INFO_SIZE + OCT_RECV_PKT_SIZE;
+
+	return recv_info;
+}
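+
+/* Usage sketch (mirrors octeon_create_recv_info() in octeon_droq.c):
+ *
+ *	struct octeon_recv_info *ri;
+ *
+ *	ri = octeon_alloc_recv_info(sizeof(struct __dispatch));
+ *	if (!ri)
+ *		return NULL;
+ *	... ri->rsvd now points at the extra bytes past the recv_pkt.
+ */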
+
+/** Free a recv_info structure.
+ *  @param recv_info - Pointer to receive_info to be freed
+ */
+static inline void octeon_free_recv_info(struct octeon_recv_info *recv_info)
+{
+	kfree(recv_info);
+}
+
+typedef int (*octeon_dispatch_fn_t)(struct octeon_recv_info *, void *);
+
+/** Used by NIC module to register packet handler and to get device
+ * information for each octeon device.
+ */
+struct octeon_droq_ops {
+	/** This registered function will be called by the driver with
+	 *  the octeon id, pointer to buffer from droq and length of
+	 *  data in the buffer. The receive header gives the port
+	 *  number to the caller.  Function pointer is set by caller.
+	 */
+	void (*fptr)(u32, void *, u32, union octeon_rh *, void *);
+
+	/* This function will be called by the driver for all NAPI related
+	 * events. It receives a single opaque context pointer set up by
+	 * the driver.
+	 */
+	void (*napi_fn)(void *);
+
+	u32 poll_mode;
+
+	/** Flag indicating if the DROQ handler should drop packets that
+	 *  it cannot handle in one iteration. Set by caller.
+	 */
+	u32 drop_on_max;
+};
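+
+/* Sketch of how a NIC module might hook its receive path (the handler
+ * name is hypothetical; octeon_register_droq_ops() is declared below):
+ *
+ *	struct octeon_droq_ops ops;
+ *
+ *	memset(&ops, 0, sizeof(ops));
+ *	ops.fptr = my_rx_handler;
+ *	ops.drop_on_max = 1;
+ *	octeon_register_droq_ops(oct, q_no, &ops);
+ */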
+
+/** The Descriptor Ring Output Queue structure.
+ *  This structure has all the information required to implement a
+ *  Octeon DROQ.
+ */
+struct octeon_droq {
+	/** A spinlock to protect access to this ring. */
+	spinlock_t lock;
+
+	u32 q_no;
+
+	struct octeon_droq_ops ops;
+
+	struct octeon_device *oct_dev;
+
+	/** The 8B aligned descriptor ring starts at this address. */
+	struct octeon_droq_desc *desc_ring;
+
+	/** Index in the ring where the driver should read the next packet */
+	u32 read_idx;
+
+	/** Index in the ring where Octeon will write the next packet */
+	u32 write_idx;
+
+	/** Index in the ring where the driver will refill the descriptor's
+	 * buffer
+	 */
+	u32 refill_idx;
+
+	/** Packets pending to be processed */
+	atomic_t pkts_pending;
+
+	/** Number of  descriptors in this ring. */
+	u32 max_count;
+
+	/** The number of descriptors pending refill. */
+	u32 refill_count;
+
+	u32 pkts_per_intr;
+	u32 refill_threshold;
+
+	/** The max number of descriptors in DROQ without a buffer.
+	 * This field is used to keep track of empty space threshold. If the
+	 * refill_count reaches this value, the DROQ cannot accept a max-sized
+	 * (64K) packet.
+	 */
+	u32 max_empty_descs;
+
+	/** The 8B aligned info ptrs begin from this address. */
+	struct octeon_droq_info *info_list;
+
+	/** The receive buffer list. This list has the virtual addresses of the
+	 * buffers.
+	 */
+	struct octeon_recv_buffer *recv_buf_list;
+
+	/** The size of each buffer pointed by the buffer pointer. */
+	u32 buffer_size;
+
+	/** Pointer to the mapped packet credit register.
+	 * Host writes number of info/buffer ptrs available to this register
+	 */
+	void  __iomem *pkts_credit_reg;
+
+	/** Pointer to the mapped packet sent register.
+	 * Octeon writes the number of packets DMA'ed to host memory
+	 * in this register.
+	 */
+	void __iomem *pkts_sent_reg;
+
+	struct list_head dispatch_list;
+
+	/** Statistics for this DROQ. */
+	struct oct_droq_stats stats;
+
+	/** DMA mapped address of the DROQ descriptor ring. */
+	size_t desc_ring_dma;
+
+	/** Info ptr list are allocated at this virtual address. */
+	size_t info_base_addr;
+
+	/** DMA mapped address of the info list */
+	size_t info_list_dma;
+
+	/** Allocated size of info list. */
+	u32 info_alloc_size;
+
+	/** application context */
+	void *app_ctx;
+
+	struct napi_struct napi;
+
+	u32 cpu_id;
+
+	struct call_single_data csd;
+};
+
+#define OCT_DROQ_SIZE   (sizeof(struct octeon_droq))
+
+/**
+ *  Allocates space for the descriptor ring for the droq and sets the
+ *   base addr, num desc etc in Octeon registers.
+ *
+ * @param  oct_dev    - pointer to the octeon device structure
+ * @param  q_no       - droq no. ranges from 0 - 3.
+ * @param app_ctx     - pointer to application context
+ * @return Success: 0    Failure: 1
+ */
+int octeon_init_droq(struct octeon_device *oct_dev,
+		     u32 q_no,
+		     u32 num_descs,
+		     u32 desc_size,
+		     void *app_ctx);
+
+/**
+ *  Frees the space for descriptor ring for the droq.
+ *
+ *  @param oct_dev - pointer to the octeon device structure
+ *  @param q_no    - droq no. ranges from 0 - 3.
+ *  @return:    Success: 0    Failure: 1
+ */
+int octeon_delete_droq(struct octeon_device *oct_dev, u32 q_no);
+
+/** Register a change in droq operations. The ops field has a pointer to a
+ * function which will called by the DROQ handler for all packets arriving
+ * on output queues given by q_no irrespective of the type of packet.
+ * The ops field also has a flag which if set tells the DROQ handler to
+ * drop packets if it receives more than what it can process in one
+ * invocation of the handler.
+ * @param oct       - octeon device
+ * @param q_no      - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1)
+ * @param ops       - the droq_ops settings for this queue
+ * @return          - 0 on success, -ENODEV or -EINVAL on error.
+ */
+int
+octeon_register_droq_ops(struct octeon_device *oct,
+			 u32 q_no,
+			 struct octeon_droq_ops *ops);
+
+/** Resets the function pointer and flag settings made by
+ * octeon_register_droq_ops(). After this routine is called, the DROQ handler
+ * will lookup dispatch function for each arriving packet on the output queue
+ * given by q_no.
+ * @param oct       - octeon device
+ * @param q_no      - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1)
+ * @return          - 0 on success, -ENODEV or -EINVAL on error.
+ */
+int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no);
+
+/**   Register a dispatch function for a opcode/subcode. The driver will call
+ *    this dispatch function when it receives a packet with the given
+ *    opcode/subcode in its output queues along with the user specified
+ *    argument.
+ *    @param  oct        - the octeon device to register with.
+ *    @param  opcode     - the opcode for which the dispatch will be registered.
+ *    @param  subcode    - the subcode for which the dispatch will be registered
+ *    @param  fn         - the dispatch function.
+ *    @param  fn_arg     - user specified argument that the driver will pass
+ *                         along to the dispatch function.
+ *    @return Success: 0; Failure: 1
+ */
+int octeon_register_dispatch_fn(struct octeon_device *oct,
+				u16 opcode,
+				u16 subcode,
+				octeon_dispatch_fn_t fn, void *fn_arg);
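+
+/* Sketch (opcode/subcode values and the handler are hypothetical):
+ *
+ *	static int my_disp_fn(struct octeon_recv_info *rinfo, void *arg)
+ *	{
+ *		...
+ *		octeon_free_recv_info(rinfo);
+ *		return 0;
+ *	}
+ *
+ *	octeon_register_dispatch_fn(oct, MY_OPCODE, MY_SUBCODE,
+ *				    my_disp_fn, oct);
+ */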
+
+/**  Remove registration for an opcode/subcode. This will delete the mapping for
+ *   an opcode/subcode. The dispatch function will be unregistered and will no
+ *   longer be called if a packet with the opcode/subcode arrives in the driver
+ *   output queues.
+ *   @param  oct        -  the octeon device to unregister from.
+ *   @param  opcode     -  the opcode to be unregistered.
+ *   @param  subcode    -  the subcode to be unregistered.
+ *
+ *   @return Success: 0; Failure: 1
+ */
+int octeon_unregister_dispatch_fn(struct octeon_device *oct,
+				  u16 opcode,
+				  u16 subcode);
+
+void octeon_droq_print_stats(void);
+
+u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
+				  struct octeon_droq *droq);
+
+int octeon_create_droq(struct octeon_device *oct, u32 q_no,
+		       u32 num_descs, u32 desc_size, void *app_ctx);
+
+int octeon_droq_process_packets(struct octeon_device *oct,
+				struct octeon_droq *droq,
+				u32 budget);
+
+int octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no,
+				 int cmd, u32 arg);
+
+#endif	/*__OCTEON_DROQ_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
new file mode 100644
index 0000000..592fe49
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -0,0 +1,319 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ *          Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*!  \file  octeon_iq.h
+ *   \brief Host Driver: Implementation of Octeon input queues. "Input" is
+ *   with respect to the Octeon device on the NIC. From this driver's
+ *   point of view they are egress queues.
+ */
+
+#ifndef __OCTEON_IQ_H__
+#define  __OCTEON_IQ_H__
+
+#define IQ_STATUS_RUNNING   1
+
+#define IQ_SEND_OK          0
+#define IQ_SEND_STOP        1
+#define IQ_SEND_FAILED     -1
+
+/*-------------------------  INSTRUCTION QUEUE --------------------------*/
+
+/* \cond */
+
+#define REQTYPE_NONE                 0
+#define REQTYPE_NORESP_NET           1
+#define REQTYPE_NORESP_NET_SG        2
+#define REQTYPE_RESP_NET             3
+#define REQTYPE_RESP_NET_SG          4
+#define REQTYPE_SOFT_COMMAND         5
+#define REQTYPE_LAST                 5
+
+struct octeon_request_list {
+	u32 reqtype;
+	void *buf;
+};
+
+/* \endcond */
+
+/** Input Queue statistics. Each input queue has nine stats fields. */
+struct oct_iq_stats {
+	u64 instr_posted; /**< Instructions posted to this queue. */
+	u64 instr_processed; /**< Instructions processed in this queue. */
+	u64 instr_dropped; /**< Instructions that could not be processed */
+	u64 bytes_sent;  /**< Bytes sent through this queue. */
+	u64 sgentry_sent;/**< Gather entries sent through this queue. */
+	u64 tx_done;/**< Num of packets sent to network. */
+	u64 tx_iq_busy;/**< Num of times this iq was found to be full. */
+	u64 tx_dropped;/**< Num of pkts dropped due to xmit path errors. */
+	u64 tx_tot_bytes;/**< Total count of bytes sent to network. */
+};
+
+#define OCT_IQ_STATS_SIZE   (sizeof(struct oct_iq_stats))
+
+/** The instruction (input) queue.
+ *  The input queue is used to post raw (instruction) mode data or packet
+ *  data to the Octeon device from the host. Each input queue (up to 4) for
+ *  an Octeon device has one such structure to represent it.
+ */
+struct octeon_instr_queue {
+	/** A spinlock to protect access to the input ring.  */
+	spinlock_t lock;
+
+	/** Flag that indicates if the queue uses 64 byte commands. */
+	u32 iqcmd_64B:1;
+
+	/** Queue Number. */
+	u32 iq_no:5;
+
+	u32 rsvd:17;
+
+	/* Controls the periodic flushing of iq */
+	u32 do_auto_flush:1;
+
+	u32 status:8;
+
+	/** Maximum no. of instructions in this queue. */
+	u32 max_count;
+
+	/** Index in input ring where the driver should write the next packet */
+	u32 host_write_index;
+
+	/** Index in input ring where Octeon is expected to read the next
+	 * packet.
+	 */
+	u32 octeon_read_index;
+
+	/** This index aids in finding the window in the queue where Octeon
+	 * has read the commands.
+	 */
+	u32 flush_index;
+
+	/** This field keeps track of the instructions pending in this queue. */
+	atomic_t instr_pending;
+
+	u32 reset_instr_cnt;
+
+	/** Pointer to the Virtual Base addr of the input ring. */
+	u8 *base_addr;
+
+	struct octeon_request_list *request_list;
+
+	/** Octeon doorbell register for the ring. */
+	void __iomem *doorbell_reg;
+
+	/** Octeon instruction count register for this ring. */
+	void __iomem *inst_cnt_reg;
+
+	/** Number of instructions pending to be posted to Octeon. */
+	u32 fill_cnt;
+
+	/** The max. number of instructions that can be held pending by the
+	 * driver.
+	 */
+	u32 fill_threshold;
+
+	/** The last time that the doorbell was rung. */
+	u64 last_db_time;
+
+	/** The doorbell timeout. If the doorbell was not rung for this time and
+	 * fill_cnt is non-zero, ring the doorbell again.
+	 */
+	u32 db_timeout;
+
+	/** Statistics for this input queue. */
+	struct oct_iq_stats stats;
+
+	/** DMA mapped base address of the input descriptor ring. */
+	u64 base_addr_dma;
+
+	/** Application context */
+	void *app_ctx;
+};
+
+/*----------------------  INSTRUCTION FORMAT ----------------------------*/
+
+/** 32-byte instruction format.
+ *  Format of instruction for a 32-byte mode input queue.
+ */
+struct octeon_instr_32B {
+	/** Pointer where the input data is available. */
+	u64 dptr;
+
+	/** Instruction Header.  */
+	u64 ih;
+
+	/** Pointer where the response for a RAW mode packet will be written
+	 * by Octeon.
+	 */
+	u64 rptr;
+
+	/** Input Request Header. Additional info about the input. */
+	u64 irh;
+
+};
+
+#define OCT_32B_INSTR_SIZE     (sizeof(struct octeon_instr_32B))
+
+/** 64-byte instruction format.
+ *  Format of instruction for a 64-byte mode input queue.
+ */
+struct octeon_instr_64B {
+	/** Pointer where the input data is available. */
+	u64 dptr;
+
+	/** Instruction Header. */
+	u64 ih;
+
+	/** Input Request Header. */
+	u64 irh;
+
+	/** opcode/subcode specific parameters */
+	u64 ossp[2];
+
+	/** Return Data Parameters */
+	u64 rdp;
+
+	/** Pointer where the response for a RAW mode packet will be written
+	 * by Octeon.
+	 */
+	u64 rptr;
+
+	u64 reserved;
+
+};
+
+#define OCT_64B_INSTR_SIZE     (sizeof(struct octeon_instr_64B))
+
+/** The size of each buffer in soft command buffer pool
+ */
+#define  SOFT_COMMAND_BUFFER_SIZE	1024
+
+struct octeon_soft_command {
+	/** Soft command buffer info. */
+	struct list_head node;
+	u64 dma_addr;
+	u32 size;
+
+	/** Command and return status */
+	struct octeon_instr_64B cmd;
+#define COMPLETION_WORD_INIT    0xffffffffffffffffULL
+	u64 *status_word;
+
+	/** Data buffer info */
+	void *virtdptr;
+	u64 dmadptr;
+	u32 datasize;
+
+	/** Return buffer info */
+	void *virtrptr;
+	u64 dmarptr;
+	u32 rdatasize;
+
+	/** Context buffer info */
+	void *ctxptr;
+	u32  ctxsize;
+
+	/** Time out and callback */
+	size_t wait_time;
+	size_t timeout;
+	u32 iq_no;
+	void (*callback)(struct octeon_device *, u32, void *);
+	void *callback_arg;
+};
+
+/** Maximum number of buffers to allocate into soft command buffer pool
+ */
+#define  MAX_SOFT_COMMAND_BUFFERS	16
+
+/** Head of a soft command buffer pool.
+ */
+struct octeon_sc_buffer_pool {
+	/** List structure to add delete pending entries to */
+	struct list_head head;
+
+	/** A lock for this response list */
+	spinlock_t lock;
+
+	atomic_t alloc_buf_count;
+};
+
+int octeon_setup_sc_buffer_pool(struct octeon_device *oct);
+int octeon_free_sc_buffer_pool(struct octeon_device *oct);
+struct octeon_soft_command *
+	octeon_alloc_soft_command(struct octeon_device *oct,
+				  u32 datasize, u32 rdatasize,
+				  u32 ctxsize);
+void octeon_free_soft_command(struct octeon_device *oct,
+			      struct octeon_soft_command *sc);
+
+/**
+ *  octeon_init_instr_queue()
+ *  @param octeon_dev      - pointer to the octeon device structure.
+ *  @param iq_no           - queue to be initialized (0 <= q_no <= 3).
+ *
+ *  Called at driver init time for each input queue. iq_conf has the
+ *  configuration parameters for the queue.
+ *
+ *  @return  Success: 0   Failure: 1
+ */
+int octeon_init_instr_queue(struct octeon_device *octeon_dev, u32 iq_no,
+			    u32 num_descs);
+
+/**
+ *  octeon_delete_instr_queue()
+ *  @param octeon_dev      - pointer to the octeon device structure.
+ *  @param iq_no           - queue to be deleted (0 <= q_no <= 3).
+ *
+ *  Called at driver unload time for each input queue. Deletes all
+ *  allocated resources for the input queue.
+ *
+ *  @return  Success: 0   Failure: 1
+ */
+int octeon_delete_instr_queue(struct octeon_device *octeon_dev, u32 iq_no);
+
+int lio_wait_for_instr_fetch(struct octeon_device *oct);
+
+int
+octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
+				void (*fn)(void *));
+
+int
+lio_process_iq_request_list(struct octeon_device *oct,
+			    struct octeon_instr_queue *iq);
+
+int octeon_send_command(struct octeon_device *oct, u32 iq_no,
+			u32 force_db, void *cmd, void *buf,
+			u32 datasize, u32 reqtype);
+
+void octeon_prepare_soft_command(struct octeon_device *oct,
+				 struct octeon_soft_command *sc,
+				 u8 opcode, u8 subcode,
+				 u32 irh_ossp, u64 ossp0,
+				 u64 ossp1);
+
+int octeon_send_soft_command(struct octeon_device *oct,
+			     struct octeon_soft_command *sc);
+
+int octeon_setup_iq(struct octeon_device *oct, u32 iq_no,
+		    u32 num_descs, void *app_ctx);
+
+#endif				/* __OCTEON_IQ_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
new file mode 100644
index 0000000..cbd0819
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -0,0 +1,237 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ *          Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_main.h
+ *  \brief Host Driver: This file is included by all host driver source files
+ *  to include common definitions.
+ */
+
+#ifndef _OCTEON_MAIN_H_
+#define  _OCTEON_MAIN_H_
+
+#if BITS_PER_LONG == 32
+#define CVM_CAST64(v) ((long long)(v))
+#elif BITS_PER_LONG == 64
+#define CVM_CAST64(v) ((long long)(long)(v))
+#else
+#error "Unknown system architecture"
+#endif
+
+#define DRV_NAME "LiquidIO"
+
+/**
+ * \brief determines if a given console has debug enabled.
+ * @param console console to check
+ * @returns  1 = enabled. 0 otherwise
+ */
+int octeon_console_debug_enabled(u32 console);
+
+/* BQL-related functions */
+void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
+void octeon_update_tx_completion_counters(void *buf, int reqtype,
+					  unsigned int *pkts_compl,
+					  unsigned int *bytes_compl);
+void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
+					unsigned int bytes_compl);
+
+/** Swap 8B blocks */
+static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
+{
+	while (blocks) {
+		cpu_to_be64s(data);
+		blocks--;
+		data++;
+	}
+}
+
+/**
+ * \brief unmaps a PCI BAR
+ * @param oct Pointer to Octeon device
+ * @param baridx bar index
+ */
+static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
+{
+	dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
+		baridx);
+
+	if (oct->mmio[baridx].done)
+		iounmap(oct->mmio[baridx].hw_addr);
+
+	if (oct->mmio[baridx].start)
+		pci_release_region(oct->pci_dev, baridx * 2);
+}
+
+/**
+ * \brief maps a PCI BAR
+ * @param oct Pointer to Octeon device
+ * @param baridx bar index
+ * @param max_map_len maximum length of mapped memory
+ */
+static inline int octeon_map_pci_barx(struct octeon_device *oct,
+				      int baridx, int max_map_len)
+{
+	u32 mapped_len = 0;
+
+	if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
+		dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
+			baridx);
+		return 1;
+	}
+
+	oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
+	oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);
+
+	mapped_len = oct->mmio[baridx].len;
+	if (!mapped_len)
+		return 1;
+
+	if (max_map_len && (mapped_len > max_map_len))
+		mapped_len = max_map_len;
+
+	oct->mmio[baridx].hw_addr =
+		ioremap(oct->mmio[baridx].start, mapped_len);
+	oct->mmio[baridx].mapped_len = mapped_len;
+
+	dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
+		baridx, oct->mmio[baridx].start, mapped_len,
+		oct->mmio[baridx].len);
+
+	if (!oct->mmio[baridx].hw_addr) {
+		dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
+			baridx);
+		return 1;
+	}
+	oct->mmio[baridx].done = 1;
+
+	return 0;
+}
+
+static inline void *
+cnnic_alloc_aligned_dma(struct pci_dev *pci_dev,
+			u32 size,
+			u32 *alloc_size,
+			size_t *orig_ptr,
+			size_t *dma_addr __attribute__((unused)))
+{
+	int retries = 0;
+	void *ptr = NULL;
+
+#define OCTEON_MAX_ALLOC_RETRIES     1
+	do {
+		ptr =
+		    (void *)__get_free_pages(GFP_KERNEL,
+					     get_order(size));
+		if ((unsigned long)ptr & 0x07) {
+			free_pages((unsigned long)ptr, get_order(size));
+			ptr = NULL;
+			/* Increment the size required if the first
+			 * attempt failed.
+			 */
+			if (!retries)
+				size += 7;
+		}
+		retries++;
+	} while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);
+
+	*alloc_size = size;
+	*orig_ptr = (unsigned long)ptr;
+	if ((unsigned long)ptr & 0x07)
+		ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
+	return ptr;
+}
+
+#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
+		free_pages(orig_ptr, get_order(size))
+
+static inline void
+sleep_cond(wait_queue_head_t *wait_queue, int *condition)
+{
+	wait_queue_t we;
+
+	init_waitqueue_entry(&we, current);
+	add_wait_queue(wait_queue, &we);
+	while (!(ACCESS_ONCE(*condition))) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (signal_pending(current))
+			goto out;
+		schedule();
+	}
+out:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(wait_queue, &we);
+}
+
+static inline void
+sleep_atomic_cond(wait_queue_head_t *waitq, atomic_t *pcond)
+{
+	wait_queue_t we;
+
+	init_waitqueue_entry(&we, current);
+	add_wait_queue(waitq, &we);
+	while (!atomic_read(pcond)) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (signal_pending(current))
+			goto out;
+		schedule();
+	}
+out:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(waitq, &we);
+}
+
+/* Gives up the CPU for a timeout period.
+ * Checks that the condition is not already true before going to sleep
+ * for the timeout period.
+ */
+static inline void
+sleep_timeout_cond(wait_queue_head_t *wait_queue,
+		   int *condition,
+		   int timeout)
+{
+	wait_queue_t we;
+
+	init_waitqueue_entry(&we, current);
+	add_wait_queue(wait_queue, &we);
+	set_current_state(TASK_INTERRUPTIBLE);
+	if (!(*condition))
+		schedule_timeout(timeout);
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(wait_queue, &we);
+}
+
+#ifndef ROUNDUP4
+#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
+#endif
+
+#ifndef ROUNDUP8
+#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
+#endif
+
+#ifndef ROUNDUP16
+#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
+#endif
+
+#ifndef ROUNDUP128
+#define ROUNDUP128(val) (((val) + 127) & 0xffffff80)
+#endif
+
+#endif /* _OCTEON_MAIN_H_ */
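
The ROUNDUPn macros above use the usual add-and-mask idiom: adding n - 1 and
clearing the low bits rounds a value up to the next multiple of n, leaving
already-aligned values unchanged. A hypothetical helper showing the effect:

	static inline u32 example_pad_to_128(u32 len)
	{
		/* ROUNDUP128(1) == 128, ROUNDUP128(128) == 128,
		 * ROUNDUP128(129) == 256
		 */
		return ROUNDUP128(len);
	}

cnnic_alloc_aligned_dma() applies the same idiom with a mask of 7 when it
bumps an unaligned allocation to the next 8-byte boundary.
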
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
new file mode 100644
index 0000000..5aecef8
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -0,0 +1,199 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ *          Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+#define MEMOPS_IDX   MAX_BAR1_MAP_INDEX
+
+static inline void
+octeon_toggle_bar1_swapmode(struct octeon_device *oct __attribute__((unused)),
+			    u32 idx __attribute__((unused)))
+{
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32 mask;
+
+	mask = oct->fn_list.bar1_idx_read(oct, idx);
+	mask = (mask & 0x2) ? (mask & ~2) : (mask | 2);
+	oct->fn_list.bar1_idx_write(oct, idx, mask);
+#endif
+}
+
+static void
+octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
+		     u8 *hostbuf, u32 len)
+{
+	while (len && ((unsigned long)mapped_addr & 7)) {
+		writeb(*(hostbuf++), mapped_addr++);
+		len--;
+	}
+
+	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+	while (len >= 8) {
+		writeq(*((u64 *)hostbuf), mapped_addr);
+		mapped_addr += 8;
+		hostbuf += 8;
+		len -= 8;
+	}
+
+	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+	while (len--)
+		writeb(*(hostbuf++), mapped_addr++);
+}
+
+static void
+octeon_pci_fastread(struct octeon_device *oct, u8 __iomem *mapped_addr,
+		    u8 *hostbuf, u32 len)
+{
+	while (len && ((unsigned long)mapped_addr & 7)) {
+		*(hostbuf++) = readb(mapped_addr++);
+		len--;
+	}
+
+	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+	while (len >= 8) {
+		*((u64 *)hostbuf) = readq(mapped_addr);
+		mapped_addr += 8;
+		hostbuf += 8;
+		len -= 8;
+	}
+
+	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+	while (len--)
+		*(hostbuf++) = readb(mapped_addr++);
+}
+
+/* Core mem read/write with temporary bar1 settings. */
+/* op = 1 to read, op = 0 to write. */
+static void
+__octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr,
+			 u8 *hostbuf, u32 len, u32 op)
+{
+	u32 copy_len = 0, index_reg_val = 0;
+	unsigned long flags;
+	u8 __iomem *mapped_addr;
+
+	spin_lock_irqsave(&oct->mem_access_lock, flags);
+
+	/* Save the original index reg value. */
+	index_reg_val = oct->fn_list.bar1_idx_read(oct, MEMOPS_IDX);
+	do {
+		oct->fn_list.bar1_idx_setup(oct, addr, MEMOPS_IDX, 1);
+		mapped_addr = oct->mmio[1].hw_addr
+		    + (MEMOPS_IDX << 22) + (addr & 0x3fffff);
+
+		/* If the operation crosses a 4MB boundary, split the
+		 * transfer at the 4MB boundary.
+		 */
+		if (((addr + len - 1) & ~(0x3fffff)) != (addr & ~(0x3fffff))) {
+			copy_len = (u32)(((addr & ~(0x3fffff)) +
+				   (MEMOPS_IDX << 22)) - addr);
+		} else {
+			copy_len = len;
+		}
+
+		if (op) {	/* read from core */
+			octeon_pci_fastread(oct, mapped_addr, hostbuf,
+					    copy_len);
+		} else {
+			octeon_pci_fastwrite(oct, mapped_addr, hostbuf,
+					     copy_len);
+		}
+
+		len -= copy_len;
+		addr += copy_len;
+		hostbuf += copy_len;
+
+	} while (len);
+
+	oct->fn_list.bar1_idx_write(oct, MEMOPS_IDX, index_reg_val);
+
+	spin_unlock_irqrestore(&oct->mem_access_lock, flags);
+}
+
+void
+octeon_pci_read_core_mem(struct octeon_device *oct,
+			 u64 coreaddr,
+			 u8 *buf,
+			 u32 len)
+{
+	__octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 1);
+}
+
+void
+octeon_pci_write_core_mem(struct octeon_device *oct,
+			  u64 coreaddr,
+			  u8 *buf,
+			  u32 len)
+{
+	__octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 0);
+}
+
+u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr)
+{
+	__be64 ret;
+
+	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 8, 1);
+
+	return be64_to_cpu(ret);
+}
+
+u32 octeon_read_device_mem32(struct octeon_device *oct, u64 coreaddr)
+{
+	__be32 ret;
+
+	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 4, 1);
+
+	return be32_to_cpu(ret);
+}
+
+void octeon_write_device_mem32(struct octeon_device *oct, u64 coreaddr,
+			       u32 val)
+{
+	__be32 t = cpu_to_be32(val);
+
+	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&t, 4, 0);
+}
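
__octeon_pci_rw_core_mem() above maps core memory through a single 4MB BAR1
window, so a transfer that crosses a window boundary must be split. A
standalone restatement of the boundary test, for illustration only:

	/* A 4MB window is identified by the upper bits of the address
	 * (addr & ~0x3fffff); a transfer crosses a window when its first
	 * and last bytes fall in different windows.
	 */
	static bool example_crosses_4mb_window(u64 addr, u32 len)
	{
		return ((addr + len - 1) & ~0x3fffffULL) !=
		       (addr & ~0x3fffffULL);
	}
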
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
new file mode 100644
index 0000000..11b1833
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
@@ -0,0 +1,75 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ *          Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*!  \file octeon_mem_ops.h
+ *   \brief Host Driver: Routines used to read/write Octeon memory.
+ */
+
+#ifndef __OCTEON_MEM_OPS_H__
+#define __OCTEON_MEM_OPS_H__
+
+/**  Read a 64-bit value from a BAR1 mapped core memory address.
+ *   @param  oct        -  pointer to the octeon device.
+ *   @param  core_addr  -  the address to read from.
+ *
+ *   The range_idx gives the BAR1 index register for the range of addresses
+ *   in which core_addr is mapped.
+ *
+ *   @return  64-bit value read from Core memory
+ */
+u64 octeon_read_device_mem64(struct octeon_device *oct, u64 core_addr);
+
+/**  Read a 32-bit value from a BAR1 mapped core memory address.
+ *   @param  oct        -  pointer to the octeon device.
+ *   @param  core_addr  -  the address to read from.
+ *
+ *   @return  32-bit value read from Core memory
+ */
+u32 octeon_read_device_mem32(struct octeon_device *oct, u64 core_addr);
+
+/**  Write a 32-bit value to a BAR1 mapped core memory address.
+ *   @param  oct        -  pointer to the octeon device.
+ *   @param  core_addr  -  the address to write to.
+ *   @param  val        -  32-bit value to write.
+ */
+void
+octeon_write_device_mem32(struct octeon_device *oct,
+			  u64 core_addr,
+			  u32 val);
+
+/** Read multiple bytes from Octeon memory.
+ */
+void
+octeon_pci_read_core_mem(struct octeon_device *oct,
+			 u64 coreaddr,
+			 u8 *buf,
+			 u32 len);
+
+/** Write multiple bytes into Octeon memory.
+ */
+void
+octeon_pci_write_core_mem(struct octeon_device *oct,
+			  u64 coreaddr,
+			  u8 *buf,
+			  u32 len);
+
+#endif
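
A short usage sketch for the accessors declared above; the core address is
made up for illustration, and all byte swapping is handled inside the
accessors:

	static u32 example_read_and_clear(struct octeon_device *oct)
	{
		u64 counter_addr = 0x100000ULL;	/* hypothetical address */
		u32 val = octeon_read_device_mem32(oct, counter_addr);

		octeon_write_device_mem32(oct, counter_addr, 0);
		return val;
	}
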
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
new file mode 100644
index 0000000..b3abe58
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -0,0 +1,224 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ *          Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*!  \file  octeon_network.h
+ *   \brief Host NIC Driver: Structure and Macro definitions used by NIC Module.
+ */
+
+#ifndef __OCTEON_NETWORK_H__
+#define __OCTEON_NETWORK_H__
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#include <linux/ptp_clock_kernel.h>
+
+/** LiquidIO per-interface network private data */
+struct lio {
+	/** State of the interface. Rx/Tx happens only in the RUNNING state.  */
+	atomic_t ifstate;
+
+	/** Octeon Interface index number. This device will be represented as
+	 *  oct<ifidx> in the system.
+	 */
+	int ifidx;
+
+	/** Octeon Input queue to use to transmit for this network interface. */
+	int txq;
+
+	/** Octeon Output queue from which pkts arrive
+	 * for this network interface.
+	 */
+	int rxq;
+
+	/** Guards the glist */
+	spinlock_t lock;
+
+	/** Linked list of gather components */
+	struct list_head glist;
+
+	/** Pointer to the NIC properties for the Octeon device this network
+	 *  interface is associated with.
+	 */
+	struct octdev_props *octprops;
+
+	/** Pointer to the octeon device structure. */
+	struct octeon_device *oct_dev;
+
+	struct net_device *netdev;
+
+	/** Link information sent by the core application for this interface. */
+	struct oct_link_info linfo;
+
+	/** Size of Tx queue for this octeon device. */
+	u32 tx_qsize;
+
+	/** Size of Rx queue for this octeon device. */
+	u32 rx_qsize;
+
+	/** MTU of this octeon device. */
+	u32 mtu;
+
+	/** msg level flag per interface. */
+	u32 msg_enable;
+
+	/** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
+	u64 dev_capability;
+
+	/** Copy of beacon reg in phy */
+	u32 phy_beacon_val;
+
+	/** Copy of ctrl reg in phy */
+	u32 led_ctrl_val;
+
+	/* PTP clock information */
+	struct ptp_clock_info ptp_info;
+	struct ptp_clock *ptp_clock;
+	s64 ptp_adjust;
+
+	/* for atomic access to Octeon PTP reg and data struct */
+	spinlock_t ptp_lock;
+
+	/* Interface info */
+	u32	intf_open;
+
+	/* work queue for txq status */
+	struct cavium_wq	txq_status_wq;
+
+};
+
+#define LIO_SIZE         (sizeof(struct lio))
+#define GET_LIO(netdev)  ((struct lio *)netdev_priv(netdev))
+
+/**
+ * \brief Enable or disable feature
+ * @param netdev    pointer to network device
+ * @param cmd       Command that just requires acknowledgment
+ */
+int liquidio_set_feature(struct net_device *netdev, int cmd);
+
+/**
+ * \brief Link control command completion callback
+ * @param nctrl_ptr pointer to control packet structure
+ *
+ * This routine is called by the callback function when a ctrl pkt sent to
+ * core app completes. The nctrl_ptr contains a copy of the command type
+ * and data sent to the core app. This routine is only called if the ctrl
+ * pkt was sent successfully to the core app.
+ */
+void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
+
+/**
+ * \brief Register ethtool operations
+ * @param netdev    pointer to network device
+ */
+void liquidio_set_ethtool_ops(struct net_device *netdev);
+
+static inline void
+*recv_buffer_alloc(struct octeon_device *oct __attribute__((unused)),
+		   u32 q_no __attribute__((unused)), u32 size)
+{
+#define SKB_ADJ_MASK  0x3F
+#define SKB_ADJ       (SKB_ADJ_MASK + 1)
+
+	struct sk_buff *skb = dev_alloc_skb(size + SKB_ADJ);
+
+	if (unlikely(!skb))
+		return NULL;
+
+	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
+		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
+
+		skb_reserve(skb, r);
+	}
+
+	return (void *)skb;
+}
+
+static inline void recv_buffer_free(void *buffer)
+{
+	dev_kfree_skb_any((struct sk_buff *)buffer);
+}
+
+#define lio_dma_alloc(oct, size, dma_addr) \
+	dma_alloc_coherent(&oct->pci_dev->dev, size, dma_addr, GFP_KERNEL)
+#define lio_dma_free(oct, size, virt_addr, dma_addr) \
+	dma_free_coherent(&oct->pci_dev->dev, size, virt_addr, dma_addr)
+
+#define   get_rbd(ptr)      (((struct sk_buff *)(ptr))->data)
+
+static inline u64
+lio_map_ring_info(struct octeon_droq *droq, u32 i)
+{
+	dma_addr_t dma_addr;
+	struct octeon_device *oct = droq->oct_dev;
+
+	dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
+				  OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);
+
+	BUG_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
+
+	return (u64)dma_addr;
+}
+
+static inline void
+lio_unmap_ring_info(struct pci_dev *pci_dev,
+		    u64 info_ptr, u32 size)
+{
+	dma_unmap_single(&pci_dev->dev, info_ptr, size, DMA_FROM_DEVICE);
+}
+
+static inline u64
+lio_map_ring(struct pci_dev *pci_dev,
+	     void *buf, u32 size)
+{
+	dma_addr_t dma_addr;
+
+	dma_addr = dma_map_single(&pci_dev->dev, get_rbd(buf), size,
+				  DMA_FROM_DEVICE);
+
+	BUG_ON(dma_mapping_error(&pci_dev->dev, dma_addr));
+
+	return (u64)dma_addr;
+}
+
+static inline void
+lio_unmap_ring(struct pci_dev *pci_dev,
+	       u64 buf_ptr, u32 size)
+{
+	dma_unmap_single(&pci_dev->dev,
+			 buf_ptr, size,
+			 DMA_FROM_DEVICE);
+}
+
+static inline void *octeon_fast_packet_alloc(struct octeon_device *oct,
+					     struct octeon_droq *droq,
+					     u32 q_no, u32 size)
+{
+	return recv_buffer_alloc(oct, q_no, size);
+}
+
+static inline void octeon_fast_packet_next(struct octeon_droq *droq,
+					   struct sk_buff *nicbuf,
+					   int copy_len,
+					   int idx)
+{
+	memcpy(skb_put(nicbuf, copy_len),
+	       get_rbd(droq->recv_buf_list[idx].buffer), copy_len);
+}
+
+#endif
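
recv_buffer_alloc() above over-allocates by SKB_ADJ and then reserves just
enough headroom to land skb->data on a 64-byte boundary. The same
computation in isolation, as a sketch:

	static unsigned long example_align64(unsigned long data)
	{
		unsigned long mask = 0x3f;	/* SKB_ADJ_MASK */

		if (data & mask)
			data += (mask + 1) - (data & mask);
		return data;	/* e.g. 0x1001 -> 0x1040; 0x1040 unchanged */
	}
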
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
new file mode 100644
index 0000000..1a01915
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -0,0 +1,189 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ *          Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+void *
+octeon_alloc_soft_command_resp(struct octeon_device    *oct,
+			       struct octeon_instr_64B *cmd,
+			       size_t		       rdatasize)
+{
+	struct octeon_soft_command *sc;
+	struct octeon_instr_ih  *ih;
+	struct octeon_instr_irh *irh;
+	struct octeon_instr_rdp *rdp;
+
+	sc = (struct octeon_soft_command *)
+		octeon_alloc_soft_command(oct, 0, rdatasize, 0);
+
+	if (!sc)
+		return NULL;
+
+	/* Copy existing command structure into the soft command */
+	memcpy(&sc->cmd, cmd, sizeof(struct octeon_instr_64B));
+
+	/* Add in the response related fields. Opcode and Param are already
+	 * there.
+	 */
+	ih      = (struct octeon_instr_ih *)&sc->cmd.ih;
+	ih->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+
+	irh        = (struct octeon_instr_irh *)&sc->cmd.irh;
+	irh->rflag = 1; /* a response is required */
+	irh->len   = 4; /* means four 64-bit words immediately follow irh */
+
+	rdp            = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+	rdp->pcie_port = oct->pcie_port;
+	rdp->rlen      = rdatasize;
+
+	*sc->status_word = COMPLETION_WORD_INIT;
+
+	sc->wait_time = 1000;
+	sc->timeout = jiffies + sc->wait_time;
+
+	return sc;
+}
+
+int octnet_send_nic_data_pkt(struct octeon_device *oct,
+			     struct octnic_data_pkt *ndata,
+			     u32 xmit_more)
+{
+	int ring_doorbell;
+
+	ring_doorbell = !xmit_more;
+
+	return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd,
+				   ndata->buf, ndata->datasize,
+				   ndata->reqtype);
+}
+
+static void octnet_link_ctrl_callback(struct octeon_device *oct,
+				      u32 status,
+				      void *sc_ptr)
+{
+	struct octeon_soft_command *sc = (struct octeon_soft_command *)sc_ptr;
+	struct octnic_ctrl_pkt *nctrl;
+
+	nctrl = (struct octnic_ctrl_pkt *)sc->ctxptr;
+
+	/* Call the callback function if status is OK.
+	 * Status is OK only if a response was expected and core returned
+	 * success.
+	 * If no response was expected, status is OK if the command was posted
+	 * successfully.
+	 */
+	if (!status && nctrl->cb_fn)
+		nctrl->cb_fn(nctrl);
+
+	octeon_free_soft_command(oct, sc);
+}
+
+static inline struct octeon_soft_command
+*octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct,
+			  struct octnic_ctrl_pkt *nctrl,
+			  struct octnic_ctrl_params nparams)
+{
+	struct octeon_soft_command *sc = NULL;
+	u8 *data;
+	size_t rdatasize;
+	u32 uddsize = 0, datasize = 0;
+
+	uddsize = (u32)(nctrl->ncmd.s.more * 8);
+
+	datasize = OCTNET_CMD_SIZE + uddsize;
+	rdatasize = (nctrl->wait_time) ? 16 : 0;
+
+	sc = (struct octeon_soft_command *)
+		octeon_alloc_soft_command(oct, datasize, rdatasize,
+					  sizeof(struct octnic_ctrl_pkt));
+
+	if (!sc)
+		return NULL;
+
+	memcpy(sc->ctxptr, nctrl, sizeof(struct octnic_ctrl_pkt));
+
+	data = (u8 *)sc->virtdptr;
+
+	memcpy(data, &nctrl->ncmd,  OCTNET_CMD_SIZE);
+
+	octeon_swap_8B_data((u64 *)data, (OCTNET_CMD_SIZE >> 3));
+
+	if (uddsize) {
+		/* Endian-Swap for UDD should have been done by caller. */
+		memcpy(data + OCTNET_CMD_SIZE, nctrl->udd, uddsize);
+	}
+
+	octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD,
+				    0, 0, 0);
+
+	sc->callback = octnet_link_ctrl_callback;
+	sc->callback_arg = sc;
+	sc->wait_time = nctrl->wait_time;
+
+	return sc;
+}
+
+int
+octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
+			 struct octnic_ctrl_pkt *nctrl,
+			 struct octnic_ctrl_params nparams)
+{
+	int retval;
+	struct octeon_soft_command *sc = NULL;
+
+	sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl, nparams);
+	if (!sc) {
+		dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n",
+			__func__);
+		return -1;
+	}
+
+	retval = octeon_send_soft_command(oct, sc);
+	if (retval) {
+		octeon_free_soft_command(oct, sc);
+		dev_err(&oct->pci_dev->dev, "%s soft command send failed status: %x\n",
+			__func__, retval);
+		return -1;
+	}
+
+	return retval;
+}
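
A hedged sketch of a caller of octnet_send_nic_ctrl_pkt(); the command word,
wait time, resp_order value and callback are illustrative assumptions, not
values taken from this patch:

	static void example_ctrl_done(void *nctrl_ptr)
	{
		struct octnic_ctrl_pkt *nctrl = nctrl_ptr;

		pr_info("ctrl cmd 0x%llx completed\n", nctrl->ncmd.u64);
	}

	static int example_send_ctrl(struct octeon_device *oct, u64 cmd64)
	{
		struct octnic_ctrl_pkt nctrl;
		struct octnic_ctrl_params nparams;

		memset(&nctrl, 0, sizeof(nctrl));
		nctrl.ncmd.u64 = cmd64;		/* assumed union member */
		nctrl.wait_time = 100;		/* non-zero: expect a response */
		nctrl.cb_fn = example_ctrl_done;

		nparams.resp_order = 0;		/* illustrative value */
		return octnet_send_nic_ctrl_pkt(oct, &nctrl, nparams);
	}
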
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
new file mode 100644
index 0000000..0238857
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -0,0 +1,227 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ *          Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*!  \file octeon_nic.h
+ *   \brief Host NIC Driver: Routine to send network data &
+ *   control packet to Octeon.
+ */
+
+#ifndef __OCTEON_NIC_H__
+#define  __OCTEON_NIC_H__
+
+/* Maximum number of 8-byte words that can be sent in a NIC control message.
+ */
+#define  MAX_NCTRL_UDD  32
+
+typedef void (*octnic_ctrl_pkt_cb_fn_t) (void *);
+
+/* Structure of control information passed by the NIC module to the OSI
+ * layer when sending control commands to Octeon device software.
+ */
+struct octnic_ctrl_pkt {
+	/** Command to be passed to the Octeon device software. */
+	union octnet_cmd ncmd;
+
+	/** Send buffer  */
+	void *data;
+	u64 dmadata;
+
+	/** Response buffer */
+	void *rdata;
+	u64 dmardata;
+
+	/** Additional data that may be needed by some commands. */
+	u64 udd[MAX_NCTRL_UDD];
+
+	/** Time to wait for Octeon software to respond to this control command.
+	 *  If wait_time is 0, OSI assumes no response is expected.
+	 */
+	size_t wait_time;
+
+	/** The network device that issued the control command. */
+	u64 netpndev;
+
+	/** Callback function called when the command has been fetched */
+	octnic_ctrl_pkt_cb_fn_t cb_fn;
+};
+
+#define MAX_UDD_SIZE(nctrl) (sizeof(nctrl->udd))
+
+/** Structure of data information passed by the NIC module to the OSI
+ * layer when forwarding data to Octeon device software.
+ */
+struct octnic_data_pkt {
+	/** Pointer to information maintained by NIC module for this packet. The
+	 *  OSI layer passes this as-is to the driver.
+	 */
+	void *buf;
+
+	/** Type of buffer passed in "buf" above. */
+	u32 reqtype;
+
+	/** Total data bytes to be transferred in this command. */
+	u32 datasize;
+
+	/** Command to be passed to the Octeon device software. */
+	struct octeon_instr_64B cmd;
+
+	/** Input queue to use to send this command. */
+	u32 q_no;
+
+};
+
+/** Structure passed by NIC module to OSI layer to prepare a command to send
+ * network data to Octeon.
+ */
+union octnic_cmd_setup {
+	struct {
+		u32 ifidx:8;
+		u32 cksum_offset:7;
+		u32 gather:1;
+		u32 timestamp:1;
+		u32 ipv4opts_ipv6exthdr:2;
+		u32 ip_csum:1;
+		u32 tnl_csum:1;
+
+		u32 rsvd:11;
+		union {
+			u32 datasize;
+			u32 gatherptrs;
+		} u;
+	} s;
+
+	u64 u64;
+
+};
+
+struct octnic_ctrl_params {
+	u32 resp_order;
+};
+
+static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no)
+{
+	return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending)
+		>= (oct->instr_queue[q_no]->max_count - 2));
+}
+
+/** Utility function to prepare a 64B NIC instruction based on a setup command
+ * @param cmd - pointer to instruction to be filled in.
+ * @param setup - pointer to the setup structure
+ * @param tag - tag for the instruction; if 0, a default based on ifidx is used
+ *
+ * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
+ */
+static inline void
+octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd,
+		       union octnic_cmd_setup *setup, u32 tag)
+{
+	struct octeon_instr_ih *ih;
+	struct octeon_instr_irh *irh;
+	union octnic_packet_params packet_params;
+
+	memset(cmd, 0, sizeof(struct octeon_instr_64B));
+
+	ih = (struct octeon_instr_ih *)&cmd->ih;
+
+	/* assume that rflag is cleared, so the front data will only have
+	 * irh, ossp[0] and ossp[1] for a total of 24 bytes
+	 */
+	ih->fsz = 24;
+
+	ih->tagtype = ORDERED_TAG;
+	ih->grp = DEFAULT_POW_GRP;
+
+	if (tag)
+		ih->tag = tag;
+	else
+		ih->tag = LIO_DATA(setup->s.ifidx);
+
+	ih->raw = 1;
+	ih->qos = (setup->s.ifidx & 3) + 4;	/* map qos based on interface */
+
+	if (!setup->s.gather) {
+		ih->dlengsz = setup->s.u.datasize;
+	} else {
+		ih->gather = 1;
+		ih->dlengsz = setup->s.u.gatherptrs;
+	}
+
+	irh = (struct octeon_instr_irh *)&cmd->irh;
+
+	irh->opcode = OPCODE_NIC;
+	irh->subcode = OPCODE_NIC_NW_DATA;
+
+	packet_params.u32 = 0;
+
+	if (setup->s.cksum_offset) {
+		packet_params.s.csoffset = setup->s.cksum_offset;
+		packet_params.s.ipv4opts_ipv6exthdr =
+						setup->s.ipv4opts_ipv6exthdr;
+	}
+
+	packet_params.s.ip_csum = setup->s.ip_csum;
+	packet_params.s.tnl_csum = setup->s.tnl_csum;
+	packet_params.s.ifidx = setup->s.ifidx;
+	packet_params.s.tsflag = setup->s.timestamp;
+
+	irh->ossp = packet_params.u32;
+}
+
+/** Allocate a soft command with space for a response immediately following
+ * the command.
+ * @param oct - octeon device pointer
+ * @param cmd - pointer to the command structure, pre-filled for everything
+ * except the response.
+ * @param rdatasize - size in bytes of the response.
+ *
+ * @returns pointer to allocated buffer with command copied into it, and
+ * response space immediately following.
+ */
+void *
+octeon_alloc_soft_command_resp(struct octeon_device    *oct,
+			       struct octeon_instr_64B *cmd,
+			       size_t		       rdatasize);
+
+/** Send a NIC data packet to the device
+ * @param oct - octeon device pointer
+ * @param ndata - control structure with queueing, and buffer information
+ *
+ * @returns IQ_FAILED if it failed to add to the input queue, IQ_STOP if the
+ * queue should be stopped, and IQ_SEND_OK if it sent okay.
+ */
+int octnet_send_nic_data_pkt(struct octeon_device *oct,
+			     struct octnic_data_pkt *ndata, u32 xmit_more);
+
+/** Send a NIC control packet to the device
+ * @param oct - octeon device pointer
+ * @param nctrl - control structure with command, timeout, and callback info
+ * @param nparams - response control structure
+ *
+ * @returns IQ_FAILED if it failed to add to the input queue, IQ_STOP if the
+ * queue should be stopped, and IQ_SEND_OK if it sent okay.
+ */
+int
+octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
+			 struct octnic_ctrl_pkt *nctrl,
+			 struct octnic_ctrl_params nparams);
+
+#endif
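
A minimal sketch of driving octnet_prepare_pci_cmd() above for a non-gather
send with checksum offload disabled; the field values are illustrative:

	static void example_prepare(struct octeon_instr_64B *cmd,
				    u32 ifidx, u32 datasize)
	{
		union octnic_cmd_setup cmdsetup;

		cmdsetup.u64 = 0;
		cmdsetup.s.ifidx = ifidx;
		cmdsetup.s.u.datasize = datasize;

		/* tag == 0 selects the default LIO_DATA(ifidx) tag */
		octnet_prepare_pci_cmd(cmd, &cmdsetup, 0);
	}
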
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
new file mode 100644
index 0000000..356796b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -0,0 +1,766 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ *          Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+
+#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)  \
+	(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
+
+struct iq_post_status {
+	int status;
+	int index;
+};
+
+static void check_db_timeout(struct work_struct *work);
+static void  __check_db_timeout(struct octeon_device *oct, unsigned long iq_no);
+
+static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);
+
+static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
+{
+	struct octeon_instr_queue *iq =
+	    (struct octeon_instr_queue *)oct->instr_queue[iq_no];
+	return iq->iqcmd_64B;
+}
+
+#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))
+
+/* Define this to return the request status compatible with old code */
+/*#define OCTEON_USE_OLD_REQ_STATUS*/
+
+/* Return 0 on success, 1 on failure */
+int octeon_init_instr_queue(struct octeon_device *oct,
+			    u32 iq_no, u32 num_descs)
+{
+	struct octeon_instr_queue *iq;
+	struct octeon_iq_config *conf = NULL;
+	u32 q_size;
+	struct cavium_wq *db_wq;
+
+	if (OCTEON_CN6XXX(oct))
+		conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
+
+	if (!conf) {
+		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
+			oct->chip_id);
+		return 1;
+	}
+
+	if (num_descs & (num_descs - 1)) {
+		dev_err(&oct->pci_dev->dev,
+			"Number of descriptors for instr queue %d not in power of 2.\n",
+			iq_no);
+		return 1;
+	}
+
+	q_size = (u32)conf->instr_type * num_descs;
+
+	iq = oct->instr_queue[iq_no];
+
+	iq->base_addr = lio_dma_alloc(oct, q_size,
+				      (dma_addr_t *)&iq->base_addr_dma);
+	if (!iq->base_addr) {
+		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
+			iq_no);
+		return 1;
+	}
+
+	iq->max_count = num_descs;
+
+	/* Initialize a list to hold requests that have been posted to Octeon
+	 * but have yet to be fetched by Octeon
+	 */
+	iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs);
+	if (!iq->request_list) {
+		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
+		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
+			iq_no);
+		return 1;
+	}
+
+	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);
+
+	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
+		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);
+
+	iq->iq_no = iq_no;
+	iq->fill_threshold = (u32)conf->db_min;
+	iq->fill_cnt = 0;
+	iq->host_write_index = 0;
+	iq->octeon_read_index = 0;
+	iq->flush_index = 0;
+	iq->last_db_time = 0;
+	iq->do_auto_flush = 1;
+	iq->db_timeout = (u32)conf->db_timeout;
+	atomic_set(&iq->instr_pending, 0);
+
+	/* Initialize the spinlock for this instruction queue */
+	spin_lock_init(&iq->lock);
+
+	oct->io_qmask.iq |= (1 << iq_no);
+
+	/* Set the 32B/64B mode for each input queue */
+	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
+	iq->iqcmd_64B = (conf->instr_type == 64);
+
+	oct->fn_list.setup_iq_regs(oct, iq_no);
+
+	oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db");
+	if (!oct->check_db_wq[iq_no].wq) {
+		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
+		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
+			iq_no);
+		return 1;
+	}
+
+	db_wq = &oct->check_db_wq[iq_no];
+
+	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
+	db_wq->wk.ctxptr = oct;
+	db_wq->wk.ctxul = iq_no;
+	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
+
+	return 0;
+}
+
+int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
+{
+	u64 desc_size = 0, q_size;
+	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+
+	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
+	flush_workqueue(oct->check_db_wq[iq_no].wq);
+	destroy_workqueue(oct->check_db_wq[iq_no].wq);
+
+	if (OCTEON_CN6XXX(oct))
+		desc_size =
+		    CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));
+
+	if (iq->request_list)
+		vfree(iq->request_list);
+
+	if (iq->base_addr) {
+		q_size = iq->max_count * desc_size;
+		lio_dma_free(oct, (u32)q_size, iq->base_addr,
+			     iq->base_addr_dma);
+		return 0;
+	}
+	return 1;
+}
+
+/* Return 0 on success, 1 on failure */
+int octeon_setup_iq(struct octeon_device *oct,
+		    u32 iq_no,
+		    u32 num_descs,
+		    void *app_ctx)
+{
+	if (oct->instr_queue[iq_no]) {
+		dev_dbg(&oct->pci_dev->dev, "IQ %d is in use. Cannot create it again\n",
+			iq_no);
+		oct->instr_queue[iq_no]->app_ctx = app_ctx;
+		return 0;
+	}
+	oct->instr_queue[iq_no] =
+	    vmalloc(sizeof(struct octeon_instr_queue));
+	if (!oct->instr_queue[iq_no])
+		return 1;
+
+	memset(oct->instr_queue[iq_no], 0,
+	       sizeof(struct octeon_instr_queue));
+
+	oct->instr_queue[iq_no]->app_ctx = app_ctx;
+	if (octeon_init_instr_queue(oct, iq_no, num_descs)) {
+		vfree(oct->instr_queue[iq_no]);
+		oct->instr_queue[iq_no] = NULL;
+		return 1;
+	}
+
+	oct->num_iqs++;
+	oct->fn_list.enable_io_queues(oct);
+	return 0;
+}
+
+int lio_wait_for_instr_fetch(struct octeon_device *oct)
+{
+	int i, retry = 1000, pending, instr_cnt = 0;
+
+	do {
+		instr_cnt = 0;
+
+		/*for (i = 0; i < oct->num_iqs; i++) {*/
+		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+			if (!(oct->io_qmask.iq & (1UL << i)))
+				continue;
+			pending =
+			    atomic_read(&oct->instr_queue[i]->instr_pending);
+			if (pending)
+				__check_db_timeout(oct, i);
+			instr_cnt += pending;
+		}
+
+		if (instr_cnt == 0)
+			break;
+
+		schedule_timeout_uninterruptible(1);
+
+	} while (retry-- && instr_cnt);
+
+	return instr_cnt;
+}
+
+static inline void
+ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
+{
+	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
+		writel(iq->fill_cnt, iq->doorbell_reg);
+		/* make sure doorbell write goes through */
+		mmiowb();
+		iq->fill_cnt = 0;
+		iq->last_db_time = jiffies;
+		return;
+	}
+}
+
+static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
+				      u8 *cmd)
+{
+	u8 *iqptr, cmdsize;
+
+	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
+	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);
+
+	memcpy(iqptr, cmd, cmdsize);
+}
+
+static inline int
+__post_command(struct octeon_device *octeon_dev __attribute__((unused)),
+	       struct octeon_instr_queue *iq,
+	       u32 force_db __attribute__((unused)), u8 *cmd)
+{
+	u32 index = -1;
+
+	/* This ensures that the read index does not wrap around to the same
+	 * position if queue gets full before Octeon could fetch any instr.
+	 */
+	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1))
+		return -1;
+
+	__copy_cmd_into_iq(iq, cmd);
+
+	/* "index" is returned, host_write_index is modified. */
+	index = iq->host_write_index;
+	INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
+	iq->fill_cnt++;
+
+	/* Flush the command into memory. We need to be sure the data is in
+	 * memory before indicating that the instruction is pending.
+	 */
+	wmb();
+
+	atomic_inc(&iq->instr_pending);
+
+	return index;
+}
+
+static inline struct iq_post_status
+__post_command2(struct octeon_device *octeon_dev __attribute__((unused)),
+		struct octeon_instr_queue *iq,
+		u32 force_db __attribute__((unused)), u8 *cmd)
+{
+	struct iq_post_status st;
+
+	st.status = IQ_SEND_OK;
+
+	/* This ensures that the read index does not wrap around to the same
+	 * position if queue gets full before Octeon could fetch any instr.
+	 */
+	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
+		st.status = IQ_SEND_FAILED;
+		st.index = -1;
+		return st;
+	}
+
+	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
+		st.status = IQ_SEND_STOP;
+
+	__copy_cmd_into_iq(iq, cmd);
+
+	/* "index" is returned, host_write_index is modified. */
+	st.index = iq->host_write_index;
+	INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
+	iq->fill_cnt++;
+
+	/* Flush the command into memory. We need to be sure the data is in
+	 * memory before indicating that the instruction is pending.
+	 */
+	wmb();
+
+	atomic_inc(&iq->instr_pending);
+
+	return st;
+}
+
+int
+octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
+				void (*fn)(void *))
+{
+	if (reqtype > REQTYPE_LAST) {
+		dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
+			__func__, reqtype);
+		return -EINVAL;
+	}
+
+	reqtype_free_fn[oct->octeon_id][reqtype] = fn;
+
+	return 0;
+}
+
+static inline void
+__add_to_request_list(struct octeon_instr_queue *iq,
+		      int idx, void *buf, int reqtype)
+{
+	iq->request_list[idx].buf = buf;
+	iq->request_list[idx].reqtype = reqtype;
+}
+
+int
+lio_process_iq_request_list(struct octeon_device *oct,
+			    struct octeon_instr_queue *iq)
+{
+	int reqtype;
+	void *buf;
+	u32 old = iq->flush_index;
+	u32 inst_count = 0;
+	unsigned pkts_compl = 0, bytes_compl = 0;
+	struct octeon_soft_command *sc;
+	struct octeon_instr_irh *irh;
+
+	while (old != iq->octeon_read_index) {
+		reqtype = iq->request_list[old].reqtype;
+		buf     = iq->request_list[old].buf;
+
+		if (reqtype == REQTYPE_NONE)
+			goto skip_this;
+
+		octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
+						     &bytes_compl);
+
+		switch (reqtype) {
+		case REQTYPE_NORESP_NET:
+		case REQTYPE_NORESP_NET_SG:
+		case REQTYPE_RESP_NET_SG:
+			reqtype_free_fn[oct->octeon_id][reqtype](buf);
+			break;
+		case REQTYPE_RESP_NET:
+		case REQTYPE_SOFT_COMMAND:
+			sc = buf;
+
+			irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+			if (irh->rflag) {
+				/* We're expecting a response from Octeon.
+				 * It's up to lio_process_ordered_list() to
+				 * process  sc. Add sc to the ordered soft
+				 * command response list because we expect
+				 * a response from Octeon.
+				 */
+				spin_lock_bh(&oct->response_list
+					[OCTEON_ORDERED_SC_LIST].lock);
+				atomic_inc(&oct->response_list
+					[OCTEON_ORDERED_SC_LIST].
+					pending_req_count);
+				list_add_tail(&sc->node, &oct->response_list
+					[OCTEON_ORDERED_SC_LIST].head);
+				spin_unlock_bh(&oct->response_list
+					[OCTEON_ORDERED_SC_LIST].lock);
+			} else {
+				if (sc->callback) {
+					sc->callback(oct, OCTEON_REQUEST_DONE,
+						     sc->callback_arg);
+				}
+			}
+			break;
+		default:
+			dev_err(&oct->pci_dev->dev,
+				"%s Unknown reqtype: %d buf: %p at idx %d\n",
+				__func__, reqtype, buf, old);
+		}
+
+		iq->request_list[old].buf = NULL;
+		iq->request_list[old].reqtype = 0;
+
+ skip_this:
+		inst_count++;
+		INCR_INDEX_BY1(old, iq->max_count);
+	}
+	if (bytes_compl)
+		octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
+						   bytes_compl);
+	iq->flush_index = old;
+
+	return inst_count;
+}
+
+static inline void
+update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq)
+{
+	u32 inst_processed = 0;
+
+	/* Calculate how many commands Octeon has read and move the read index
+	 * accordingly.
+	 */
+	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq);
+
+	/* Move the NORESPONSE requests to the per-device completion list. */
+	if (iq->flush_index != iq->octeon_read_index)
+		inst_processed = lio_process_iq_request_list(oct, iq);
+
+	if (inst_processed) {
+		atomic_sub(inst_processed, &iq->instr_pending);
+		iq->stats.instr_processed += inst_processed;
+	}
+}
+
+static void
+octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
+		u32 pending_thresh)
+{
+	if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
+		spin_lock_bh(&iq->lock);
+		update_iq_indices(oct, iq);
+		spin_unlock_bh(&iq->lock);
+	}
+}
+
+static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no)
+{
+	struct octeon_instr_queue *iq;
+	u64 next_time;
+
+	if (!oct)
+		return;
+	iq = oct->instr_queue[iq_no];
+	if (!iq)
+		return;
+
+	/* If jiffies - last_db_time < db_timeout do nothing  */
+	next_time = iq->last_db_time + iq->db_timeout;
+	if (!time_after(jiffies, (unsigned long)next_time))
+		return;
+	iq->last_db_time = jiffies;
+
+	/* Get the lock and prevent tasklets. This routine gets called from
+	 * the poll thread. Instructions can now be posted in tasklet context
+	 */
+	spin_lock_bh(&iq->lock);
+	if (iq->fill_cnt != 0)
+		ring_doorbell(oct, iq);
+
+	spin_unlock_bh(&iq->lock);
+
+	/* Flush the instruction queue */
+	if (iq->do_auto_flush)
+		octeon_flush_iq(oct, iq, 1);
+}
+
+/* Called by the Poll thread at regular intervals to check the instruction
+ * queue for commands to be posted and for commands that were fetched by Octeon.
+ */
+static void check_db_timeout(struct work_struct *work)
+{
+	struct cavium_wk *wk = (struct cavium_wk *)work;
+	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
+	unsigned long iq_no = wk->ctxul;
+	struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
+
+	__check_db_timeout(oct, iq_no);
+	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
+}
+
+int
+octeon_send_command(struct octeon_device *oct, u32 iq_no,
+		    u32 force_db, void *cmd, void *buf,
+		    u32 datasize, u32 reqtype)
+{
+	struct iq_post_status st;
+	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+
+	spin_lock_bh(&iq->lock);
+
+	st = __post_command2(oct, iq, force_db, cmd);
+
+	if (st.status != IQ_SEND_FAILED) {
+		octeon_report_sent_bytes_to_bql(buf, reqtype);
+		__add_to_request_list(iq, st.index, buf, reqtype);
+		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
+		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
+
+		if (iq->fill_cnt >= iq->fill_threshold || force_db)
+			ring_doorbell(oct, iq);
+	} else {
+		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
+	}
+
+	spin_unlock_bh(&iq->lock);
+
+	if (iq->do_auto_flush)
+		octeon_flush_iq(oct, iq, 2);
+
+	return st.status;
+}
+
+void
+octeon_prepare_soft_command(struct octeon_device *oct,
+			    struct octeon_soft_command *sc,
+			    u8 opcode,
+			    u8 subcode,
+			    u32 irh_ossp,
+			    u64 ossp0,
+			    u64 ossp1)
+{
+	struct octeon_config *oct_cfg;
+	struct octeon_instr_ih *ih;
+	struct octeon_instr_irh *irh;
+	struct octeon_instr_rdp *rdp;
+
+	BUG_ON(opcode > 15);
+	BUG_ON(subcode > 127);
+
+	oct_cfg = octeon_get_conf(oct);
+
+	ih          = (struct octeon_instr_ih *)&sc->cmd.ih;
+	ih->tagtype = ATOMIC_TAG;
+	ih->tag     = LIO_CONTROL;
+	ih->raw     = 1;
+	ih->grp     = CFG_GET_CTRL_Q_GRP(oct_cfg);
+
+	if (sc->datasize) {
+		ih->dlengsz = sc->datasize;
+		ih->rs = 1;
+	}
+
+	irh            = (struct octeon_instr_irh *)&sc->cmd.irh;
+	irh->opcode    = opcode;
+	irh->subcode   = subcode;
+
+	/* opcode/subcode specific parameters (ossp) */
+	irh->ossp       = irh_ossp;
+	sc->cmd.ossp[0] = ossp0;
+	sc->cmd.ossp[1] = ossp1;
+
+	if (sc->rdatasize) {
+		rdp            = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+		rdp->pcie_port = oct->pcie_port;
+		rdp->rlen      = sc->rdatasize;
+
+		irh->rflag =  1;
+		irh->len   =  4;
+		ih->fsz    = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
+	} else {
+		irh->rflag =  0;
+		irh->len   =  2;
+		ih->fsz    = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
+	}
+
+	while (!(oct->io_qmask.iq & (1 << sc->iq_no)))
+		sc->iq_no++;
+}
+
+int octeon_send_soft_command(struct octeon_device *oct,
+			     struct octeon_soft_command *sc)
+{
+	struct octeon_instr_ih *ih;
+	struct octeon_instr_irh *irh;
+	struct octeon_instr_rdp *rdp;
+
+	ih = (struct octeon_instr_ih *)&sc->cmd.ih;
+	if (ih->dlengsz) {
+		BUG_ON(!sc->dmadptr);
+		sc->cmd.dptr = sc->dmadptr;
+	}
+
+	irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+	if (irh->rflag) {
+		BUG_ON(!sc->dmarptr);
+		BUG_ON(!sc->status_word);
+		*sc->status_word = COMPLETION_WORD_INIT;
+
+		rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+
+		sc->cmd.rptr = sc->dmarptr;
+	}
+
+	if (sc->wait_time)
+		sc->timeout = jiffies + sc->wait_time;
+
+	return octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
+				   (u32)ih->dlengsz, REQTYPE_SOFT_COMMAND);
+}
+
+int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
+{
+	int i;
+	u64 dma_addr;
+	struct octeon_soft_command *sc;
+
+	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
+	spin_lock_init(&oct->sc_buf_pool.lock);
+	atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);
+
+	for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
+		sc = (struct octeon_soft_command *)
+			lio_dma_alloc(oct,
+				      SOFT_COMMAND_BUFFER_SIZE,
+					  (dma_addr_t *)&dma_addr);
+		if (!sc)
+			return 1;
+
+		sc->dma_addr = dma_addr;
+		sc->size = SOFT_COMMAND_BUFFER_SIZE;
+
+		list_add_tail(&sc->node, &oct->sc_buf_pool.head);
+	}
+
+	return 0;
+}
+
+int octeon_free_sc_buffer_pool(struct octeon_device *oct)
+{
+	struct list_head *tmp, *tmp2;
+	struct octeon_soft_command *sc;
+
+	spin_lock(&oct->sc_buf_pool.lock);
+
+	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
+		list_del(tmp);
+
+		sc = (struct octeon_soft_command *)tmp;
+
+		lio_dma_free(oct, sc->size, sc, sc->dma_addr);
+	}
+
+	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
+
+	spin_unlock(&oct->sc_buf_pool.lock);
+
+	return 0;
+}
+
+struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
+						      u32 datasize,
+						      u32 rdatasize,
+						      u32 ctxsize)
+{
+	u64 dma_addr;
+	u32 size;
+	u32 offset = sizeof(struct octeon_soft_command);
+	struct octeon_soft_command *sc = NULL;
+	struct list_head *tmp;
+
+	BUG_ON((offset + datasize + rdatasize + ctxsize) >
+	       SOFT_COMMAND_BUFFER_SIZE);
+
+	spin_lock(&oct->sc_buf_pool.lock);
+
+	if (list_empty(&oct->sc_buf_pool.head)) {
+		spin_unlock(&oct->sc_buf_pool.lock);
+		return NULL;
+	}
+
+	list_for_each(tmp, &oct->sc_buf_pool.head)
+		break;
+
+	list_del(tmp);
+
+	atomic_inc(&oct->sc_buf_pool.alloc_buf_count);
+
+	spin_unlock(&oct->sc_buf_pool.lock);
+
+	sc = (struct octeon_soft_command *)tmp;
+
+	dma_addr = sc->dma_addr;
+	size = sc->size;
+
+	memset(sc, 0, sc->size);
+
+	sc->dma_addr = dma_addr;
+	sc->size = size;
+
+	if (ctxsize) {
+		sc->ctxptr = (u8 *)sc + offset;
+		sc->ctxsize = ctxsize;
+	}
+
+	/* Start data at 128 byte boundary */
+	offset = (offset + ctxsize + 127) & 0xffffff80;
+
+	if (datasize) {
+		sc->virtdptr = (u8 *)sc + offset;
+		sc->dmadptr = dma_addr + offset;
+		sc->datasize = datasize;
+	}
+
+	/* Start rdata at 128 byte boundary */
+	offset = (offset + datasize + 127) & 0xffffff80;
+
+	if (rdatasize) {
+		BUG_ON(rdatasize < 16);
+		sc->virtrptr = (u8 *)sc + offset;
+		sc->dmarptr = dma_addr + offset;
+		sc->rdatasize = rdatasize;
+		sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
+	}
+
+	return sc;
+}
+
+void octeon_free_soft_command(struct octeon_device *oct,
+			      struct octeon_soft_command *sc)
+{
+	spin_lock(&oct->sc_buf_pool.lock);
+
+	list_add_tail(&sc->node, &oct->sc_buf_pool.head);
+
+	atomic_dec(&oct->sc_buf_pool.alloc_buf_count);
+
+	spin_unlock(&oct->sc_buf_pool.lock);
+}
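
octeon_init_instr_queue() above rejects descriptor counts that are not a
power of two, using the standard bit trick; a power-of-two ring lets indices
wrap with a cheap mask. A condensed restatement (additionally rejecting
zero, which the driver's check does not cover at that point):

	static bool example_is_pow2(u32 num_descs)
	{
		return num_descs && !(num_descs & (num_descs - 1));
	}
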
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
new file mode 100644
index 0000000..091f537
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -0,0 +1,178 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ *          Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+
+static void oct_poll_req_completion(struct work_struct *work);
+
+int octeon_setup_response_list(struct octeon_device *oct)
+{
+	int i, ret = 0;
+	struct cavium_wq *cwq;
+
+	for (i = 0; i < MAX_RESPONSE_LISTS; i++) {
+		INIT_LIST_HEAD(&oct->response_list[i].head);
+		spin_lock_init(&oct->response_list[i].lock);
+		atomic_set(&oct->response_list[i].pending_req_count, 0);
+	}
+
+	oct->dma_comp_wq.wq = create_workqueue("dma-comp");
+	if (!oct->dma_comp_wq.wq) {
+		dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
+		return -ENOMEM;
+	}
+
+	cwq = &oct->dma_comp_wq;
+	INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
+	cwq->wk.ctxptr = oct;
+	queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
+
+	return ret;
+}
+
+void octeon_delete_response_list(struct octeon_device *oct)
+{
+	cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
+	flush_workqueue(oct->dma_comp_wq.wq);
+	destroy_workqueue(oct->dma_comp_wq.wq);
+}
+
+int lio_process_ordered_list(struct octeon_device *octeon_dev,
+			     u32 force_quit)
+{
+	struct octeon_response_list *ordered_sc_list;
+	struct octeon_soft_command *sc;
+	int request_complete = 0;
+	int resp_to_process = MAX_ORD_REQS_TO_PROCESS;
+	u32 status;
+	u64 status64;
+	struct octeon_instr_rdp *rdp;
+
+	ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];
+
+	do {
+		spin_lock_bh(&ordered_sc_list->lock);
+
+		if (ordered_sc_list->head.next == &ordered_sc_list->head) {
+			/* ordered_sc_list is empty; there is
+			 * nothing to process
+			 */
+			spin_unlock_bh(&ordered_sc_list->lock);
+			return 1;
+		}
+
+		sc = (struct octeon_soft_command *)ordered_sc_list->head.next;
+		rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+
+		status = OCTEON_REQUEST_PENDING;
+
+		/* check if octeon has finished DMA'ing a response
+		 * to where rptr is pointing to
+		 */
+		dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev,
+					sc->cmd.rptr, rdp->rlen,
+					DMA_FROM_DEVICE);
+		status64 = *sc->status_word;
+
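+		/* A status word still equal to COMPLETION_WORD_INIT means
+		 * the core has not responded yet; otherwise the word is
+		 * byte-swapped from the core's byte order and the 32-bit
+		 * status code is taken from its lower half.
+		 */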
+		if (status64 != COMPLETION_WORD_INIT) {
+			if ((status64 & 0xff) != 0xff) {
+				octeon_swap_8B_data(&status64, 1);
+				if ((status64 & 0xff) != 0xff) {
+					status = (u32)(status64 &
+						       0xffffffffULL);
+				}
+			}
+		} else if (force_quit || (sc->timeout &&
+			time_after(jiffies, (unsigned long)sc->timeout))) {
+			status = OCTEON_REQUEST_TIMEOUT;
+		}
+
+		if (status != OCTEON_REQUEST_PENDING) {
+			/* we have received a response or we have timed out */
+			/* remove node from linked list */
+			list_del(&sc->node);
+			atomic_dec(&octeon_dev->response_list
+					  [OCTEON_ORDERED_SC_LIST].
+					  pending_req_count);
+			spin_unlock_bh(&ordered_sc_list->lock);
+
+			if (sc->callback)
+				sc->callback(octeon_dev, status,
+					     sc->callback_arg);
+
+			request_complete++;
+
+		} else {
+			/* no response yet */
+			request_complete = 0;
+			spin_unlock_bh(&ordered_sc_list->lock);
+		}
+
+		/* If we hit the max number of ordered requests to process
+		 * per invocation, quit and let this function be invoked
+		 * again the next time the poll thread runs, to handle the
+		 * remaining requests.  Without this upper limit the function
+		 * could take up the entire CPU.
+		 */
+		if (request_complete >= resp_to_process)
+			break;
+	} while (request_complete);
+
+	return 0;
+}
+
+static void oct_poll_req_completion(struct work_struct *work)
+{
+	struct cavium_wk *wk = (struct cavium_wk *)work;
+	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
+	struct cavium_wq *cwq = &oct->dma_comp_wq;
+
+	lio_process_ordered_list(oct, 0);
+
+	queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.h b/drivers/net/ethernet/cavium/liquidio/response_manager.h
new file mode 100644
index 0000000..7a48752
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.h
@@ -0,0 +1,140 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ *          Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file response_manager.h
+ *  \brief Host Driver:  Response queues for host instructions.
+ */
+
+#ifndef __RESPONSE_MANAGER_H__
+#define __RESPONSE_MANAGER_H__
+
+/** Maximum ordered requests to process in every invocation of
+ * lio_process_ordered_list(). The function will continue to process requests
+ * as long as it can find one that has finished processing. If it keeps
+ * finding requests that have completed, the function can run forever. The
+ * value defined here sets an upper limit on the number of requests it can
+ * process before it returns control to the poll thread.
+ */
+#define  MAX_ORD_REQS_TO_PROCESS   4096
+
+/** Head of a response list. There are several response lists in the
+ *  system: one for each response order (unordered, ordered) and one for
+ *  no-response entries on each instruction queue.
+ */
+struct octeon_response_list {
+	/** List to which pending entries are added and deleted */
+	struct list_head head;
+
+	/** A lock for this response list */
+	spinlock_t lock;
+
+	atomic_t pending_req_count;
+};
+
+/** The type of response list.
+ */
+enum {
+	OCTEON_ORDERED_LIST = 0,
+	OCTEON_UNORDERED_NONBLOCKING_LIST = 1,
+	OCTEON_UNORDERED_BLOCKING_LIST = 2,
+	OCTEON_ORDERED_SC_LIST = 3
+};
+
+/** Response Order values for an Octeon Request. */
+enum {
+	OCTEON_RESP_ORDERED = 0,
+	OCTEON_RESP_UNORDERED = 1,
+	OCTEON_RESP_NORESPONSE = 2
+};
+
+/** Error codes used in Octeon Host-Core communication.
+ *
+ *   31            16 15            0
+ *   ---------------------------------
+ *   |               |               |
+ *   ---------------------------------
+ *   Error codes are 32-bit wide. The upper 16-bits, called Major Error Number,
+ *   are reserved to identify the group to which the error code belongs. The
+ *   lower 16-bits, called Minor Error Number, carry the actual code.
+ *
+ *   So error codes are (MAJOR_NUMBER << 16) | MINOR_NUMBER.
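+ *
+ *   For example, DRIVER_ERROR_REQ_TIMEOUT below is
+ *   (0x0000 << 16) | 0x0003 = 0x00000003.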
+ */
+
+/*------------   Error codes used by host driver   -----------------*/
+#define DRIVER_MAJOR_ERROR_CODE           0x0000
+
+/** A value of 0x00000000 indicates no error, i.e. success */
+#define DRIVER_ERROR_NONE                 0x00000000
+
+/**  (Major number: 0x0000; Minor Number: 0x0001) */
+#define DRIVER_ERROR_REQ_PENDING          0x00000001
+#define DRIVER_ERROR_REQ_TIMEOUT          0x00000003
+#define DRIVER_ERROR_REQ_EINTR            0x00000004
+#define DRIVER_ERROR_REQ_ENXIO            0x00000006
+#define DRIVER_ERROR_REQ_ENOMEM           0x0000000C
+#define DRIVER_ERROR_REQ_EINVAL           0x00000016
+#define DRIVER_ERROR_REQ_FAILED           0x000000ff
+
+/** Status for a request.
+ * If a request is not queued to Octeon by the driver, the driver returns
+ * an error condition that's described by one of the OCTEON_REQ_ERR_* values
+ * below. If the request is successfully queued, the driver will return
+ * an OCTEON_REQUEST_PENDING status. OCTEON_REQUEST_TIMEOUT and
+ * OCTEON_REQUEST_INTERRUPTED are only returned by the driver if the
+ * response for the request failed to arrive before a time-out period or if
+ * the request processing got interrupted due to a signal, respectively.
+ */
+enum {
+	OCTEON_REQUEST_DONE = (DRIVER_ERROR_NONE),
+	OCTEON_REQUEST_PENDING = (DRIVER_ERROR_REQ_PENDING),
+	OCTEON_REQUEST_TIMEOUT = (DRIVER_ERROR_REQ_TIMEOUT),
+	OCTEON_REQUEST_INTERRUPTED = (DRIVER_ERROR_REQ_EINTR),
+	OCTEON_REQUEST_NO_DEVICE = (0x00000021),
+	OCTEON_REQUEST_NOT_RUNNING,
+	OCTEON_REQUEST_INVALID_IQ,
+	OCTEON_REQUEST_INVALID_BUFCNT,
+	OCTEON_REQUEST_INVALID_RESP_ORDER,
+	OCTEON_REQUEST_NO_MEMORY,
+	OCTEON_REQUEST_INVALID_BUFSIZE,
+	OCTEON_REQUEST_NO_PENDING_ENTRY,
+	OCTEON_REQUEST_NO_IQ_SPACE = (0x7FFFFFFF)
+};
+
+/** Initialize the response lists.
+ * @param octeon_dev      - the octeon device structure.
+ */
+int octeon_setup_response_list(struct octeon_device *octeon_dev);
+
+void octeon_delete_response_list(struct octeon_device *octeon_dev);
+
+/** Check the status of the first entry in the ordered list. If the
+ * instruction at that entry has finished processing or has timed out, the
+ * entry is cleaned.
+ * @param octeon_dev  - the octeon device structure.
+ * @param force_quit - the request is forced to time out if this is 1
+ * @return 1 if the ordered list is empty, 0 otherwise.
+ */
+int lio_process_ordered_list(struct octeon_device *octeon_dev,
+			     u32 force_quit);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/thunder/Makefile b/drivers/net/ethernet/cavium/thunder/Makefile
new file mode 100644
index 0000000..5c4615c
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for Cavium's Thunder ethernet device
+#
+
+obj-$(CONFIG_THUNDER_NIC_BGX) += thunder_bgx.o
+obj-$(CONFIG_THUNDER_NIC_PF) += nicpf.o
+obj-$(CONFIG_THUNDER_NIC_VF) += nicvf.o
+
+nicpf-y := nic_main.o
+nicvf-y := nicvf_main.o nicvf_queues.o
+nicvf-y += nicvf_ethtool.o
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
new file mode 100644
index 0000000..dda8a02
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -0,0 +1,423 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef NIC_H
+#define	NIC_H
+
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include "thunder_bgx.h"
+
+/* PCI device IDs */
+#define	PCI_DEVICE_ID_THUNDER_NIC_PF		0xA01E
+#define	PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF	0x0011
+#define	PCI_DEVICE_ID_THUNDER_NIC_VF		0xA034
+#define	PCI_DEVICE_ID_THUNDER_BGX		0xA026
+
+/* PCI BAR numbers */
+#define	PCI_CFG_REG_BAR_NUM		0
+#define	PCI_MSIX_REG_BAR_NUM		4
+
+/* NIC SRIOV VF count */
+#define	MAX_NUM_VFS_SUPPORTED		128
+#define	DEFAULT_NUM_VF_ENABLED		8
+
+#define	NIC_TNS_BYPASS_MODE		0
+#define	NIC_TNS_MODE			1
+
+/* NIC priv flags */
+#define	NIC_SRIOV_ENABLED		BIT(0)
+
+/* Min/Max packet size */
+#define	NIC_HW_MIN_FRS			64
+#define	NIC_HW_MAX_FRS			9200 /* 9216 max packet including FCS */
+
+/* Max pkinds */
+#define	NIC_MAX_PKIND			16
+
+/* Rx Channels */
+/* Receive channel configuration in TNS bypass mode:
+ * BGX0-LMAC0-CHAN0 - VNIC CHAN0
+ * BGX0-LMAC1-CHAN0 - VNIC CHAN16
+ * ...
+ * BGX1-LMAC0-CHAN0 - VNIC CHAN128
+ * ...
+ * BGX1-LMAC3-CHAN0 - VNIC CHAN176
+ */
+#define	NIC_INTF_COUNT			2  /* Interfaces between VNIC and TNS/BGX */
+#define	NIC_CHANS_PER_INF		128
+#define	NIC_MAX_CHANS			(NIC_INTF_COUNT * NIC_CHANS_PER_INF)
+#define	NIC_CPI_COUNT			2048 /* Number of channel parse indices */
+
+/* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */
+#define NIC_MAX_BGX			MAX_BGX_PER_CN88XX
+#define	NIC_CPI_PER_BGX			(NIC_CPI_COUNT / NIC_MAX_BGX)
+#define	NIC_MAX_CPI_PER_LMAC		64 /* Max when CPI_ALG is IP diffserv */
+#define	NIC_RSSI_PER_BGX		(NIC_RSSI_COUNT / NIC_MAX_BGX)
+
+/* Tx scheduling */
+#define	NIC_MAX_TL4			1024
+#define	NIC_MAX_TL4_SHAPERS		256 /* 1 shaper for 4 TL4s */
+#define	NIC_MAX_TL3			256
+#define	NIC_MAX_TL3_SHAPERS		64  /* 1 shaper for 4 TL3s */
+#define	NIC_MAX_TL2			64
+#define	NIC_MAX_TL2_SHAPERS		2  /* 1 shaper for 32 TL2s */
+#define	NIC_MAX_TL1			2
+
+/* TNS bypass mode */
+#define	NIC_TL2_PER_BGX			32
+#define	NIC_TL4_PER_BGX			(NIC_MAX_TL4 / NIC_MAX_BGX)
+#define	NIC_TL4_PER_LMAC		(NIC_MAX_TL4 / NIC_CHANS_PER_INF)
+
+/* NIC VF Interrupts */
+#define	NICVF_INTR_CQ			0
+#define	NICVF_INTR_SQ			1
+#define	NICVF_INTR_RBDR			2
+#define	NICVF_INTR_PKT_DROP		3
+#define	NICVF_INTR_TCP_TIMER		4
+#define	NICVF_INTR_MBOX			5
+#define	NICVF_INTR_QS_ERR		6
+
+#define	NICVF_INTR_CQ_SHIFT		0
+#define	NICVF_INTR_SQ_SHIFT		8
+#define	NICVF_INTR_RBDR_SHIFT		16
+#define	NICVF_INTR_PKT_DROP_SHIFT	20
+#define	NICVF_INTR_TCP_TIMER_SHIFT	21
+#define	NICVF_INTR_MBOX_SHIFT		22
+#define	NICVF_INTR_QS_ERR_SHIFT		23
+
+#define	NICVF_INTR_CQ_MASK		(0xFF << NICVF_INTR_CQ_SHIFT)
+#define	NICVF_INTR_SQ_MASK		(0xFF << NICVF_INTR_SQ_SHIFT)
+#define	NICVF_INTR_RBDR_MASK		(0x03 << NICVF_INTR_RBDR_SHIFT)
+#define	NICVF_INTR_PKT_DROP_MASK	BIT(NICVF_INTR_PKT_DROP_SHIFT)
+#define	NICVF_INTR_TCP_TIMER_MASK	BIT(NICVF_INTR_TCP_TIMER_SHIFT)
+#define	NICVF_INTR_MBOX_MASK		BIT(NICVF_INTR_MBOX_SHIFT)
+#define	NICVF_INTR_QS_ERR_MASK		BIT(NICVF_INTR_QS_ERR_SHIFT)
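+
+/* e.g. the interrupt bit for completion queue 'n' within NICVF_INTR_CQ_MASK
+ * is BIT(NICVF_INTR_CQ_SHIFT + n); the 8-bit mask allows for 8 CQs per VF.
+ */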
+
+/* MSI-X interrupts */
+#define	NIC_PF_MSIX_VECTORS		10
+#define	NIC_VF_MSIX_VECTORS		20
+
+#define NIC_PF_INTR_ID_ECC0_SBE		0
+#define NIC_PF_INTR_ID_ECC0_DBE		1
+#define NIC_PF_INTR_ID_ECC1_SBE		2
+#define NIC_PF_INTR_ID_ECC1_DBE		3
+#define NIC_PF_INTR_ID_ECC2_SBE		4
+#define NIC_PF_INTR_ID_ECC2_DBE		5
+#define NIC_PF_INTR_ID_ECC3_SBE		6
+#define NIC_PF_INTR_ID_ECC3_DBE		7
+#define NIC_PF_INTR_ID_MBOX0		8
+#define NIC_PF_INTR_ID_MBOX1		9
+
+/* Global timer for CQ timer thresh interrupts
+ * Calculated for SCLK of 700 MHz
+ * value written should be a 1/16th of what is expected
+ *
+ * 1 tick per 0.05usec = value of 2.2
+ * This 10% would be covered in CQ timer thresh value
+ */
+#define NICPF_CLK_PER_INT_TICK		2
+
+struct nicvf_cq_poll {
+	u8	cq_idx;		/* Completion queue index */
+	struct	napi_struct napi;
+};
+
+#define	NIC_RSSI_COUNT			4096 /* Total number of RSS indices */
+#define NIC_MAX_RSS_HASH_BITS		8
+#define NIC_MAX_RSS_IDR_TBL_SIZE	(1 << NIC_MAX_RSS_HASH_BITS)
+#define RSS_HASH_KEY_SIZE		5 /* 320 bit key */
+
+struct nicvf_rss_info {
+	bool enable;
+#define	RSS_L2_EXTENDED_HASH_ENA	BIT(0)
+#define	RSS_IP_HASH_ENA			BIT(1)
+#define	RSS_TCP_HASH_ENA		BIT(2)
+#define	RSS_TCP_SYN_DIS			BIT(3)
+#define	RSS_UDP_HASH_ENA		BIT(4)
+#define RSS_L4_EXTENDED_HASH_ENA	BIT(5)
+#define	RSS_ROCE_ENA			BIT(6)
+#define	RSS_L3_BI_DIRECTION_ENA		BIT(7)
+#define	RSS_L4_BI_DIRECTION_ENA		BIT(8)
+	u64 cfg;
+	u8  hash_bits;
+	u16 rss_size;
+	u8  ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+	u64 key[RSS_HASH_KEY_SIZE];
+} ____cacheline_aligned_in_smp;
+
+enum rx_stats_reg_offset {
+	RX_OCTS = 0x0,
+	RX_UCAST = 0x1,
+	RX_BCAST = 0x2,
+	RX_MCAST = 0x3,
+	RX_RED = 0x4,
+	RX_RED_OCTS = 0x5,
+	RX_ORUN = 0x6,
+	RX_ORUN_OCTS = 0x7,
+	RX_FCS = 0x8,
+	RX_L2ERR = 0x9,
+	RX_DRP_BCAST = 0xa,
+	RX_DRP_MCAST = 0xb,
+	RX_DRP_L3BCAST = 0xc,
+	RX_DRP_L3MCAST = 0xd,
+	RX_STATS_ENUM_LAST,
+};
+
+enum tx_stats_reg_offset {
+	TX_OCTS = 0x0,
+	TX_UCAST = 0x1,
+	TX_BCAST = 0x2,
+	TX_MCAST = 0x3,
+	TX_DROP = 0x4,
+	TX_STATS_ENUM_LAST,
+};
+
+struct nicvf_hw_stats {
+	u64 rx_bytes_ok;
+	u64 rx_ucast_frames_ok;
+	u64 rx_bcast_frames_ok;
+	u64 rx_mcast_frames_ok;
+	u64 rx_fcs_errors;
+	u64 rx_l2_errors;
+	u64 rx_drop_red;
+	u64 rx_drop_red_bytes;
+	u64 rx_drop_overrun;
+	u64 rx_drop_overrun_bytes;
+	u64 rx_drop_bcast;
+	u64 rx_drop_mcast;
+	u64 rx_drop_l3_bcast;
+	u64 rx_drop_l3_mcast;
+	u64 tx_bytes_ok;
+	u64 tx_ucast_frames_ok;
+	u64 tx_bcast_frames_ok;
+	u64 tx_mcast_frames_ok;
+	u64 tx_drops;
+};
+
+struct nicvf_drv_stats {
+	/* Rx */
+	u64 rx_frames_ok;
+	u64 rx_frames_64;
+	u64 rx_frames_127;
+	u64 rx_frames_255;
+	u64 rx_frames_511;
+	u64 rx_frames_1023;
+	u64 rx_frames_1518;
+	u64 rx_frames_jumbo;
+	u64 rx_drops;
+	/* Tx */
+	u64 tx_frames_ok;
+	u64 tx_drops;
+	u64 tx_busy;
+	u64 tx_tso;
+};
+
+struct nicvf {
+	struct net_device	*netdev;
+	struct pci_dev		*pdev;
+	u8			vf_id;
+	u8			node;
+	u8			tns_mode;
+	u16			mtu;
+	struct queue_set	*qs;
+	void __iomem		*reg_base;
+	bool			link_up;
+	u8			duplex;
+	u32			speed;
+	struct page		*rb_page;
+	u32			rb_page_offset;
+	bool			rb_alloc_fail;
+	bool			rb_work_scheduled;
+	struct delayed_work	rbdr_work;
+	struct tasklet_struct	rbdr_task;
+	struct tasklet_struct	qs_err_task;
+	struct tasklet_struct	cq_task;
+	struct nicvf_cq_poll	*napi[8];
+	struct nicvf_rss_info	rss_info;
+	u8			cpi_alg;
+	/* Interrupt coalescing settings */
+	u32			cq_coalesce_usecs;
+
+	u32			msg_enable;
+	struct nicvf_hw_stats   stats;
+	struct nicvf_drv_stats  drv_stats;
+	struct bgx_stats	bgx_stats;
+	struct work_struct	reset_task;
+
+	/* MSI-X  */
+	bool			msix_enabled;
+	u8			num_vec;
+	struct msix_entry	msix_entries[NIC_VF_MSIX_VECTORS];
+	char			irq_name[NIC_VF_MSIX_VECTORS][20];
+	bool			irq_allocated[NIC_VF_MSIX_VECTORS];
+
+	bool			pf_ready_to_rcv_msg;
+	bool			pf_acked;
+	bool			pf_nacked;
+	bool			bgx_stats_acked;
+	bool			set_mac_pending;
+} ____cacheline_aligned_in_smp;
+
+/* PF <--> VF Mailbox communication
+ * Eight 64bit registers are shared between PF and VF.
+ * Separate set for each VF.
+ * Writing '1' into last register mbx7 means end of message.
+ */
+
+#define	NIC_PF_VF_MAILBOX_SIZE		2
+#define	NIC_MBOX_MSG_TIMEOUT		2000 /* ms */
+
+/* Mailbox message types */
+#define	NIC_MBOX_MSG_READY		0x01	/* Is PF ready to rcv msgs */
+#define	NIC_MBOX_MSG_ACK		0x02	/* ACK the message received */
+#define	NIC_MBOX_MSG_NACK		0x03	/* NACK the message received */
+#define	NIC_MBOX_MSG_QS_CFG		0x04	/* Configure Qset */
+#define	NIC_MBOX_MSG_RQ_CFG		0x05	/* Configure receive queue */
+#define	NIC_MBOX_MSG_SQ_CFG		0x06	/* Configure Send queue */
+#define	NIC_MBOX_MSG_RQ_DROP_CFG	0x07	/* Configure receive queue */
+#define	NIC_MBOX_MSG_SET_MAC		0x08	/* Add MAC ID to DMAC filter */
+#define	NIC_MBOX_MSG_SET_MAX_FRS	0x09	/* Set max frame size */
+#define	NIC_MBOX_MSG_CPI_CFG		0x0A	/* Config CPI, RSSI */
+#define	NIC_MBOX_MSG_RSS_SIZE		0x0B	/* Get RSS indir_tbl size */
+#define	NIC_MBOX_MSG_RSS_CFG		0x0C	/* Config RSS table */
+#define	NIC_MBOX_MSG_RSS_CFG_CONT	0x0D	/* RSS config continuation */
+#define	NIC_MBOX_MSG_RQ_BP_CFG		0x0E	/* RQ backpressure config */
+#define	NIC_MBOX_MSG_RQ_SW_SYNC		0x0F	/* Flush inflight pkts to RQ */
+#define	NIC_MBOX_MSG_BGX_STATS		0x10	/* Get stats from BGX */
+#define	NIC_MBOX_MSG_BGX_LINK_CHANGE	0x11	/* BGX:LMAC link status */
+#define NIC_MBOX_MSG_CFG_DONE		0x12	/* VF configuration done */
+#define NIC_MBOX_MSG_SHUTDOWN		0x13	/* VF is being shutdown */
+
+struct nic_cfg_msg {
+	u8    msg;
+	u8    vf_id;
+	u8    tns_mode;
+	u8    node_id;
+	u8    mac_addr[ETH_ALEN];
+};
+
+/* Qset configuration */
+struct qs_cfg_msg {
+	u8    msg;
+	u8    num;
+	u64   cfg;
+};
+
+/* Receive queue configuration */
+struct rq_cfg_msg {
+	u8    msg;
+	u8    qs_num;
+	u8    rq_num;
+	u64   cfg;
+};
+
+/* Send queue configuration */
+struct sq_cfg_msg {
+	u8    msg;
+	u8    qs_num;
+	u8    sq_num;
+	u64   cfg;
+};
+
+/* Set VF's MAC address */
+struct set_mac_msg {
+	u8    msg;
+	u8    vf_id;
+	u8    mac_addr[ETH_ALEN];
+};
+
+/* Set Maximum frame size */
+struct set_frs_msg {
+	u8    msg;
+	u8    vf_id;
+	u16   max_frs;
+};
+
+/* Set CPI algorithm type */
+struct cpi_cfg_msg {
+	u8    msg;
+	u8    vf_id;
+	u8    rq_cnt;
+	u8    cpi_alg;
+};
+
+/* Get RSS table size */
+struct rss_sz_msg {
+	u8    msg;
+	u8    vf_id;
+	u16   ind_tbl_size;
+};
+
+/* Set RSS configuration */
+struct rss_cfg_msg {
+	u8    msg;
+	u8    vf_id;
+	u8    hash_bits;
+	u8    tbl_len;
+	u8    tbl_offset;
+#define RSS_IND_TBL_LEN_PER_MBX_MSG	8
+	u8    ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
+};
+
+struct bgx_stats_msg {
+	u8    msg;
+	u8    vf_id;
+	u8    rx;
+	u8    idx;
+	u64   stats;
+};
+
+/* Physical interface link status */
+struct bgx_link_status {
+	u8    msg;
+	u8    link_up;
+	u8    duplex;
+	u32   speed;
+};
+
+/* 128 bit shared memory between PF and each VF */
+union nic_mbx {
+	struct { u8 msg; }	msg;
+	struct nic_cfg_msg	nic_cfg;
+	struct qs_cfg_msg	qs;
+	struct rq_cfg_msg	rq;
+	struct sq_cfg_msg	sq;
+	struct set_mac_msg	mac;
+	struct set_frs_msg	frs;
+	struct cpi_cfg_msg	cpi_cfg;
+	struct rss_sz_msg	rss_size;
+	struct rss_cfg_msg	rss_cfg;
+	struct bgx_stats_msg    bgx_stats;
+	struct bgx_link_status  link_status;
+};
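+
+/* Illustrative usage: a VF would request its configuration from the PF
+ * with something like
+ *
+ *	union nic_mbx mbx = {};
+ *
+ *	mbx.msg.msg = NIC_MBOX_MSG_READY;
+ *	nicvf_send_msg_to_pf(nic, &mbx);
+ *
+ * to which the PF replies with a struct nic_cfg_msg carrying the VF's
+ * ID, node and MAC address (see nic_mbx_send_ready() in nic_main.c).
+ */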
+
+#define NIC_NODE_ID_MASK	0x03
+#define NIC_NODE_ID_SHIFT	44
+
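+/* The node ID is carried in bits 45:44 of the BAR0 physical address */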
+static inline int nic_get_node_id(struct pci_dev *pdev)
+{
+	u64 addr = pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM);
+	return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK);
+}
+
+int nicvf_set_real_num_queues(struct net_device *netdev,
+			      int tx_queues, int rx_queues);
+int nicvf_open(struct net_device *netdev);
+int nicvf_stop(struct net_device *netdev);
+int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
+void nicvf_config_rss(struct nicvf *nic);
+void nicvf_set_rss_key(struct nicvf *nic);
+void nicvf_set_ethtool_ops(struct net_device *netdev);
+void nicvf_update_stats(struct nicvf *nic);
+void nicvf_update_lmac_stats(struct nicvf *nic);
+
+#endif /* NIC_H */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
new file mode 100644
index 0000000..6e0c031
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -0,0 +1,932 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/of.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME	"thunder-nic"
+#define DRV_VERSION	"1.0"
+
+struct nicpf {
+	struct pci_dev		*pdev;
+	u8			rev_id;
+	u8			node;
+	unsigned int		flags;
+	u8			num_vf_en;      /* No of VF enabled */
+	bool			vf_enabled[MAX_NUM_VFS_SUPPORTED];
+	void __iomem		*reg_base;       /* Register start address */
+	struct pkind_cfg	pkind;
+#define	NIC_SET_VF_LMAC_MAP(bgx, lmac)	(((bgx & 0xF) << 4) | (lmac & 0xF))
+#define	NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	((map >> 4) & 0xF)
+#define	NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	(map & 0xF)
+	u8			vf_lmac_map[MAX_LMAC];
+	struct delayed_work     dwork;
+	struct workqueue_struct *check_link;
+	u8			link[MAX_LMAC];
+	u8			duplex[MAX_LMAC];
+	u32			speed[MAX_LMAC];
+	u16			cpi_base[MAX_NUM_VFS_SUPPORTED];
+	u16			rss_ind_tbl_size;
+	bool			mbx_lock[MAX_NUM_VFS_SUPPORTED];
+
+	/* MSI-X */
+	bool			msix_enabled;
+	u8			num_vec;
+	struct msix_entry	msix_entries[NIC_PF_MSIX_VECTORS];
+	bool			irq_allocated[NIC_PF_MSIX_VECTORS];
+};
+
+/* Supported devices */
+static const struct pci_device_id nic_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
+	{ 0, }  /* end of table */
+};
+
+MODULE_AUTHOR("Sunil Goutham");
+MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, nic_id_table);
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver.  The readq()/writeq() functions add
+ * explicit ordering operations which in this case are redundant, and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
+{
+	writeq_relaxed(val, nic->reg_base + offset);
+}
+
+static u64 nic_reg_read(struct nicpf *nic, u64 offset)
+{
+	return readq_relaxed(nic->reg_base + offset);
+}
+
+/* PF -> VF mailbox communication APIs */
+static void nic_enable_mbx_intr(struct nicpf *nic)
+{
+	/* Enable mailbox interrupt for all 128 VFs */
+	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0ull);
+	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), ~0ull);
+}
+
+static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
+{
+	nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf));
+}
+
+static u64 nic_get_mbx_addr(int vf)
+{
+	return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
+}
+
+/* Send a mailbox message to VF
+ * @vf: VF to which this message is to be sent
+ * @mbx: message to be sent
+ */
+static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
+{
+	void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf);
+	u64 *msg = (u64 *)mbx;
+
+	/* In first revision HW, mbox interrupt is triggered
+	 * when PF writes to MBOX(1), in later revisions when
+	 * PF writes to MBOX(0)
+	 */
+	if (nic->rev_id == 0) {
+		/* see the comment for nic_reg_write()/nic_reg_read()
+		 * functions above
+		 */
+		writeq_relaxed(msg[0], mbx_addr);
+		writeq_relaxed(msg[1], mbx_addr + 8);
+	} else {
+		writeq_relaxed(msg[1], mbx_addr + 8);
+		writeq_relaxed(msg[0], mbx_addr);
+	}
+}
+
+/* Responds to VF's READY message with VF's
+ * ID, node, MAC address, etc.
+ * @vf: VF which sent READY message
+ */
+static void nic_mbx_send_ready(struct nicpf *nic, int vf)
+{
+	union nic_mbx mbx = {};
+	int bgx_idx, lmac;
+	const char *mac;
+
+	mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
+	mbx.nic_cfg.vf_id = vf;
+
+	mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
+
+	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+	mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
+	if (mac)
+		ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
+
+	mbx.nic_cfg.node_id = nic->node;
+	nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* ACKs VF's mailbox message
+ * @vf: VF to which the ACK is to be sent
+ */
+static void nic_mbx_send_ack(struct nicpf *nic, int vf)
+{
+	union nic_mbx mbx = {};
+
+	mbx.msg.msg = NIC_MBOX_MSG_ACK;
+	nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* NACKs VF's mailbox message to indicate that the PF is unable to
+ * complete the requested action
+ * @vf: VF to which the NACK is to be sent
+ */
+static void nic_mbx_send_nack(struct nicpf *nic, int vf)
+{
+	union nic_mbx mbx = {};
+
+	mbx.msg.msg = NIC_MBOX_MSG_NACK;
+	nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* Flush all in flight receive packets to memory and
+ * bring down an active RQ
+ */
+static int nic_rcv_queue_sw_sync(struct nicpf *nic)
+{
+	u16 timeout = ~0x00;
+
+	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
+	/* Wait till sync cycle is finished */
+	while (timeout) {
+		if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
+			break;
+		timeout--;
+	}
+	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
+	if (!timeout) {
+		dev_err(&nic->pdev->dev, "Receive queue software sync failed");
+		return 1;
+	}
+	return 0;
+}
+
+/* Get BGX Rx/Tx stats and respond to VF's request */
+static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
+{
+	int bgx_idx, lmac;
+	union nic_mbx mbx = {};
+
+	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
+	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
+
+	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
+	mbx.bgx_stats.vf_id = bgx->vf_id;
+	mbx.bgx_stats.rx = bgx->rx;
+	mbx.bgx_stats.idx = bgx->idx;
+	if (bgx->rx)
+		mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx,
+							    lmac, bgx->idx);
+	else
+		mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx,
+							    lmac, bgx->idx);
+	nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
+}
+
+/* Update hardware min/max frame size */
+static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
+{
+	if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
+		dev_err(&nic->pdev->dev,
+			"Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
+			   vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
+		return 1;
+	}
+	new_frs += ETH_HLEN;
+	if (new_frs <= nic->pkind.maxlen)
+		return 0;
+
+	nic->pkind.maxlen = new_frs;
+	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
+	return 0;
+}
+
+/* Set minimum transmit packet size */
+static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
+{
+	int lmac;
+	u64 lmac_cfg;
+
+	/* Max value that can be set is 60 */
+	if (size > 60)
+		size = 60;
+
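+	/* The pad size is programmed in units of 4 bytes into bits 5:2 of
+	 * each LMAC's config register.
+	 */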
+	for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
+		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
+		lmac_cfg &= ~(0xF << 2);
+		lmac_cfg |= ((size / 4) << 2);
+		nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
+	}
+}
+
+/* Function to check number of LMACs present and set VF::LMAC mapping.
+ * Mapping will be used while initializing channels.
+ */
+static void nic_set_lmac_vf_mapping(struct nicpf *nic)
+{
+	unsigned bgx_map = bgx_get_map(nic->node);
+	int bgx, next_bgx_lmac = 0;
+	int lmac, lmac_cnt = 0;
+	u64 lmac_credit;
+
+	nic->num_vf_en = 0;
+
+	for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
+		if (!(bgx_map & (1 << bgx)))
+			continue;
+		lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
+		for (lmac = 0; lmac < lmac_cnt; lmac++)
+			nic->vf_lmac_map[next_bgx_lmac++] =
+						NIC_SET_VF_LMAC_MAP(bgx, lmac);
+		nic->num_vf_en += lmac_cnt;
+
+		/* Program LMAC credits */
+		lmac_credit = (1ull << 1); /* channel credit enable */
+		lmac_credit |= (0x1ff << 2); /* Max outstanding pkt count */
+		/* 48KB BGX Tx buffer size, each unit is of size 16bytes */
+		lmac_credit |= (((((48 * 1024) / lmac_cnt) -
+				NIC_HW_MAX_FRS) / 16) << 12);
+		lmac = bgx * MAX_LMAC_PER_BGX;
+		for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
+			nic_reg_write(nic,
+				      NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
+				      lmac_credit);
+	}
+}
+
+#define BGX0_BLOCK 8
+#define BGX1_BLOCK 9
+
+static void nic_init_hw(struct nicpf *nic)
+{
+	int i;
+
+	/* Reset NIC, in case the driver is repeatedly inserted and removed */
+	nic_reg_write(nic, NIC_PF_SOFT_RESET, 1);
+
+	/* Enable NIC HW block */
+	nic_reg_write(nic, NIC_PF_CFG, 0x3);
+
+	/* Enable backpressure */
+	nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
+
+	/* Disable TNS mode on both interfaces */
+	nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
+		      (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
+	nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
+		      (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
+		      (1ULL << 63) | BGX0_BLOCK);
+	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
+		      (1ULL << 63) | BGX1_BLOCK);
+
+	/* PKIND configuration */
+	nic->pkind.minlen = 0;
+	nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
+	nic->pkind.lenerr_en = 1;
+	nic->pkind.rx_hdr = 0;
+	nic->pkind.hdr_sl = 0;
+
+	for (i = 0; i < NIC_MAX_PKIND; i++)
+		nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
+			      *(u64 *)&nic->pkind);
+
+	nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);
+
+	/* Timer config */
+	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
+}
+
+/* Channel parse index configuration */
+static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
+{
+	u32 vnic, bgx, lmac, chan;
+	u32 padd, cpi_count = 0;
+	u64 cpi_base, cpi, rssi_base, rssi;
+	u8  qset, rq_idx = 0;
+
+	vnic = cfg->vf_id;
+	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+
+	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
+	cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
+	rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);
+
+	/* Rx channel configuration */
+	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
+		      (1ull << 63) | (vnic << 0));
+	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
+		      ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));
+
+	if (cfg->cpi_alg == CPI_ALG_NONE)
+		cpi_count = 1;
+	else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
+		cpi_count = 8;
+	else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
+		cpi_count = 16;
+	else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */
+		cpi_count = NIC_MAX_CPI_PER_LMAC;
+
+	/* RSS Qset, Qidx mapping */
+	qset = cfg->vf_id;
+	rssi = rssi_base;
+	for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
+		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
+			      (qset << 3) | rq_idx);
+		rq_idx++;
+	}
+
+	rssi = 0;
+	cpi = cpi_base;
+	for (; cpi < (cpi_base + cpi_count); cpi++) {
+		/* Determine port to channel adder */
+		if (cfg->cpi_alg != CPI_ALG_DIFF)
+			padd = cpi % cpi_count;
+		else
+			padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */
+
+		/* Leave RSS_SIZE as '0' to disable RSS */
+		nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
+			      (vnic << 24) | (padd << 16) | (rssi_base + rssi));
+
+		if ((rssi + 1) >= cfg->rq_cnt)
+			continue;
+
+		if (cfg->cpi_alg == CPI_ALG_VLAN)
+			rssi++;
+		else if (cfg->cpi_alg == CPI_ALG_VLAN16)
+			rssi = ((cpi - cpi_base) & 0xe) >> 1;
+		else if (cfg->cpi_alg == CPI_ALG_DIFF)
+			rssi = ((cpi - cpi_base) & 0x38) >> 3;
+	}
+	nic->cpi_base[cfg->vf_id] = cpi_base;
+}
+
+/* Responds to VF with its RSS indirection table size */
+static void nic_send_rss_size(struct nicpf *nic, int vf)
+{
+	union nic_mbx mbx = {};
+
+	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
+	mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
+	nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* Receive side scaling configuration
+ * Configures:
+ * - RSS index
+ * - indirection table, i.e. hash::RQ mapping
+ * - number of hash bits to consider
+ */
+static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
+{
+	u8  qset, idx = 0;
+	u64 cpi_cfg, cpi_base, rssi_base, rssi;
+
+	cpi_base = nic->cpi_base[cfg->vf_id];
+	cpi_cfg = nic_reg_read(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3));
+	rssi_base = (cpi_cfg & 0x0FFF) + cfg->tbl_offset;
+
+	rssi = rssi_base;
+	qset = cfg->vf_id;
+
+	for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
+		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
+			      (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
+		idx++;
+	}
+
+	cpi_cfg &= ~(0xFULL << 20);
+	cpi_cfg |= (cfg->hash_bits << 20);
+	nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3), cpi_cfg);
+}
+
+/* 4 level transmit side scheduler configuration
+ * for TNS bypass mode
+ *
+ * Sample configuration for SQ0
+ * VNIC0-SQ0 -> TL4(0)   -> TL3[0]   -> TL2[0]  -> TL1[0] -> BGX0
+ * VNIC1-SQ0 -> TL4(8)   -> TL3[2]   -> TL2[0]  -> TL1[0] -> BGX0
+ * VNIC2-SQ0 -> TL4(16)  -> TL3[4]   -> TL2[1]  -> TL1[0] -> BGX0
+ * VNIC3-SQ0 -> TL4(24)  -> TL3[6]   -> TL2[1]  -> TL1[0] -> BGX0
+ * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
+ * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
+ * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
+ * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
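+ *
+ * With NIC_TL4_PER_LMAC = 8 and NIC_TL4_PER_BGX = 512 (TNS bypass mode),
+ * the mapping above follows from
+ *	tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX) + sq_idx
+ * as computed below.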
+ */
+static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx)
+{
+	u32 bgx, lmac, chan;
+	u32 tl2, tl3, tl4;
+	u32 rr_quantum;
+
+	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+	/* 24 bytes for FCS, IPG and preamble */
+	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
+
+	tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+	tl4 += sq_idx;
+	tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
+	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
+		      ((u64)vnic << NIC_QS_ID_SHIFT) |
+		      ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
+	nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
+		      ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);
+
+	nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
+	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
+	nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
+	/* Enable backpressure on the channel */
+	nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);
+
+	tl2 = tl3 >> 2;
+	nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
+	nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
+	/* No priorities as of now */
+	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
+}
+
+/* Interrupt handler to handle mailbox messages from VFs */
+static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
+{
+	union nic_mbx mbx = {};
+	u64 *mbx_data;
+	u64 mbx_addr;
+	u64 reg_addr;
+	int bgx, lmac;
+	int i;
+	int ret = 0;
+
+	nic->mbx_lock[vf] = true;
+
+	mbx_addr = nic_get_mbx_addr(vf);
+	mbx_data = (u64 *)&mbx;
+
+	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+		*mbx_data = nic_reg_read(nic, mbx_addr);
+		mbx_data++;
+		mbx_addr += sizeof(u64);
+	}
+
+	dev_dbg(&nic->pdev->dev, "%s: Mailbox msg %d from VF%d\n",
+		__func__, mbx.msg.msg, vf);
+	switch (mbx.msg.msg) {
+	case NIC_MBOX_MSG_READY:
+		nic_mbx_send_ready(nic, vf);
+		nic->link[vf] = 0;
+		nic->duplex[vf] = 0;
+		nic->speed[vf] = 0;
+		ret = 1;
+		break;
+	case NIC_MBOX_MSG_QS_CFG:
+		reg_addr = NIC_PF_QSET_0_127_CFG |
+			   (mbx.qs.num << NIC_QS_ID_SHIFT);
+		nic_reg_write(nic, reg_addr, mbx.qs.cfg);
+		break;
+	case NIC_MBOX_MSG_RQ_CFG:
+		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
+			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+		break;
+	case NIC_MBOX_MSG_RQ_BP_CFG:
+		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
+			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+		break;
+	case NIC_MBOX_MSG_RQ_SW_SYNC:
+		ret = nic_rcv_queue_sw_sync(nic);
+		break;
+	case NIC_MBOX_MSG_RQ_DROP_CFG:
+		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
+			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+		break;
+	case NIC_MBOX_MSG_SQ_CFG:
+		reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
+			   (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
+			   (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
+		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
+		nic_tx_channel_cfg(nic, mbx.qs.num, mbx.sq.sq_num);
+		break;
+	case NIC_MBOX_MSG_SET_MAC:
+		lmac = mbx.mac.vf_id;
+		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
+		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
+		bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
+		break;
+	case NIC_MBOX_MSG_SET_MAX_FRS:
+		ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
+					mbx.frs.vf_id);
+		break;
+	case NIC_MBOX_MSG_CPI_CFG:
+		nic_config_cpi(nic, &mbx.cpi_cfg);
+		break;
+	case NIC_MBOX_MSG_RSS_SIZE:
+		nic_send_rss_size(nic, vf);
+		goto unlock;
+	case NIC_MBOX_MSG_RSS_CFG:
+	case NIC_MBOX_MSG_RSS_CFG_CONT:
+		nic_config_rss(nic, &mbx.rss_cfg);
+		break;
+	case NIC_MBOX_MSG_CFG_DONE:
+		/* Last message of VF config msg sequence */
+		nic->vf_enabled[vf] = true;
+		goto unlock;
+	case NIC_MBOX_MSG_SHUTDOWN:
+		/* First msg in VF teardown sequence */
+		nic->vf_enabled[vf] = false;
+		break;
+	case NIC_MBOX_MSG_BGX_STATS:
+		nic_get_bgx_stats(nic, &mbx.bgx_stats);
+		goto unlock;
+	default:
+		dev_err(&nic->pdev->dev,
+			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
+		break;
+	}
+
+	if (!ret)
+		nic_mbx_send_ack(nic, vf);
+	else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
+		nic_mbx_send_nack(nic, vf);
+unlock:
+	nic->mbx_lock[vf] = false;
+}
+
+static void nic_mbx_intr_handler(struct nicpf *nic, int mbx)
+{
+	u64 intr;
+	u8  vf, vf_per_mbx_reg = 64;
+
+	intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
+	dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
+	for (vf = 0; vf < vf_per_mbx_reg; vf++) {
+		if (intr & (1ULL << vf)) {
+			dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
+				vf + (mbx * vf_per_mbx_reg));
+			if ((vf + (mbx * vf_per_mbx_reg)) >= nic->num_vf_en)
+				break;
+			nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
+			nic_clear_mbx_intr(nic, vf, mbx);
+		}
+	}
+}
+
+static irqreturn_t nic_mbx0_intr_handler(int irq, void *nic_irq)
+{
+	struct nicpf *nic = (struct nicpf *)nic_irq;
+
+	nic_mbx_intr_handler(nic, 0);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t nic_mbx1_intr_handler(int irq, void *nic_irq)
+{
+	struct nicpf *nic = (struct nicpf *)nic_irq;
+
+	nic_mbx_intr_handler(nic, 1);
+
+	return IRQ_HANDLED;
+}
+
+static int nic_enable_msix(struct nicpf *nic)
+{
+	int i, ret;
+
+	nic->num_vec = NIC_PF_MSIX_VECTORS;
+
+	for (i = 0; i < nic->num_vec; i++)
+		nic->msix_entries[i].entry = i;
+
+	ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
+	if (ret) {
+		dev_err(&nic->pdev->dev,
+			"Request for #%d msix vectors failed\n",
+			   nic->num_vec);
+		return ret;
+	}
+
+	nic->msix_enabled = 1;
+	return 0;
+}
+
+static void nic_disable_msix(struct nicpf *nic)
+{
+	if (nic->msix_enabled) {
+		pci_disable_msix(nic->pdev);
+		nic->msix_enabled = 0;
+		nic->num_vec = 0;
+	}
+}
+
+static void nic_free_all_interrupts(struct nicpf *nic)
+{
+	int irq;
+
+	for (irq = 0; irq < nic->num_vec; irq++) {
+		if (nic->irq_allocated[irq])
+			free_irq(nic->msix_entries[irq].vector, nic);
+		nic->irq_allocated[irq] = false;
+	}
+}
+
+static int nic_register_interrupts(struct nicpf *nic)
+{
+	int ret;
+
+	/* Enable MSI-X */
+	ret = nic_enable_msix(nic);
+	if (ret)
+		return ret;
+
+	/* Register mailbox interrupt handlers */
+	ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector,
+			  nic_mbx0_intr_handler, 0, "NIC Mbox0", nic);
+	if (ret)
+		goto fail;
+
+	nic->irq_allocated[NIC_PF_INTR_ID_MBOX0] = true;
+
+	ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX1].vector,
+			  nic_mbx1_intr_handler, 0, "NIC Mbox1", nic);
+	if (ret)
+		goto fail;
+
+	nic->irq_allocated[NIC_PF_INTR_ID_MBOX1] = true;
+
+	/* Enable mailbox interrupt */
+	nic_enable_mbx_intr(nic);
+	return 0;
+
+fail:
+	dev_err(&nic->pdev->dev, "Request irq failed\n");
+	nic_free_all_interrupts(nic);
+	return ret;
+}
+
+static void nic_unregister_interrupts(struct nicpf *nic)
+{
+	nic_free_all_interrupts(nic);
+	nic_disable_msix(nic);
+}
+
+static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
+{
+	int pos = 0;
+	int err;
+	u16 total_vf_cnt;
+
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+	if (!pos) {
+		dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
+		return -ENODEV;
+	}
+
+	pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
+	if (total_vf_cnt < nic->num_vf_en)
+		nic->num_vf_en = total_vf_cnt;
+
+	if (!total_vf_cnt)
+		return 0;
+
+	err = pci_enable_sriov(pdev, nic->num_vf_en);
+	if (err) {
+		dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
+			nic->num_vf_en);
+		nic->num_vf_en = 0;
+		return err;
+	}
+
+	dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
+		 nic->num_vf_en);
+
+	nic->flags |= NIC_SRIOV_ENABLED;
+	return 0;
+}
+
+/* Poll for BGX LMAC link status and update the corresponding VF
+ * if there is a change. Valid only if the internal L2 switch is not
+ * present; otherwise the VF link is always treated as up
+ */
+static void nic_poll_for_link(struct work_struct *work)
+{
+	union nic_mbx mbx = {};
+	struct nicpf *nic;
+	struct bgx_link_status link;
+	u8 vf, bgx, lmac;
+
+	nic = container_of(work, struct nicpf, dwork.work);
+
+	mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+
+	for (vf = 0; vf < nic->num_vf_en; vf++) {
+		/* Poll only if VF is UP */
+		if (!nic->vf_enabled[vf])
+			continue;
+
+		/* Get BGX, LMAC indices for the VF */
+		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+		/* Get interface link status */
+		bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
+
+		/* Inform VF only if link status changed */
+		if (nic->link[vf] == link.link_up)
+			continue;
+
+		if (!nic->mbx_lock[vf]) {
+			nic->link[vf] = link.link_up;
+			nic->duplex[vf] = link.duplex;
+			nic->speed[vf] = link.speed;
+
+			/* Send a mbox message to VF with current link status */
+			mbx.link_status.link_up = link.link_up;
+			mbx.link_status.duplex = link.duplex;
+			mbx.link_status.speed = link.speed;
+			nic_send_msg_to_vf(nic, vf, &mbx);
+		}
+	}
+	queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
+}
+
+static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct nicpf *nic;
+	int    err;
+
+	BUILD_BUG_ON(sizeof(union nic_mbx) > 16);
+
+	nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL);
+	if (!nic)
+		return -ENOMEM;
+
+	pci_set_drvdata(pdev, nic);
+
+	nic->pdev = pdev;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(dev, "Failed to enable PCI device\n");
+		pci_set_drvdata(pdev, NULL);
+		return err;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		dev_err(dev, "PCI request regions failed 0x%x\n", err);
+		goto err_disable_device;
+	}
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+	if (err) {
+		dev_err(dev, "Unable to get usable DMA configuration\n");
+		goto err_release_regions;
+	}
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+	if (err) {
+		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
+		goto err_release_regions;
+	}
+
+	/* Map PF's configuration registers */
+	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+	if (!nic->reg_base) {
+		dev_err(dev, "Cannot map config register space, aborting\n");
+		err = -ENOMEM;
+		goto err_release_regions;
+	}
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &nic->rev_id);
+
+	nic->node = nic_get_node_id(pdev);
+
+	nic_set_lmac_vf_mapping(nic);
+
+	/* Initialize hardware */
+	nic_init_hw(nic);
+
+	/* Set RSS TBL size for each VF */
+	nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
+
+	/* Register interrupts */
+	err = nic_register_interrupts(nic);
+	if (err)
+		goto err_release_regions;
+
+	/* Configure SRIOV */
+	err = nic_sriov_init(pdev, nic);
+	if (err)
+		goto err_unregister_interrupts;
+
+	/* Register a physical link status poll fn() */
+	nic->check_link = alloc_workqueue("check_link_status",
+					  WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (!nic->check_link) {
+		err = -ENOMEM;
+		goto err_disable_sriov;
+	}
+
+	INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
+	queue_delayed_work(nic->check_link, &nic->dwork, 0);
+
+	return 0;
+
+err_disable_sriov:
+	if (nic->flags & NIC_SRIOV_ENABLED)
+		pci_disable_sriov(pdev);
+err_unregister_interrupts:
+	nic_unregister_interrupts(nic);
+err_release_regions:
+	pci_release_regions(pdev);
+err_disable_device:
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	return err;
+}
+
+static void nic_remove(struct pci_dev *pdev)
+{
+	struct nicpf *nic = pci_get_drvdata(pdev);
+
+	if (nic->flags & NIC_SRIOV_ENABLED)
+		pci_disable_sriov(pdev);
+
+	if (nic->check_link) {
+		/* Destroy work queue */
+		cancel_delayed_work(&nic->dwork);
+		flush_workqueue(nic->check_link);
+		destroy_workqueue(nic->check_link);
+	}
+
+	nic_unregister_interrupts(nic);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver nic_driver = {
+	.name = DRV_NAME,
+	.id_table = nic_id_table,
+	.probe = nic_probe,
+	.remove = nic_remove,
+};
+
+static int __init nic_init_module(void)
+{
+	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+	return pci_register_driver(&nic_driver);
+}
+
+static void __exit nic_cleanup_module(void)
+{
+	pci_unregister_driver(&nic_driver);
+}
+
+module_init(nic_init_module);
+module_exit(nic_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
new file mode 100644
index 0000000..58197bb
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef NIC_REG_H
+#define NIC_REG_H
+
+#define   NIC_PF_REG_COUNT			29573
+#define   NIC_VF_REG_COUNT			249
+
+/* Physical function register offsets */
+#define   NIC_PF_CFG				(0x0000)
+#define   NIC_PF_STATUS				(0x0010)
+#define   NIC_PF_INTR_TIMER_CFG			(0x0030)
+#define   NIC_PF_BIST_STATUS			(0x0040)
+#define   NIC_PF_SOFT_RESET			(0x0050)
+#define   NIC_PF_TCP_TIMER			(0x0060)
+#define   NIC_PF_BP_CFG				(0x0080)
+#define   NIC_PF_RRM_CFG			(0x0088)
+#define   NIC_PF_CQM_CF				(0x00A0)
+#define   NIC_PF_CNM_CF				(0x00A8)
+#define   NIC_PF_CNM_STATUS			(0x00B0)
+#define   NIC_PF_CQ_AVG_CFG			(0x00C0)
+#define   NIC_PF_RRM_AVG_CFG			(0x00C8)
+#define   NIC_PF_INTF_0_1_SEND_CFG		(0x0200)
+#define   NIC_PF_INTF_0_1_BP_CFG		(0x0208)
+#define   NIC_PF_INTF_0_1_BP_DIS_0_1		(0x0210)
+#define   NIC_PF_INTF_0_1_BP_SW_0_1		(0x0220)
+#define   NIC_PF_RBDR_BP_STATE_0_3		(0x0240)
+#define   NIC_PF_MAILBOX_INT			(0x0410)
+#define   NIC_PF_MAILBOX_INT_W1S		(0x0430)
+#define   NIC_PF_MAILBOX_ENA_W1C		(0x0450)
+#define   NIC_PF_MAILBOX_ENA_W1S		(0x0470)
+#define   NIC_PF_RX_ETYPE_0_7			(0x0500)
+#define   NIC_PF_PKIND_0_15_CFG			(0x0600)
+#define   NIC_PF_ECC0_FLIP0			(0x1000)
+#define   NIC_PF_ECC1_FLIP0			(0x1008)
+#define   NIC_PF_ECC2_FLIP0			(0x1010)
+#define   NIC_PF_ECC3_FLIP0			(0x1018)
+#define   NIC_PF_ECC0_FLIP1			(0x1080)
+#define   NIC_PF_ECC1_FLIP1			(0x1088)
+#define   NIC_PF_ECC2_FLIP1			(0x1090)
+#define   NIC_PF_ECC3_FLIP1			(0x1098)
+#define   NIC_PF_ECC0_CDIS			(0x1100)
+#define   NIC_PF_ECC1_CDIS			(0x1108)
+#define   NIC_PF_ECC2_CDIS			(0x1110)
+#define   NIC_PF_ECC3_CDIS			(0x1118)
+#define   NIC_PF_BIST0_STATUS			(0x1280)
+#define   NIC_PF_BIST1_STATUS			(0x1288)
+#define   NIC_PF_BIST2_STATUS			(0x1290)
+#define   NIC_PF_BIST3_STATUS			(0x1298)
+#define   NIC_PF_ECC0_SBE_INT			(0x2000)
+#define   NIC_PF_ECC0_SBE_INT_W1S		(0x2008)
+#define   NIC_PF_ECC0_SBE_ENA_W1C		(0x2010)
+#define   NIC_PF_ECC0_SBE_ENA_W1S		(0x2018)
+#define   NIC_PF_ECC0_DBE_INT			(0x2100)
+#define   NIC_PF_ECC0_DBE_INT_W1S		(0x2108)
+#define   NIC_PF_ECC0_DBE_ENA_W1C		(0x2110)
+#define   NIC_PF_ECC0_DBE_ENA_W1S		(0x2118)
+#define   NIC_PF_ECC1_SBE_INT			(0x2200)
+#define   NIC_PF_ECC1_SBE_INT_W1S		(0x2208)
+#define   NIC_PF_ECC1_SBE_ENA_W1C		(0x2210)
+#define   NIC_PF_ECC1_SBE_ENA_W1S		(0x2218)
+#define   NIC_PF_ECC1_DBE_INT			(0x2300)
+#define   NIC_PF_ECC1_DBE_INT_W1S		(0x2308)
+#define   NIC_PF_ECC1_DBE_ENA_W1C		(0x2310)
+#define   NIC_PF_ECC1_DBE_ENA_W1S		(0x2318)
+#define   NIC_PF_ECC2_SBE_INT			(0x2400)
+#define   NIC_PF_ECC2_SBE_INT_W1S		(0x2408)
+#define   NIC_PF_ECC2_SBE_ENA_W1C		(0x2410)
+#define   NIC_PF_ECC2_SBE_ENA_W1S		(0x2418)
+#define   NIC_PF_ECC2_DBE_INT			(0x2500)
+#define   NIC_PF_ECC2_DBE_INT_W1S		(0x2508)
+#define   NIC_PF_ECC2_DBE_ENA_W1C		(0x2510)
+#define   NIC_PF_ECC2_DBE_ENA_W1S		(0x2518)
+#define   NIC_PF_ECC3_SBE_INT			(0x2600)
+#define   NIC_PF_ECC3_SBE_INT_W1S		(0x2608)
+#define   NIC_PF_ECC3_SBE_ENA_W1C		(0x2610)
+#define   NIC_PF_ECC3_SBE_ENA_W1S		(0x2618)
+#define   NIC_PF_ECC3_DBE_INT			(0x2700)
+#define   NIC_PF_ECC3_DBE_INT_W1S		(0x2708)
+#define   NIC_PF_ECC3_DBE_ENA_W1C		(0x2710)
+#define   NIC_PF_ECC3_DBE_ENA_W1S		(0x2718)
+#define   NIC_PF_CPI_0_2047_CFG			(0x200000)
+#define   NIC_PF_RSSI_0_4097_RQ			(0x220000)
+#define   NIC_PF_LMAC_0_7_CFG			(0x240000)
+#define   NIC_PF_LMAC_0_7_SW_XOFF		(0x242000)
+#define   NIC_PF_LMAC_0_7_CREDIT		(0x244000)
+#define   NIC_PF_CHAN_0_255_TX_CFG		(0x400000)
+#define   NIC_PF_CHAN_0_255_RX_CFG		(0x420000)
+#define   NIC_PF_CHAN_0_255_SW_XOFF		(0x440000)
+#define   NIC_PF_CHAN_0_255_CREDIT		(0x460000)
+#define   NIC_PF_CHAN_0_255_RX_BP_CFG		(0x480000)
+#define   NIC_PF_SW_SYNC_RX			(0x490000)
+#define   NIC_PF_SW_SYNC_RX_DONE		(0x490008)
+#define   NIC_PF_TL2_0_63_CFG			(0x500000)
+#define   NIC_PF_TL2_0_63_PRI			(0x520000)
+#define   NIC_PF_TL2_0_63_SH_STATUS		(0x580000)
+#define   NIC_PF_TL3A_0_63_CFG			(0x5F0000)
+#define   NIC_PF_TL3_0_255_CFG			(0x600000)
+#define   NIC_PF_TL3_0_255_CHAN			(0x620000)
+#define   NIC_PF_TL3_0_255_PIR			(0x640000)
+#define   NIC_PF_TL3_0_255_SW_XOFF		(0x660000)
+#define   NIC_PF_TL3_0_255_CNM_RATE		(0x680000)
+#define   NIC_PF_TL3_0_255_SH_STATUS		(0x6A0000)
+#define   NIC_PF_TL4A_0_255_CFG			(0x6F0000)
+#define   NIC_PF_TL4_0_1023_CFG			(0x800000)
+#define   NIC_PF_TL4_0_1023_SW_XOFF		(0x820000)
+#define   NIC_PF_TL4_0_1023_SH_STATUS		(0x840000)
+#define   NIC_PF_TL4A_0_1023_CNM_RATE		(0x880000)
+#define   NIC_PF_TL4A_0_1023_CNM_STATUS		(0x8A0000)
+#define   NIC_PF_VF_0_127_MAILBOX_0_1		(0x20002030)
+#define   NIC_PF_VNIC_0_127_TX_STAT_0_4		(0x20004000)
+#define   NIC_PF_VNIC_0_127_RX_STAT_0_13	(0x20004100)
+#define   NIC_PF_QSET_0_127_LOCK_0_15		(0x20006000)
+#define   NIC_PF_QSET_0_127_CFG			(0x20010000)
+#define   NIC_PF_QSET_0_127_RQ_0_7_CFG		(0x20010400)
+#define   NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG	(0x20010420)
+#define   NIC_PF_QSET_0_127_RQ_0_7_BP_CFG	(0x20010500)
+#define   NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1	(0x20010600)
+#define   NIC_PF_QSET_0_127_SQ_0_7_CFG		(0x20010C00)
+#define   NIC_PF_QSET_0_127_SQ_0_7_CFG2		(0x20010C08)
+#define   NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1	(0x20010D00)
+
+#define   NIC_PF_MSIX_VEC_0_18_ADDR		(0x000000)
+#define   NIC_PF_MSIX_VEC_0_CTL			(0x000008)
+#define   NIC_PF_MSIX_PBA_0			(0x0F0000)
+
+/* Virtual function register offsets */
+#define   NIC_VNIC_CFG				(0x000020)
+#define   NIC_VF_PF_MAILBOX_0_1			(0x000130)
+#define   NIC_VF_INT				(0x000200)
+#define   NIC_VF_INT_W1S			(0x000220)
+#define   NIC_VF_ENA_W1C			(0x000240)
+#define   NIC_VF_ENA_W1S			(0x000260)
+
+#define   NIC_VNIC_RSS_CFG			(0x0020E0)
+#define   NIC_VNIC_RSS_KEY_0_4			(0x002200)
+#define   NIC_VNIC_TX_STAT_0_4			(0x004000)
+#define   NIC_VNIC_RX_STAT_0_13			(0x004100)
+#define   NIC_QSET_RQ_GEN_CFG			(0x010010)
+
+#define   NIC_QSET_CQ_0_7_CFG			(0x010400)
+#define   NIC_QSET_CQ_0_7_CFG2			(0x010408)
+#define   NIC_QSET_CQ_0_7_THRESH		(0x010410)
+#define   NIC_QSET_CQ_0_7_BASE			(0x010420)
+#define   NIC_QSET_CQ_0_7_HEAD			(0x010428)
+#define   NIC_QSET_CQ_0_7_TAIL			(0x010430)
+#define   NIC_QSET_CQ_0_7_DOOR			(0x010438)
+#define   NIC_QSET_CQ_0_7_STATUS		(0x010440)
+#define   NIC_QSET_CQ_0_7_STATUS2		(0x010448)
+#define   NIC_QSET_CQ_0_7_DEBUG			(0x010450)
+
+#define   NIC_QSET_RQ_0_7_CFG			(0x010600)
+#define   NIC_QSET_RQ_0_7_STAT_0_1		(0x010700)
+
+#define   NIC_QSET_SQ_0_7_CFG			(0x010800)
+#define   NIC_QSET_SQ_0_7_THRESH		(0x010810)
+#define   NIC_QSET_SQ_0_7_BASE			(0x010820)
+#define   NIC_QSET_SQ_0_7_HEAD			(0x010828)
+#define   NIC_QSET_SQ_0_7_TAIL			(0x010830)
+#define   NIC_QSET_SQ_0_7_DOOR			(0x010838)
+#define   NIC_QSET_SQ_0_7_STATUS		(0x010840)
+#define   NIC_QSET_SQ_0_7_DEBUG			(0x010848)
+#define   NIC_QSET_SQ_0_7_CNM_CHG		(0x010860)
+#define   NIC_QSET_SQ_0_7_STAT_0_1		(0x010900)
+
+#define   NIC_QSET_RBDR_0_1_CFG			(0x010C00)
+#define   NIC_QSET_RBDR_0_1_THRESH		(0x010C10)
+#define   NIC_QSET_RBDR_0_1_BASE		(0x010C20)
+#define   NIC_QSET_RBDR_0_1_HEAD		(0x010C28)
+#define   NIC_QSET_RBDR_0_1_TAIL		(0x010C30)
+#define   NIC_QSET_RBDR_0_1_DOOR		(0x010C38)
+#define   NIC_QSET_RBDR_0_1_STATUS0		(0x010C40)
+#define   NIC_QSET_RBDR_0_1_STATUS1		(0x010C48)
+#define   NIC_QSET_RBDR_0_1_PREFETCH_STATUS	(0x010C50)
+
+#define   NIC_VF_MSIX_VECTOR_0_19_ADDR		(0x000000)
+#define   NIC_VF_MSIX_VECTOR_0_19_CTL		(0x000008)
+#define   NIC_VF_MSIX_PBA			(0x0F0000)
+
+/* Offsets within registers */
+#define   NIC_MSIX_VEC_SHIFT			4
+#define   NIC_Q_NUM_SHIFT			18
+#define   NIC_QS_ID_SHIFT			21
+#define   NIC_VF_NUM_SHIFT			21
+
+/* Port kind configuration register */
+struct pkind_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 reserved_42_63:22;
+	u64 hdr_sl:5;	/* Header skip length */
+	u64 rx_hdr:3;	/* TNS Receive header present */
+	u64 lenerr_en:1;/* L2 length error check enable */
+	u64 reserved_32_32:1;
+	u64 maxlen:16;	/* Max frame size */
+	u64 minlen:16;	/* Min frame size */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64 minlen:16;
+	u64 maxlen:16;
+	u64 reserved_32_32:1;
+	u64 lenerr_en:1;
+	u64 rx_hdr:3;
+	u64 hdr_sl:5;
+	u64 reserved_42_63:22;
+#endif
+};
+
+#endif /* NIC_REG_H */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
new file mode 100644
index 0000000..16bd2d7
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -0,0 +1,600 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+/* ETHTOOL support for VNIC_VF device */
+
+#include <linux/pci.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+#include "q_struct.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME	"thunder-nicvf"
+#define DRV_VERSION     "1.0"
+
+struct nicvf_stat {
+	char name[ETH_GSTRING_LEN];
+	unsigned int index;
+};
+
+#define NICVF_HW_STAT(stat) { \
+	.name = #stat, \
+	.index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \
+}
+
+#define NICVF_DRV_STAT(stat) { \
+	.name = #stat, \
+	.index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \
+}
+
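+/* Each entry's .index is the counter's position when the corresponding
+ * stats structure is viewed as a flat array of u64 values.
+ */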
+static const struct nicvf_stat nicvf_hw_stats[] = {
+	NICVF_HW_STAT(rx_bytes_ok),
+	NICVF_HW_STAT(rx_ucast_frames_ok),
+	NICVF_HW_STAT(rx_bcast_frames_ok),
+	NICVF_HW_STAT(rx_mcast_frames_ok),
+	NICVF_HW_STAT(rx_fcs_errors),
+	NICVF_HW_STAT(rx_l2_errors),
+	NICVF_HW_STAT(rx_drop_red),
+	NICVF_HW_STAT(rx_drop_red_bytes),
+	NICVF_HW_STAT(rx_drop_overrun),
+	NICVF_HW_STAT(rx_drop_overrun_bytes),
+	NICVF_HW_STAT(rx_drop_bcast),
+	NICVF_HW_STAT(rx_drop_mcast),
+	NICVF_HW_STAT(rx_drop_l3_bcast),
+	NICVF_HW_STAT(rx_drop_l3_mcast),
+	NICVF_HW_STAT(tx_bytes_ok),
+	NICVF_HW_STAT(tx_ucast_frames_ok),
+	NICVF_HW_STAT(tx_bcast_frames_ok),
+	NICVF_HW_STAT(tx_mcast_frames_ok),
+};
+
+static const struct nicvf_stat nicvf_drv_stats[] = {
+	NICVF_DRV_STAT(rx_frames_ok),
+	NICVF_DRV_STAT(rx_frames_64),
+	NICVF_DRV_STAT(rx_frames_127),
+	NICVF_DRV_STAT(rx_frames_255),
+	NICVF_DRV_STAT(rx_frames_511),
+	NICVF_DRV_STAT(rx_frames_1023),
+	NICVF_DRV_STAT(rx_frames_1518),
+	NICVF_DRV_STAT(rx_frames_jumbo),
+	NICVF_DRV_STAT(rx_drops),
+	NICVF_DRV_STAT(tx_frames_ok),
+	NICVF_DRV_STAT(tx_busy),
+	NICVF_DRV_STAT(tx_tso),
+	NICVF_DRV_STAT(tx_drops),
+};
+
+static const struct nicvf_stat nicvf_queue_stats[] = {
+	{ "bytes", 0 },
+	{ "frames", 1 },
+};
+
+static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats);
+static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats);
+static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats);
+
+static int nicvf_get_settings(struct net_device *netdev,
+			      struct ethtool_cmd *cmd)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+
+	cmd->supported = 0;
+	cmd->transceiver = XCVR_EXTERNAL;
+	if (nic->speed <= 1000) {
+		cmd->port = PORT_MII;
+		cmd->autoneg = AUTONEG_ENABLE;
+	} else {
+		cmd->port = PORT_FIBRE;
+		cmd->autoneg = AUTONEG_DISABLE;
+	}
+	cmd->duplex = nic->duplex;
+	ethtool_cmd_speed_set(cmd, nic->speed);
+
+	return 0;
+}
+
+static void nicvf_get_drvinfo(struct net_device *netdev,
+			      struct ethtool_drvinfo *info)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
+}
+
+static u32 nicvf_get_msglevel(struct net_device *netdev)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+
+	return nic->msg_enable;
+}
+
+static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+
+	nic->msg_enable = lvl;
+}
+
+static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+	int stats, qidx;
+
+	if (sset != ETH_SS_STATS)
+		return;
+
+	for (stats = 0; stats < nicvf_n_hw_stats; stats++) {
+		memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN);
+		data += ETH_GSTRING_LEN;
+	}
+
+	for (stats = 0; stats < nicvf_n_drv_stats; stats++) {
+		memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN);
+		data += ETH_GSTRING_LEN;
+	}
+
+	for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
+		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+			sprintf(data, "rxq%d: %s", qidx,
+				nicvf_queue_stats[stats].name);
+			data += ETH_GSTRING_LEN;
+		}
+	}
+
+	for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+			sprintf(data, "txq%d: %s", qidx,
+				nicvf_queue_stats[stats].name);
+			data += ETH_GSTRING_LEN;
+		}
+	}
+
+	for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
+		sprintf(data, "bgx_rxstat%d: ", stats);
+		data += ETH_GSTRING_LEN;
+	}
+
+	for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) {
+		sprintf(data, "bgx_txstat%d: ", stats);
+		data += ETH_GSTRING_LEN;
+	}
+}
+
+static int nicvf_get_sset_count(struct net_device *netdev, int sset)
+{
+	if (sset != ETH_SS_STATS)
+		return -EINVAL;
+
+	return nicvf_n_hw_stats + nicvf_n_drv_stats +
+		(nicvf_n_queue_stats *
+		 (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) +
+		BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
+}
+
+static void nicvf_get_ethtool_stats(struct net_device *netdev,
+				    struct ethtool_stats *stats, u64 *data)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+	int stat, qidx;
+
+	nicvf_update_stats(nic);
+
+	/* Update LMAC stats */
+	nicvf_update_lmac_stats(nic);
+
+	for (stat = 0; stat < nicvf_n_hw_stats; stat++)
+		*(data++) = ((u64 *)&nic->stats)
+				[nicvf_hw_stats[stat].index];
+	for (stat = 0; stat < nicvf_n_drv_stats; stat++)
+		*(data++) = ((u64 *)&nic->drv_stats)
+				[nicvf_drv_stats[stat].index];
+
+	for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
+		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+			*(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
+					[nicvf_queue_stats[stat].index];
+	}
+
+	for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+			*(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
+					[nicvf_queue_stats[stat].index];
+	}
+
+	for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
+		*(data++) = nic->bgx_stats.rx_stats[stat];
+	for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
+		*(data++) = nic->bgx_stats.tx_stats[stat];
+}
+
+static int nicvf_get_regs_len(struct net_device *dev)
+{
+	return sizeof(u64) * NIC_VF_REG_COUNT;
+}
+
+static void nicvf_get_regs(struct net_device *dev,
+			   struct ethtool_regs *regs, void *reg)
+{
+	struct nicvf *nic = netdev_priv(dev);
+	u64 *p = (u64 *)reg;
+	u64 reg_offset;
+	int mbox, key, stat, q;
+	int i = 0;
+
+	regs->version = 0;
+	memset(p, 0, NIC_VF_REG_COUNT * sizeof(u64));
+
+	p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG);
+	/* Mailbox registers */
+	for (mbox = 0; mbox < NIC_PF_VF_MAILBOX_SIZE; mbox++)
+		p[i++] = nicvf_reg_read(nic,
+					NIC_VF_PF_MAILBOX_0_1 | (mbox << 3));
+
+	p[i++] = nicvf_reg_read(nic, NIC_VF_INT);
+	p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S);
+	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C);
+	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+	p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+
+	for (key = 0; key < RSS_HASH_KEY_SIZE; key++)
+		p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3));
+
+	/* Tx/Rx statistics */
+	for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++)
+		p[i++] = nicvf_reg_read(nic,
+					NIC_VNIC_TX_STAT_0_4 | (stat << 3));
+
+	for (stat = 0; stat < RX_STATS_ENUM_LAST; stat++)
+		p[i++] = nicvf_reg_read(nic,
+					NIC_VNIC_RX_STAT_0_13 | (stat << 3));
+
+	p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG);
+
+	/* All completion queues' registers */
+	for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
+	}
+
+	/* All receive queues' registers */
+	for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
+		p[i++] = nicvf_queue_reg_read(nic,
+					      NIC_QSET_RQ_0_7_STAT_0_1, q);
+		reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3);
+		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+	}
+
+	for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
+		reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
+		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+	}
+
+	for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
+		p[i++] = nicvf_queue_reg_read(nic,
+					      NIC_QSET_RBDR_0_1_STATUS0, q);
+		p[i++] = nicvf_queue_reg_read(nic,
+					      NIC_QSET_RBDR_0_1_STATUS1, q);
+		reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS;
+		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+	}
+}
+
+static int nicvf_get_coalesce(struct net_device *netdev,
+			      struct ethtool_coalesce *cmd)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+
+	cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs;
+	return 0;
+}
+
+static void nicvf_get_ringparam(struct net_device *netdev,
+				struct ethtool_ringparam *ring)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+	struct queue_set *qs = nic->qs;
+
+	ring->rx_max_pending = MAX_RCV_BUF_COUNT;
+	ring->rx_pending = qs->rbdr_len;
+	ring->tx_max_pending = MAX_SND_QUEUE_LEN;
+	ring->tx_pending = qs->sq_len;
+}
+
+static int nicvf_get_rss_hash_opts(struct nicvf *nic,
+				   struct ethtool_rxnfc *info)
+{
+	info->data = 0;
+
+	switch (info->flow_type) {
+	case TCP_V4_FLOW:
+	case TCP_V6_FLOW:
+	case UDP_V4_FLOW:
+	case UDP_V6_FLOW:
+	case SCTP_V4_FLOW:
+	case SCTP_V6_FLOW:
+		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
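+		/* Fall through - L4 flows also hash on IP src/dst */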
+	case IPV4_FLOW:
+	case IPV6_FLOW:
+		info->data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int nicvf_get_rxnfc(struct net_device *dev,
+			   struct ethtool_rxnfc *info, u32 *rules)
+{
+	struct nicvf *nic = netdev_priv(dev);
+	int ret = -EOPNOTSUPP;
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		info->data = nic->qs->rq_cnt;
+		ret = 0;
+		break;
+	case ETHTOOL_GRXFH:
+		return nicvf_get_rss_hash_opts(nic, info);
+	default:
+		break;
+	}
+	return ret;
+}
+
+static int nicvf_set_rss_hash_opts(struct nicvf *nic,
+				   struct ethtool_rxnfc *info)
+{
+	struct nicvf_rss_info *rss = &nic->rss_info;
+	u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+
+	if (!rss->enable) {
+		netdev_err(nic->netdev,
+			   "RSS is disabled, hash cannot be set\n");
+		return -EIO;
+	}
+
+	netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
+		    info->flow_type, info->data);
+
+	if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
+		return -EINVAL;
+
+	switch (info->flow_type) {
+	case TCP_V4_FLOW:
+	case TCP_V6_FLOW:
+		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			rss_cfg &= ~(1ULL << RSS_HASH_TCP);
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			rss_cfg |= (1ULL << RSS_HASH_TCP);
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case UDP_V4_FLOW:
+	case UDP_V6_FLOW:
+		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			rss_cfg &= ~(1ULL << RSS_HASH_UDP);
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			rss_cfg |= (1ULL << RSS_HASH_UDP);
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case SCTP_V4_FLOW:
+	case SCTP_V6_FLOW:
+		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			rss_cfg &= ~(1ULL << RSS_HASH_L4ETC);
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			rss_cfg |= (1ULL << RSS_HASH_L4ETC);
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case IPV4_FLOW:
+	case IPV6_FLOW:
+		rss_cfg = RSS_HASH_IP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg);
+	return 0;
+}
+
+static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+	struct nicvf *nic = netdev_priv(dev);
+
+	switch (info->cmd) {
+	case ETHTOOL_SRXFH:
+		return nicvf_set_rss_hash_opts(nic, info);
+	default:
+		break;
+	}
+	return -EOPNOTSUPP;
+}
+
+static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
+{
+	return RSS_HASH_KEY_SIZE * sizeof(u64);
+}
+
+static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
+{
+	struct nicvf *nic = netdev_priv(dev);
+
+	return nic->rss_info.rss_size;
+}
+
+static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
+			  u8 *hfunc)
+{
+	struct nicvf *nic = netdev_priv(dev);
+	struct nicvf_rss_info *rss = &nic->rss_info;
+	int idx;
+
+	if (indir) {
+		for (idx = 0; idx < rss->rss_size; idx++)
+			indir[idx] = rss->ind_tbl[idx];
+	}
+
+	if (hkey)
+		memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
+
+	if (hfunc)
+		*hfunc = ETH_RSS_HASH_TOP;
+
+	return 0;
+}
+
+static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
+			  const u8 *hkey, u8 hfunc)
+{
+	struct nicvf *nic = netdev_priv(dev);
+	struct nicvf_rss_info *rss = &nic->rss_info;
+	int idx;
+
+	if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) {
+		rss->enable = false;
+		rss->hash_bits = 0;
+		return -EIO;
+	}
+
+	/* We do not allow change in unsupported parameters */
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+		return -EOPNOTSUPP;
+
+	rss->enable = true;
+	if (indir) {
+		for (idx = 0; idx < rss->rss_size; idx++)
+			rss->ind_tbl[idx] = indir[idx];
+	}
+
+	if (hkey) {
+		memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
+		nicvf_set_rss_key(nic);
+	}
+
+	nicvf_config_rss(nic);
+	return 0;
+}
+
+/* Get no of queues device supports and current queue count */
+static void nicvf_get_channels(struct net_device *dev,
+			       struct ethtool_channels *channel)
+{
+	struct nicvf *nic = netdev_priv(dev);
+
+	memset(channel, 0, sizeof(*channel));
+
+	channel->max_rx = MAX_RCV_QUEUES_PER_QS;
+	channel->max_tx = MAX_SND_QUEUES_PER_QS;
+
+	channel->rx_count = nic->qs->rq_cnt;
+	channel->tx_count = nic->qs->sq_cnt;
+}
+
+/* Set no of Tx, Rx queues to be used */
+static int nicvf_set_channels(struct net_device *dev,
+			      struct ethtool_channels *channel)
+{
+	struct nicvf *nic = netdev_priv(dev);
+	int err = 0;
+
+	if (!channel->rx_count || !channel->tx_count)
+		return -EINVAL;
+	if (channel->rx_count > MAX_RCV_QUEUES_PER_QS)
+		return -EINVAL;
+	if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
+		return -EINVAL;
+
+	nic->qs->rq_cnt = channel->rx_count;
+	nic->qs->sq_cnt = channel->tx_count;
+	nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
+
+	err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt);
+	if (err)
+		return err;
+
+	if (!netif_running(dev))
+		return err;
+
+	nicvf_stop(dev);
+	nicvf_open(dev);
+	netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
+		    nic->qs->sq_cnt, nic->qs->rq_cnt);
+
+	return err;
+}
+
+static const struct ethtool_ops nicvf_ethtool_ops = {
+	.get_settings		= nicvf_get_settings,
+	.get_link		= ethtool_op_get_link,
+	.get_drvinfo		= nicvf_get_drvinfo,
+	.get_msglevel		= nicvf_get_msglevel,
+	.set_msglevel		= nicvf_set_msglevel,
+	.get_strings		= nicvf_get_strings,
+	.get_sset_count		= nicvf_get_sset_count,
+	.get_ethtool_stats	= nicvf_get_ethtool_stats,
+	.get_regs_len		= nicvf_get_regs_len,
+	.get_regs		= nicvf_get_regs,
+	.get_coalesce		= nicvf_get_coalesce,
+	.get_ringparam		= nicvf_get_ringparam,
+	.get_rxnfc		= nicvf_get_rxnfc,
+	.set_rxnfc		= nicvf_set_rxnfc,
+	.get_rxfh_key_size	= nicvf_get_rxfh_key_size,
+	.get_rxfh_indir_size	= nicvf_get_rxfh_indir_size,
+	.get_rxfh		= nicvf_get_rxfh,
+	.set_rxfh		= nicvf_set_rxfh,
+	.get_channels		= nicvf_get_channels,
+	.set_channels		= nicvf_set_channels,
+	.get_ts_info		= ethtool_op_get_ts_info,
+};
+
+void nicvf_set_ethtool_ops(struct net_device *netdev)
+{
+	netdev->ethtool_ops = &nicvf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
new file mode 100644
index 0000000..8b119a0
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -0,0 +1,1341 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/log2.h>
+#include <linux/prefetch.h>
+#include <linux/irq.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME	"thunder-nicvf"
+#define DRV_VERSION	"1.0"
+
+/* Supported devices */
+static const struct pci_device_id nicvf_id_table[] = {
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+			 PCI_DEVICE_ID_THUNDER_NIC_VF,
+			 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+			 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
+			 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+	{ 0, }  /* end of table */
+};
+
+MODULE_AUTHOR("Sunil Goutham");
+MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, nicvf_id_table);
+
+static int debug = 0x00;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug message level bitmap");
+
+static int cpi_alg = CPI_ALG_NONE;
+module_param(cpi_alg, int, S_IRUGO);
+MODULE_PARM_DESC(cpi_alg,
+		 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
+
+static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
+					  struct sk_buff *skb)
+{
+	if (skb->len <= 64)
+		nic->drv_stats.rx_frames_64++;
+	else if (skb->len <= 127)
+		nic->drv_stats.rx_frames_127++;
+	else if (skb->len <= 255)
+		nic->drv_stats.rx_frames_255++;
+	else if (skb->len <= 511)
+		nic->drv_stats.rx_frames_511++;
+	else if (skb->len <= 1023)
+		nic->drv_stats.rx_frames_1023++;
+	else if (skb->len <= 1518)
+		nic->drv_stats.rx_frames_1518++;
+	else
+		nic->drv_stats.rx_frames_jumbo++;
+}
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver.  The readq()/writeq() functions add
+ * explicit ordering operations which in this case are redundant and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
+{
+	writeq_relaxed(val, nic->reg_base + offset);
+}
+
+u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
+{
+	return readq_relaxed(nic->reg_base + offset);
+}
+
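+/* Per-queue instances of a register live at a (1 << NIC_Q_NUM_SHIFT) byte
+ * stride from the queue 0 address.
+ */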
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+			   u64 qidx, u64 val)
+{
+	void __iomem *addr = nic->reg_base + offset;
+
+	writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
+}
+
+u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
+{
+	void __iomem *addr = nic->reg_base + offset;
+
+	return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
+}
+
+/* VF -> PF mailbox communication */
+
+static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
+{
+	u64 *msg = (u64 *)mbx;
+
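+	/* A mailbox message is exactly two 64-bit words */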
+	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
+	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
+}
+
+int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
+{
+	int timeout = NIC_MBOX_MSG_TIMEOUT;
+	int sleep = 10;
+
+	nic->pf_acked = false;
+	nic->pf_nacked = false;
+
+	nicvf_write_to_mbx(nic, mbx);
+
+	/* Wait for the message to be acked, timeout 2sec */
+	while (!nic->pf_acked) {
+		if (nic->pf_nacked)
+			return -EINVAL;
+		msleep(sleep);
+		if (nic->pf_acked)
+			break;
+		timeout -= sleep;
+		if (!timeout) {
+			netdev_err(nic->netdev,
+				   "PF didn't ack to mbox msg %d from VF%d\n",
+				   (mbx->msg.msg & 0xFF), nic->vf_id);
+			return -EBUSY;
+		}
+	}
+	return 0;
+}
+
+/* Checks if VF is able to communicate with PF
+ * and also gets the VNIC number this VF is associated to.
+ */
+static int nicvf_check_pf_ready(struct nicvf *nic)
+{
+	int timeout = 5000, sleep = 20;
+	union nic_mbx mbx = {};
+
+	mbx.msg.msg = NIC_MBOX_MSG_READY;
+
+	nic->pf_ready_to_rcv_msg = false;
+
+	nicvf_write_to_mbx(nic, &mbx);
+
+	while (!nic->pf_ready_to_rcv_msg) {
+		msleep(sleep);
+		if (nic->pf_ready_to_rcv_msg)
+			break;
+		timeout -= sleep;
+		if (!timeout) {
+			netdev_err(nic->netdev,
+				   "PF didn't respond to READY msg\n");
+			return 0;
+		}
+	}
+	return 1;
+}
+
+static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
+{
+	if (bgx->rx)
+		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
+	else
+		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
+}
+
+static void nicvf_handle_mbx_intr(struct nicvf *nic)
+{
+	union nic_mbx mbx = {};
+	u64 *mbx_data;
+	u64 mbx_addr;
+	int i;
+
+	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
+	mbx_data = (u64 *)&mbx;
+
+	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+		*mbx_data = nicvf_reg_read(nic, mbx_addr);
+		mbx_data++;
+		mbx_addr += sizeof(u64);
+	}
+
+	netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
+	switch (mbx.msg.msg) {
+	case NIC_MBOX_MSG_READY:
+		nic->pf_ready_to_rcv_msg = true;
+		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
+		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
+		nic->node = mbx.nic_cfg.node_id;
+		if (!nic->set_mac_pending)
+			ether_addr_copy(nic->netdev->dev_addr,
+					mbx.nic_cfg.mac_addr);
+		nic->link_up = false;
+		nic->duplex = 0;
+		nic->speed = 0;
+		break;
+	case NIC_MBOX_MSG_ACK:
+		nic->pf_acked = true;
+		break;
+	case NIC_MBOX_MSG_NACK:
+		nic->pf_nacked = true;
+		break;
+	case NIC_MBOX_MSG_RSS_SIZE:
+		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
+		nic->pf_acked = true;
+		break;
+	case NIC_MBOX_MSG_BGX_STATS:
+		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
+		nic->pf_acked = true;
+		nic->bgx_stats_acked = true;
+		break;
+	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+		nic->pf_acked = true;
+		nic->link_up = mbx.link_status.link_up;
+		nic->duplex = mbx.link_status.duplex;
+		nic->speed = mbx.link_status.speed;
+		if (nic->link_up) {
+			netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
+				    nic->netdev->name, nic->speed,
+				    nic->duplex == DUPLEX_FULL ?
+				"Full duplex" : "Half duplex");
+			netif_carrier_on(nic->netdev);
+			netif_tx_wake_all_queues(nic->netdev);
+		} else {
+			netdev_info(nic->netdev, "%s: Link is Down\n",
+				    nic->netdev->name);
+			netif_carrier_off(nic->netdev);
+			netif_tx_stop_all_queues(nic->netdev);
+		}
+		break;
+	default:
+		netdev_err(nic->netdev,
+			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
+		break;
+	}
+	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
+}
+
+static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
+{
+	union nic_mbx mbx = {};
+
+	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
+	mbx.mac.vf_id = nic->vf_id;
+	ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);
+
+	return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_config_cpi(struct nicvf *nic)
+{
+	union nic_mbx mbx = {};
+
+	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
+	mbx.cpi_cfg.vf_id = nic->vf_id;
+	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
+	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;
+
+	nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_get_rss_size(struct nicvf *nic)
+{
+	union nic_mbx mbx = {};
+
+	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
+	mbx.rss_size.vf_id = nic->vf_id;
+	nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+void nicvf_config_rss(struct nicvf *nic)
+{
+	union nic_mbx mbx = {};
+	struct nicvf_rss_info *rss = &nic->rss_info;
+	int ind_tbl_len = rss->rss_size;
+	int i, nextq = 0;
+
+	mbx.rss_cfg.vf_id = nic->vf_id;
+	mbx.rss_cfg.hash_bits = rss->hash_bits;
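+	/* The indirection table may not fit in one mailbox message, so send
+	 * it in RSS_IND_TBL_LEN_PER_MBX_MSG sized chunks; continuation
+	 * chunks are flagged with NIC_MBOX_MSG_RSS_CFG_CONT.
+	 */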
+	while (ind_tbl_len) {
+		mbx.rss_cfg.tbl_offset = nextq;
+		mbx.rss_cfg.tbl_len = min(ind_tbl_len,
+					  RSS_IND_TBL_LEN_PER_MBX_MSG);
+		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
+			  NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;
+
+		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
+			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];
+
+		nicvf_send_msg_to_pf(nic, &mbx);
+
+		ind_tbl_len -= mbx.rss_cfg.tbl_len;
+	}
+}
+
+void nicvf_set_rss_key(struct nicvf *nic)
+{
+	struct nicvf_rss_info *rss = &nic->rss_info;
+	u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
+	int idx;
+
+	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
+		nicvf_reg_write(nic, key_addr, rss->key[idx]);
+		key_addr += sizeof(u64);
+	}
+}
+
+static int nicvf_rss_init(struct nicvf *nic)
+{
+	struct nicvf_rss_info *rss = &nic->rss_info;
+	int idx;
+
+	nicvf_get_rss_size(nic);
+
+	if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) {
+		rss->enable = false;
+		rss->hash_bits = 0;
+		return 0;
+	}
+
+	rss->enable = true;
+
+	/* Using the HW reset value for now */
+	rss->key[0] = 0xFEED0BADFEED0BADULL;
+	rss->key[1] = 0xFEED0BADFEED0BADULL;
+	rss->key[2] = 0xFEED0BADFEED0BADULL;
+	rss->key[3] = 0xFEED0BADFEED0BADULL;
+	rss->key[4] = 0xFEED0BADFEED0BADULL;
+
+	nicvf_set_rss_key(nic);
+
+	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
+	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);
+
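+	/* hash_bits is the log2 of the (power of 2) indirection table size */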
+	rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));
+
+	for (idx = 0; idx < rss->rss_size; idx++)
+		rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
+							       nic->qs->rq_cnt);
+	nicvf_config_rss(nic);
+	return 1;
+}
+
+int nicvf_set_real_num_queues(struct net_device *netdev,
+			      int tx_queues, int rx_queues)
+{
+	int err = 0;
+
+	err = netif_set_real_num_tx_queues(netdev, tx_queues);
+	if (err) {
+		netdev_err(netdev,
+			   "Failed to set no of Tx queues: %d\n", tx_queues);
+		return err;
+	}
+
+	err = netif_set_real_num_rx_queues(netdev, rx_queues);
+	if (err)
+		netdev_err(netdev,
+			   "Failed to set no of Rx queues: %d\n", rx_queues);
+	return err;
+}
+
+static int nicvf_init_resources(struct nicvf *nic)
+{
+	int err;
+	union nic_mbx mbx = {};
+
+	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+
+	/* Enable Qset */
+	nicvf_qset_config(nic, true);
+
+	/* Initialize queues and HW for data transfer */
+	err = nicvf_config_data_transfer(nic, true);
+	if (err) {
+		netdev_err(nic->netdev,
+			   "Failed to alloc/config VF's QSet resources\n");
+		return err;
+	}
+
+	/* Send VF config done msg to PF */
+	nicvf_write_to_mbx(nic, &mbx);
+
+	return 0;
+}
+
+static void nicvf_snd_pkt_handler(struct net_device *netdev,
+				  struct cmp_queue *cq,
+				  struct cqe_send_t *cqe_tx, int cqe_type)
+{
+	struct sk_buff *skb = NULL;
+	struct nicvf *nic = netdev_priv(netdev);
+	struct snd_queue *sq;
+	struct sq_hdr_subdesc *hdr;
+
+	sq = &nic->qs->sq[cqe_tx->sq_idx];
+
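+	/* The completion's sqe_ptr points at the packet's header subdescriptor */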
+	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
+	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
+		return;
+
+	netdev_dbg(nic->netdev,
+		   "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
+		   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
+		   cqe_tx->sqe_ptr, hdr->subdesc_cnt);
+
+	nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
+	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
+	/* For TSO offloaded packets only one head SKB needs to be freed */
+	if (skb) {
+		prefetch(skb);
+		dev_consume_skb_any(skb);
+	}
+}
+
+static void nicvf_rcv_pkt_handler(struct net_device *netdev,
+				  struct napi_struct *napi,
+				  struct cmp_queue *cq,
+				  struct cqe_rx_t *cqe_rx, int cqe_type)
+{
+	struct sk_buff *skb;
+	struct nicvf *nic = netdev_priv(netdev);
+	int err = 0;
+
+	/* Check for errors */
+	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
+	if (err && !cqe_rx->rb_cnt)
+		return;
+
+	skb = nicvf_get_rcv_skb(nic, cqe_rx);
+	if (!skb) {
+		netdev_dbg(nic->netdev, "Packet not received\n");
+		return;
+	}
+
+	if (netif_msg_pktdata(nic)) {
+		netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
+			    skb, skb->len);
+		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
+			       skb->data, skb->len, true);
+	}
+
+	nicvf_set_rx_frame_cnt(nic, skb);
+
+	skb_record_rx_queue(skb, cqe_rx->rq_idx);
+	if (netdev->hw_features & NETIF_F_RXCSUM) {
+		/* HW by default verifies TCP/UDP/SCTP checksums */
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else {
+		skb_checksum_none_assert(skb);
+	}
+
+	skb->protocol = eth_type_trans(skb, netdev);
+
+	if (napi && (netdev->features & NETIF_F_GRO))
+		napi_gro_receive(napi, skb);
+	else
+		netif_receive_skb(skb);
+}
+
+static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
+				 struct napi_struct *napi, int budget)
+{
+	int processed_cqe, work_done = 0;
+	int cqe_count, cqe_head;
+	struct nicvf *nic = netdev_priv(netdev);
+	struct queue_set *qs = nic->qs;
+	struct cmp_queue *cq = &qs->cq[cq_idx];
+	struct cqe_rx_t *cq_desc;
+
+	spin_lock_bh(&cq->lock);
+loop:
+	processed_cqe = 0;
+	/* Get no of valid CQ entries to process */
+	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
+	cqe_count &= CQ_CQE_COUNT;
+	if (!cqe_count)
+		goto done;
+
+	/* Get head of the valid CQ entries; the HEAD register holds a byte
+	 * offset and each CQE is 512 bytes, hence the shift by 9
+	 */
+	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
+	cqe_head &= 0xFFFF;
+
+	netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n",
+		   __func__, cqe_count, cqe_head);
+	while (processed_cqe < cqe_count) {
+		/* Get the CQ descriptor */
+		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
+		cqe_head++;
+		cqe_head &= (cq->dmem.q_len - 1);
+		/* Initiate prefetch for next descriptor */
+		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
+
+		if ((work_done >= budget) && napi &&
+		    (cq_desc->cqe_type != CQE_TYPE_SEND)) {
+			break;
+		}
+
+		netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n",
+			   cq_desc->cqe_type);
+		switch (cq_desc->cqe_type) {
+		case CQE_TYPE_RX:
+			nicvf_rcv_pkt_handler(netdev, napi, cq,
+					      cq_desc, CQE_TYPE_RX);
+			work_done++;
+		break;
+		case CQE_TYPE_SEND:
+			nicvf_snd_pkt_handler(netdev, cq,
+					      (void *)cq_desc, CQE_TYPE_SEND);
+		break;
+		case CQE_TYPE_INVALID:
+		case CQE_TYPE_RX_SPLIT:
+		case CQE_TYPE_RX_TCP:
+		case CQE_TYPE_SEND_PTP:
+			/* Ignore for now */
+		break;
+		}
+		processed_cqe++;
+	}
+	netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n",
+		   __func__, processed_cqe, work_done, budget);
+
+	/* Ring doorbell to inform H/W to reuse processed CQEs */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
+			      cq_idx, processed_cqe);
+
+	if ((work_done < budget) && napi)
+		goto loop;
+
+done:
+	spin_unlock_bh(&cq->lock);
+	return work_done;
+}
+
+static int nicvf_poll(struct napi_struct *napi, int budget)
+{
+	u64  cq_head;
+	int  work_done = 0;
+	struct net_device *netdev = napi->dev;
+	struct nicvf *nic = netdev_priv(netdev);
+	struct nicvf_cq_poll *cq;
+	struct netdev_queue *txq;
+
+	cq = container_of(napi, struct nicvf_cq_poll, napi);
+	work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
+
+	txq = netdev_get_tx_queue(netdev, cq->cq_idx);
+	if (netif_tx_queue_stopped(txq))
+		netif_tx_wake_queue(txq);
+
+	if (work_done < budget) {
+		/* Slow packet rate, exit polling */
+		napi_complete(napi);
+		/* Re-enable interrupts */
+		cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
+					       cq->cq_idx);
+		nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
+		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
+				      cq->cq_idx, cq_head);
+		nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
+	}
+	return work_done;
+}
+
+/* Qset error interrupt handler
+ *
+ * As of now only CQ errors are handled
+ */
+static void nicvf_handle_qs_err(unsigned long data)
+{
+	struct nicvf *nic = (struct nicvf *)data;
+	struct queue_set *qs = nic->qs;
+	int qidx;
+	u64 status;
+
+	netif_tx_disable(nic->netdev);
+
+	/* Check if it is CQ err */
+	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
+					      qidx);
+		if (!(status & CQ_ERR_MASK))
+			continue;
+		/* Process already queued CQEs and reconfig CQ */
+		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+		nicvf_sq_disable(nic, qidx);
+		nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
+		nicvf_cmp_queue_config(nic, qs, qidx, true);
+		nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
+		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
+
+		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
+	}
+
+	netif_tx_start_all_queues(nic->netdev);
+	/* Re-enable Qset error interrupt */
+	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
+}
+
+static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
+{
+	struct nicvf *nic = (struct nicvf *)nicvf_irq;
+	u64 intr;
+
+	intr = nicvf_reg_read(nic, NIC_VF_INT);
+	/* Check for spurious interrupt */
+	if (!(intr & NICVF_INTR_MBOX_MASK))
+		return IRQ_HANDLED;
+
+	nicvf_handle_mbx_intr(nic);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq)
+{
+	u64 qidx, intr, clear_intr = 0;
+	u64 cq_intr, rbdr_intr, qs_err_intr;
+	struct nicvf *nic = (struct nicvf *)nicvf_irq;
+	struct queue_set *qs = nic->qs;
+	struct nicvf_cq_poll *cq_poll = NULL;
+
+	intr = nicvf_reg_read(nic, NIC_VF_INT);
+	if (netif_msg_intr(nic))
+		netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
+			    nic->netdev->name, intr);
+
+	qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK;
+	if (qs_err_intr) {
+		/* Disable Qset err interrupt and schedule softirq */
+		nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+		tasklet_hi_schedule(&nic->qs_err_task);
+		clear_intr |= qs_err_intr;
+	}
+
+	/* Disable interrupts and start polling */
+	cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT;
+	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+		if (!(cq_intr & (1 << qidx)))
+			continue;
+		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx))
+			continue;
+
+		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+		clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT);
+
+		cq_poll = nic->napi[qidx];
+		/* Schedule NAPI */
+		if (cq_poll)
+			napi_schedule(&cq_poll->napi);
+	}
+
+	/* Handle RBDR interrupts */
+	rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT;
+	if (rbdr_intr) {
+		/* Disable RBDR interrupt and schedule softirq */
+		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+			if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
+				continue;
+			nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+			tasklet_hi_schedule(&nic->rbdr_task);
+			clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT);
+		}
+	}
+
+	/* Clear interrupts */
+	nicvf_reg_write(nic, NIC_VF_INT, clear_intr);
+	return IRQ_HANDLED;
+}
+
+static int nicvf_enable_msix(struct nicvf *nic)
+{
+	int ret, vec;
+
+	nic->num_vec = NIC_VF_MSIX_VECTORS;
+
+	for (vec = 0; vec < nic->num_vec; vec++)
+		nic->msix_entries[vec].entry = vec;
+
+	ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
+	if (ret) {
+		netdev_err(nic->netdev,
+			   "Req for #%d msix vectors failed\n", nic->num_vec);
+		return 0;
+	}
+	nic->msix_enabled = 1;
+	return 1;
+}
+
+static void nicvf_disable_msix(struct nicvf *nic)
+{
+	if (nic->msix_enabled) {
+		pci_disable_msix(nic->pdev);
+		nic->msix_enabled = 0;
+		nic->num_vec = 0;
+	}
+}
+
+static int nicvf_register_interrupts(struct nicvf *nic)
+{
+	int irq, free, ret = 0;
+	int vector;
+
+	for_each_cq_irq(irq)
+		sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
+			nic->vf_id, irq);
+
+	for_each_sq_irq(irq)
+		sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
+			nic->vf_id, irq - NICVF_INTR_ID_SQ);
+
+	for_each_rbdr_irq(irq)
+		sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
+			nic->vf_id, irq - NICVF_INTR_ID_RBDR);
+
+	/* Register all interrupts except mailbox */
+	for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) {
+		vector = nic->msix_entries[irq].vector;
+		ret = request_irq(vector, nicvf_intr_handler,
+				  0, nic->irq_name[irq], nic);
+		if (ret)
+			break;
+		nic->irq_allocated[irq] = true;
+	}
+
+	for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) {
+		vector = nic->msix_entries[irq].vector;
+		ret = request_irq(vector, nicvf_intr_handler,
+				  0, nic->irq_name[irq], nic);
+		if (ret)
+			break;
+		nic->irq_allocated[irq] = true;
+	}
+
+	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
+		"NICVF%d Qset error", nic->vf_id);
+	if (!ret) {
+		vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector;
+		irq = NICVF_INTR_ID_QS_ERR;
+		ret = request_irq(vector, nicvf_intr_handler,
+				  0, nic->irq_name[irq], nic);
+		if (!ret)
+			nic->irq_allocated[irq] = true;
+	}
+
+	if (ret) {
+		netdev_err(nic->netdev, "Request irq failed\n");
+		for (free = 0; free < irq; free++)
+			free_irq(nic->msix_entries[free].vector, nic);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void nicvf_unregister_interrupts(struct nicvf *nic)
+{
+	int irq;
+
+	/* Free registered interrupts */
+	for (irq = 0; irq < nic->num_vec; irq++) {
+		if (nic->irq_allocated[irq])
+			free_irq(nic->msix_entries[irq].vector, nic);
+		nic->irq_allocated[irq] = false;
+	}
+
+	/* Disable MSI-X */
+	nicvf_disable_msix(nic);
+}
+
+/* Initialize MSIX vectors and register MISC interrupt.
+ * Send READY message to PF to check if it's alive
+ */
+static int nicvf_register_misc_interrupt(struct nicvf *nic)
+{
+	int ret = 0;
+	int irq = NICVF_INTR_ID_MISC;
+
+	/* Return if mailbox interrupt is already registered */
+	if (nic->msix_enabled)
+		return 0;
+
+	/* Enable MSI-X */
+	if (!nicvf_enable_msix(nic))
+		return -EIO;
+
+	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
+	/* Register Misc interrupt */
+	ret = request_irq(nic->msix_entries[irq].vector,
+			  nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);
+
+	if (ret)
+		return ret;
+	nic->irq_allocated[irq] = true;
+
+	/* Enable mailbox interrupt */
+	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);
+
+	/* Check if VF is able to communicate with PF */
+	if (!nicvf_check_pf_ready(nic)) {
+		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+		nicvf_unregister_interrupts(nic);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+	int qid = skb_get_queue_mapping(skb);
+	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
+
+	/* Check for minimum packet length */
+	if (skb->len <= ETH_HLEN) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
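+	/* If the SQ has no room, stop the queue; returning NETDEV_TX_BUSY
+	 * makes the stack requeue the skb.
+	 */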
+	if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) {
+		netif_tx_stop_queue(txq);
+		nic->drv_stats.tx_busy++;
+		if (netif_msg_tx_err(nic))
+			netdev_warn(netdev,
+				    "%s: Transmit ring full, stopping SQ%d\n",
+				    netdev->name, qid);
+
+		return NETDEV_TX_BUSY;
+	}
+
+	return NETDEV_TX_OK;
+}
+
+int nicvf_stop(struct net_device *netdev)
+{
+	int irq, qidx;
+	struct nicvf *nic = netdev_priv(netdev);
+	struct queue_set *qs = nic->qs;
+	struct nicvf_cq_poll *cq_poll = NULL;
+	union nic_mbx mbx = {};
+
+	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
+	nicvf_send_msg_to_pf(nic, &mbx);
+
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+
+	/* Disable RBDR & QS error interrupts */
+	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
+	}
+	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
+
+	/* Wait for pending IRQ handlers to finish */
+	for (irq = 0; irq < nic->num_vec; irq++)
+		synchronize_irq(nic->msix_entries[irq].vector);
+
+	tasklet_kill(&nic->rbdr_task);
+	tasklet_kill(&nic->qs_err_task);
+	if (nic->rb_work_scheduled)
+		cancel_delayed_work_sync(&nic->rbdr_work);
+
+	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
+		cq_poll = nic->napi[qidx];
+		if (!cq_poll)
+			continue;
+		nic->napi[qidx] = NULL;
+		napi_synchronize(&cq_poll->napi);
+		/* CQ intr is enabled while napi_complete,
+		 * so disable it now
+		 */
+		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+		nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
+		napi_disable(&cq_poll->napi);
+		netif_napi_del(&cq_poll->napi);
+		kfree(cq_poll);
+	}
+
+	/* Free resources */
+	nicvf_config_data_transfer(nic, false);
+
+	/* Disable HW Qset */
+	nicvf_qset_config(nic, false);
+
+	/* disable mailbox interrupt */
+	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+
+	nicvf_unregister_interrupts(nic);
+
+	return 0;
+}
+
+int nicvf_open(struct net_device *netdev)
+{
+	int err, qidx;
+	struct nicvf *nic = netdev_priv(netdev);
+	struct queue_set *qs = nic->qs;
+	struct nicvf_cq_poll *cq_poll = NULL;
+
+	nic->mtu = netdev->mtu;
+
+	netif_carrier_off(netdev);
+
+	err = nicvf_register_misc_interrupt(nic);
+	if (err)
+		return err;
+
+	/* Register NAPI handler for processing CQEs */
+	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+		cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
+		if (!cq_poll) {
+			err = -ENOMEM;
+			goto napi_del;
+		}
+		cq_poll->cq_idx = qidx;
+		netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
+			       NAPI_POLL_WEIGHT);
+		napi_enable(&cq_poll->napi);
+		nic->napi[qidx] = cq_poll;
+	}
+
+	/* Check if we got MAC address from PF or else generate a random MAC */
+	if (is_zero_ether_addr(netdev->dev_addr)) {
+		eth_hw_addr_random(netdev);
+		nicvf_hw_set_mac_addr(nic, netdev);
+	}
+
+	if (nic->set_mac_pending) {
+		nic->set_mac_pending = false;
+		nicvf_hw_set_mac_addr(nic, netdev);
+	}
+
+	/* Init tasklet for handling Qset err interrupt */
+	tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
+		     (unsigned long)nic);
+
+	/* Init RBDR tasklet which will refill RBDR */
+	tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
+		     (unsigned long)nic);
+	INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);
+
+	/* Configure CPI algorithm */
+	nic->cpi_alg = cpi_alg;
+	nicvf_config_cpi(nic);
+
+	/* Configure receive side scaling */
+	nicvf_rss_init(nic);
+
+	err = nicvf_register_interrupts(nic);
+	if (err)
+		goto cleanup;
+
+	/* Initialize the queues */
+	err = nicvf_init_resources(nic);
+	if (err)
+		goto cleanup;
+
+	/* Make sure queue initialization is written */
+	wmb();
+
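+	/* NIC_VF_INT is write-1-to-clear; clear any stale interrupt state */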
+	nicvf_reg_write(nic, NIC_VF_INT, -1);
+	/* Enable Qset err interrupt */
+	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
+
+	/* Enable completion queue interrupt */
+	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
+
+	/* Enable RBDR threshold interrupt */
+	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
+
+	netif_carrier_on(netdev);
+	netif_tx_start_all_queues(netdev);
+
+	return 0;
+cleanup:
+	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+	nicvf_unregister_interrupts(nic);
+napi_del:
+	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+		cq_poll = nic->napi[qidx];
+		if (!cq_poll)
+			continue;
+		napi_disable(&cq_poll->napi);
+		netif_napi_del(&cq_poll->napi);
+		kfree(cq_poll);
+		nic->napi[qidx] = NULL;
+	}
+	return err;
+}
+
+static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
+{
+	union nic_mbx mbx = {};
+
+	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
+	mbx.frs.max_frs = mtu;
+	mbx.frs.vf_id = nic->vf_id;
+
+	return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+
+	if (new_mtu > NIC_HW_MAX_FRS)
+		return -EINVAL;
+
+	if (new_mtu < NIC_HW_MIN_FRS)
+		return -EINVAL;
+
+	if (nicvf_update_hw_max_frs(nic, new_mtu))
+		return -EINVAL;
+	netdev->mtu = new_mtu;
+	nic->mtu = new_mtu;
+
+	return 0;
+}
+
+static int nicvf_set_mac_address(struct net_device *netdev, void *p)
+{
+	struct sockaddr *addr = p;
+	struct nicvf *nic = netdev_priv(netdev);
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+	if (nic->msix_enabled) {
+		if (nicvf_hw_set_mac_addr(nic, netdev))
+			return -EBUSY;
+	} else {
+		nic->set_mac_pending = true;
+	}
+
+	return 0;
+}
+
+void nicvf_update_lmac_stats(struct nicvf *nic)
+{
+	int stat = 0;
+	union nic_mbx mbx = {};
+	int timeout;
+
+	if (!netif_running(nic->netdev))
+		return;
+
+	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
+	mbx.bgx_stats.vf_id = nic->vf_id;
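+	/* Stats are fetched from the PF one index at a time, waiting up to
+	 * ~20ms (10 x 2ms) for each ack
+	 */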
+	/* Rx stats */
+	mbx.bgx_stats.rx = 1;
+	while (stat < BGX_RX_STATS_COUNT) {
+		nic->bgx_stats_acked = 0;
+		mbx.bgx_stats.idx = stat;
+		nicvf_send_msg_to_pf(nic, &mbx);
+		timeout = 0;
+		while ((!nic->bgx_stats_acked) && (timeout < 10)) {
+			msleep(2);
+			timeout++;
+		}
+		stat++;
+	}
+
+	stat = 0;
+
+	/* Tx stats */
+	mbx.bgx_stats.rx = 0;
+	while (stat < BGX_TX_STATS_COUNT) {
+		nic->bgx_stats_acked = 0;
+		mbx.bgx_stats.idx = stat;
+		nicvf_send_msg_to_pf(nic, &mbx);
+		timeout = 0;
+		while ((!nic->bgx_stats_acked) && (timeout < 10)) {
+			msleep(2);
+			timeout++;
+		}
+		stat++;
+	}
+}
+
+void nicvf_update_stats(struct nicvf *nic)
+{
+	int qidx;
+	struct nicvf_hw_stats *stats = &nic->stats;
+	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+	struct queue_set *qs = nic->qs;
+
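+/* Per-stat HW registers are laid out at 8-byte strides, hence (reg << 3) */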
+#define GET_RX_STATS(reg) \
+	nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
+#define GET_TX_STATS(reg) \
+	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
+
+	stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS);
+	stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST);
+	stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST);
+	stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST);
+	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
+	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
+	stats->rx_drop_red = GET_RX_STATS(RX_RED);
+	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
+	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
+	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
+	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
+	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
+
+	stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
+	stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
+	stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
+	stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
+	stats->tx_drops = GET_TX_STATS(TX_DROP);
+
+	drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok +
+				  stats->rx_bcast_frames_ok +
+				  stats->rx_mcast_frames_ok;
+	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
+				  stats->tx_bcast_frames_ok +
+				  stats->tx_mcast_frames_ok;
+	drv_stats->rx_drops = stats->rx_drop_red +
+			      stats->rx_drop_overrun;
+	drv_stats->tx_drops = stats->tx_drops;
+
+	/* Update RQ and SQ stats */
+	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+		nicvf_update_rq_stats(nic, qidx);
+	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+		nicvf_update_sq_stats(nic, qidx);
+}
+
+static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
+					    struct rtnl_link_stats64 *stats)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+	struct nicvf_hw_stats *hw_stats = &nic->stats;
+	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+
+	nicvf_update_stats(nic);
+
+	stats->rx_bytes = hw_stats->rx_bytes_ok;
+	stats->rx_packets = drv_stats->rx_frames_ok;
+	stats->rx_dropped = drv_stats->rx_drops;
+
+	stats->tx_bytes = hw_stats->tx_bytes_ok;
+	stats->tx_packets = drv_stats->tx_frames_ok;
+	stats->tx_dropped = drv_stats->tx_drops;
+
+	return stats;
+}
+
+static void nicvf_tx_timeout(struct net_device *dev)
+{
+	struct nicvf *nic = netdev_priv(dev);
+
+	if (netif_msg_tx_err(nic))
+		netdev_warn(dev, "%s: Transmit timed out, resetting\n",
+			    dev->name);
+
+	schedule_work(&nic->reset_task);
+}
+
+static void nicvf_reset_task(struct work_struct *work)
+{
+	struct nicvf *nic;
+
+	nic = container_of(work, struct nicvf, reset_task);
+
+	if (!netif_running(nic->netdev))
+		return;
+
+	nicvf_stop(nic->netdev);
+	nicvf_open(nic->netdev);
+	nic->netdev->trans_start = jiffies;
+}
+
+static const struct net_device_ops nicvf_netdev_ops = {
+	.ndo_open		= nicvf_open,
+	.ndo_stop		= nicvf_stop,
+	.ndo_start_xmit		= nicvf_xmit,
+	.ndo_change_mtu		= nicvf_change_mtu,
+	.ndo_set_mac_address	= nicvf_set_mac_address,
+	.ndo_get_stats64	= nicvf_get_stats64,
+	.ndo_tx_timeout         = nicvf_tx_timeout,
+};
+
+static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct net_device *netdev;
+	struct nicvf *nic;
+	struct queue_set *qs;
+	int    err;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(dev, "Failed to enable PCI device\n");
+		return err;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		dev_err(dev, "PCI request regions failed 0x%x\n", err);
+		goto err_disable_device;
+	}
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+	if (err) {
+		dev_err(dev, "Unable to get usable DMA configuration\n");
+		goto err_release_regions;
+	}
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+	if (err) {
+		dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
+		goto err_release_regions;
+	}
+
+	netdev = alloc_etherdev_mqs(sizeof(struct nicvf),
+				    MAX_RCV_QUEUES_PER_QS,
+				    MAX_SND_QUEUES_PER_QS);
+	if (!netdev) {
+		err = -ENOMEM;
+		goto err_release_regions;
+	}
+
+	pci_set_drvdata(pdev, netdev);
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	nic = netdev_priv(netdev);
+	nic->netdev = netdev;
+	nic->pdev = pdev;
+
+	/* MAP VF's configuration registers */
+	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+	if (!nic->reg_base) {
+		dev_err(dev, "Cannot map config register space, aborting\n");
+		err = -ENOMEM;
+		goto err_free_netdev;
+	}
+
+	err = nicvf_set_qset_resources(nic);
+	if (err)
+		goto err_free_netdev;
+
+	qs = nic->qs;
+
+	err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt);
+	if (err)
+		goto err_free_netdev;
+
+	/* Check if PF is alive and get MAC address for this VF */
+	err = nicvf_register_misc_interrupt(nic);
+	if (err)
+		goto err_free_netdev;
+
+	netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
+			     NETIF_F_TSO | NETIF_F_GRO);
+	netdev->hw_features = netdev->features;
+
+	netdev->netdev_ops = &nicvf_netdev_ops;
+
+	INIT_WORK(&nic->reset_task, nicvf_reset_task);
+
+	err = register_netdev(netdev);
+	if (err) {
+		dev_err(dev, "Failed to register netdevice\n");
+		goto err_unregister_interrupts;
+	}
+
+	nic->msg_enable = debug;
+
+	nicvf_set_ethtool_ops(netdev);
+
+	return 0;
+
+err_unregister_interrupts:
+	nicvf_unregister_interrupts(nic);
+err_free_netdev:
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+err_release_regions:
+	pci_release_regions(pdev);
+err_disable_device:
+	pci_disable_device(pdev);
+	return err;
+}
+
+static void nicvf_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct nicvf *nic = netdev_priv(netdev);
+
+	unregister_netdev(netdev);
+	nicvf_unregister_interrupts(nic);
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver nicvf_driver = {
+	.name = DRV_NAME,
+	.id_table = nicvf_id_table,
+	.probe = nicvf_probe,
+	.remove = nicvf_remove,
+};
+
+static int __init nicvf_init_module(void)
+{
+	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+	return pci_register_driver(&nicvf_driver);
+}
+
+static void __exit nicvf_cleanup_module(void)
+{
+	pci_unregister_driver(&nicvf_driver);
+}
+
+module_init(nicvf_init_module);
+module_exit(nicvf_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
new file mode 100644
index 0000000..d69d228d
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -0,0 +1,1545 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/etherdevice.h>
+#include <net/ip.h>
+#include <net/tso.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+#include "nicvf_queues.h"
+
+struct rbuf_info {
+	struct page *page;
+	void	*data;
+	u64	offset;
+};
+
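+/* rbuf_info is stored NICVF_RCV_BUF_ALIGN_BYTES below the buffer address
+ * handed to HW, so it can be recovered from the address HW returns.
+ */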
+#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))
+
+/* Poll a register for a specific value */
+static int nicvf_poll_reg(struct nicvf *nic, int qidx,
+			  u64 reg, int bit_pos, int bits, int val)
+{
+	u64 bit_mask;
+	u64 reg_val;
+	int timeout = 10;
+
+	bit_mask = (1ULL << bits) - 1;
+	bit_mask = (bit_mask << bit_pos);
+
+	while (timeout) {
+		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
+		if (((reg_val & bit_mask) >> bit_pos) == val)
+			return 0;
+		usleep_range(1000, 2000);
+		timeout--;
+	}
+	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
+	return 1;
+}
+
+/* Allocate memory for a queue's descriptors */
+static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
+				  int q_len, int desc_size, int align_bytes)
+{
+	dmem->q_len = q_len;
+	dmem->size = (desc_size * q_len) + align_bytes;
+	/* Save address, need it while freeing */
+	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
+						&dmem->dma, GFP_KERNEL);
+	if (!dmem->unalign_base)
+		return -ENOMEM;
+
+	/* Align memory address for 'align_bytes' */
+	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
+	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
+	return 0;
+}
+
+/* Free queue's descriptor memory */
+static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
+{
+	if (!dmem)
+		return;
+
+	dma_free_coherent(&nic->pdev->dev, dmem->size,
+			  dmem->unalign_base, dmem->dma);
+	dmem->unalign_base = NULL;
+	dmem->base = NULL;
+}
+
+/* Allocate a buffer for packet reception.
+ * HW returns the memory address the packet was DMA'ed to, not a pointer
+ * into the RBDR ring, so save the buffer's start address at the start of
+ * the fragment and align the address handed to HW to a cache line.
+ */
+static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
+					 u32 buf_len, u64 **rbuf)
+{
+	u64 data;
+	struct rbuf_info *rinfo;
+	int order = get_order(buf_len);
+
+	/* Check if request can be accommodated in the previously allocated page */
+	if (nic->rb_page) {
+		if ((nic->rb_page_offset + buf_len + buf_len) >
+		    (PAGE_SIZE << order)) {
+			nic->rb_page = NULL;
+		} else {
+			nic->rb_page_offset += buf_len;
+			get_page(nic->rb_page);
+		}
+	}
+
+	/* Allocate a new page */
+	if (!nic->rb_page) {
+		nic->rb_page = alloc_pages(gfp | __GFP_COMP, order);
+		if (!nic->rb_page) {
+			netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n");
+			return -ENOMEM;
+		}
+		nic->rb_page_offset = 0;
+	}
+
+	data = (u64)page_address(nic->rb_page) + nic->rb_page_offset;
+
+	/* Align buffer addr to cache line, i.e. 128 bytes */
+	rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
+	/* Save page address for refcount updates */
+	rinfo->page = nic->rb_page;
+	/* Store start address for later retrieval */
+	rinfo->data = (void *)data;
+	/* Store alignment offset */
+	rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data);
+
+	data += rinfo->offset;
+
+	/* Give next aligned address to hw for DMA */
+	*rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES);
+	return 0;
+}
+
+/* Retrieve actual buffer start address and build skb for received packet */
+static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
+					   u64 rb_ptr, int len)
+{
+	struct sk_buff *skb;
+	struct rbuf_info *rinfo;
+
+	rb_ptr = (u64)phys_to_virt(rb_ptr);
+	/* Get buffer start address and alignment offset */
+	rinfo = GET_RBUF_INFO(rb_ptr);
+
+	/* Now build an skb to give to stack */
+	skb = build_skb(rinfo->data, RCV_FRAG_LEN);
+	if (!skb) {
+		put_page(rinfo->page);
+		return NULL;
+	}
+
+	/* Set correct skb->data */
+	skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES);
+
+	prefetch((void *)rb_ptr);
+	return skb;
+}
+
+/* Allocate RBDR ring and populate receive buffers */
+static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
+			    int ring_len, int buf_size)
+{
+	int idx;
+	u64 *rbuf;
+	struct rbdr_entry_t *desc;
+	int err;
+
+	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
+				     sizeof(struct rbdr_entry_t),
+				     NICVF_RCV_BUF_ALIGN_BYTES);
+	if (err)
+		return err;
+
+	rbdr->desc = rbdr->dmem.base;
+	/* Buffer size has to be in multiples of 128 bytes */
+	rbdr->dma_size = buf_size;
+	rbdr->enable = true;
+	rbdr->thresh = RBDR_THRESH;
+
+	nic->rb_page = NULL;
+	for (idx = 0; idx < ring_len; idx++) {
+		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
+					     &rbuf);
+		if (err)
+			return err;
+
+		desc = GET_RBDR_DESC(rbdr, idx);
+		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+	}
+	return 0;
+}
+
+/* Free RBDR ring and its receive buffers */
+static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
+{
+	int head, tail;
+	u64 buf_addr;
+	struct rbdr_entry_t *desc;
+	struct rbuf_info *rinfo;
+
+	if (!rbdr)
+		return;
+
+	rbdr->enable = false;
+	if (!rbdr->dmem.base)
+		return;
+
+	head = rbdr->head;
+	tail = rbdr->tail;
+
+	/* Release the receive buffer pages */
+	while (head != tail) {
+		desc = GET_RBDR_DESC(rbdr, head);
+		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
+		rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
+		put_page(rinfo->page);
+		head++;
+		head &= (rbdr->dmem.q_len - 1);
+	}
+	/* Release the buffer of the tail desc */
+	desc = GET_RBDR_DESC(rbdr, tail);
+	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
+	rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
+	put_page(rinfo->page);
+
+	/* Free RBDR ring */
+	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
+}
+
+/* Refill receive buffer descriptors with new buffers */
+static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
+{
+	struct queue_set *qs = nic->qs;
+	int rbdr_idx = qs->rbdr_cnt;
+	int tail, qcount;
+	int refill_rb_cnt;
+	struct rbdr *rbdr;
+	struct rbdr_entry_t *desc;
+	u64 *rbuf;
+	int new_rb = 0;
+
+refill:
+	if (!rbdr_idx)
+		return;
+	rbdr_idx--;
+	rbdr = &qs->rbdr[rbdr_idx];
+	/* Check if it's enabled */
+	if (!rbdr->enable)
+		goto next_rbdr;
+
+	/* Get no of desc's currently queued in the RBDR */
+	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
+	qcount &= 0x7FFFF;
+	/* Doorbell can be rung with at most ring size minus 1 */
+	if (qcount >= (qs->rbdr_len - 1))
+		goto next_rbdr;
+	else
+		refill_rb_cnt = qs->rbdr_len - qcount - 1;
+
+	/* Start filling descs from tail */
+	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
+	while (refill_rb_cnt) {
+		tail++;
+		tail &= (rbdr->dmem.q_len - 1);
+
+		if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
+			break;
+
+		desc = GET_RBDR_DESC(rbdr, tail);
+		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+		refill_rb_cnt--;
+		new_rb++;
+	}
+
+	/* make sure all memory stores are done before ringing doorbell */
+	smp_wmb();
+
+	/* Check if buffer allocation failed */
+	if (refill_rb_cnt)
+		nic->rb_alloc_fail = true;
+	else
+		nic->rb_alloc_fail = false;
+
+	/* Notify HW */
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
+			      rbdr_idx, new_rb);
+next_rbdr:
+	/* Re-enable RBDR interrupts only if buffer allocation is success */
+	if (!nic->rb_alloc_fail && rbdr->enable)
+		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
+
+	if (rbdr_idx)
+		goto refill;
+}
+
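+/* Two-level refill: the tasklet below refills from softirq context with
+ * atomic allocations; if that fails, the delayed work retries with
+ * GFP_KERNEL until the ring is replenished.
+ */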
+/* Alloc rcv buffers in non-atomic mode for better success */
+void nicvf_rbdr_work(struct work_struct *work)
+{
+	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
+
+	nicvf_refill_rbdr(nic, GFP_KERNEL);
+	if (nic->rb_alloc_fail)
+		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
+	else
+		nic->rb_work_scheduled = false;
+}
+
+/* In Softirq context, alloc rcv buffers in atomic mode */
+void nicvf_rbdr_task(unsigned long data)
+{
+	struct nicvf *nic = (struct nicvf *)data;
+
+	nicvf_refill_rbdr(nic, GFP_ATOMIC);
+	if (nic->rb_alloc_fail) {
+		nic->rb_work_scheduled = true;
+		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
+	}
+}
+
+/* Initialize completion queue */
+static int nicvf_init_cmp_queue(struct nicvf *nic,
+				struct cmp_queue *cq, int q_len)
+{
+	int err;
+
+	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
+				     NICVF_CQ_BASE_ALIGN_BYTES);
+	if (err)
+		return err;
+
+	cq->desc = cq->dmem.base;
+	cq->thresh = CMP_QUEUE_CQE_THRESH;
+	/* One timer tick is 0.05 usec; use integer math, no FP in kernel */
+	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH / 20) - 1;
+
+	return 0;
+}
+
+static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
+{
+	if (!cq)
+		return;
+	if (!cq->dmem.base)
+		return;
+
+	nicvf_free_q_desc_mem(nic, &cq->dmem);
+}
+
+/* Initialize transmit queue */
+static int nicvf_init_snd_queue(struct nicvf *nic,
+				struct snd_queue *sq, int q_len)
+{
+	int err;
+
+	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
+				     NICVF_SQ_BASE_ALIGN_BYTES);
+	if (err)
+		return err;
+
+	sq->desc = sq->dmem.base;
+	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
+	if (!sq->skbuff)
+		return -ENOMEM;
+	sq->head = 0;
+	sq->tail = 0;
+	atomic_set(&sq->free_cnt, q_len - 1);
+	sq->thresh = SND_QUEUE_THRESH;
+
+	/* Preallocate memory for TSO segment's header */
+	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
+					  q_len * TSO_HEADER_SIZE,
+					  &sq->tso_hdrs_phys, GFP_KERNEL);
+	if (!sq->tso_hdrs)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
+{
+	if (!sq)
+		return;
+	if (!sq->dmem.base)
+		return;
+
+	if (sq->tso_hdrs)
+		dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len,
+				  sq->tso_hdrs, sq->tso_hdrs_phys);
+
+	kfree(sq->skbuff);
+	nicvf_free_q_desc_mem(nic, &sq->dmem);
+}
+
+static void nicvf_reclaim_snd_queue(struct nicvf *nic,
+				    struct queue_set *qs, int qidx)
+{
+	/* Disable send queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
+	/* Check if SQ is stopped */
+	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
+		return;
+	/* Reset send queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+}
+
+static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
+				    struct queue_set *qs, int qidx)
+{
+	union nic_mbx mbx = {};
+
+	/* Make sure all packets in the pipeline are written back into mem */
+	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
+	nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
+				    struct queue_set *qs, int qidx)
+{
+	/* Disable timer threshold (doesn't get reset upon CQ reset) */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+	/* Disable completion queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
+	/* Reset completion queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+}
+
+static void nicvf_reclaim_rbdr(struct nicvf *nic,
+			       struct rbdr *rbdr, int qidx)
+{
+	u64 tmp, fifo_state;
+	int timeout = 10;
+
+	/* Save head and tail pointers for freeing up buffers */
+	rbdr->head = nicvf_queue_reg_read(nic,
+					  NIC_QSET_RBDR_0_1_HEAD,
+					  qidx) >> 3;
+	rbdr->tail = nicvf_queue_reg_read(nic,
+					  NIC_QSET_RBDR_0_1_TAIL,
+					  qidx) >> 3;
+
+	/* If RBDR FIFO is in 'FAIL' state then do a reset first
+	 * before reclaiming.
+	 */
+	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
+	if (((fifo_state >> 62) & 0x03) == 0x3)
+		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+				      qidx, NICVF_RBDR_RESET);
+
+	/* Disable RBDR */
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
+	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+		return;
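+	/* Poll until the prefetch engine drains: the low and high 32-bit
+	 * halves of the prefetch status register match once no fetches
+	 * are outstanding.
+	 */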
+	while (1) {
+		tmp = nicvf_queue_reg_read(nic,
+					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
+					   qidx);
+		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
+			break;
+		usleep_range(1000, 2000);
+		timeout--;
+		if (!timeout) {
+			netdev_err(nic->netdev,
+				   "Failed polling on prefetch status\n");
+			return;
+		}
+	}
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+			      qidx, NICVF_RBDR_RESET);
+
+	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
+		return;
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
+	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+		return;
+}
+
+/* Configures receive queue */
+static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
+				   int qidx, bool enable)
+{
+	union nic_mbx mbx = {};
+	struct rcv_queue *rq;
+	struct rq_cfg rq_cfg;
+
+	rq = &qs->rq[qidx];
+	rq->enable = enable;
+
+	/* Disable receive queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
+
+	if (!rq->enable) {
+		nicvf_reclaim_rcv_queue(nic, qs, qidx);
+		return;
+	}
+
+	rq->cq_qs = qs->vnic_id;
+	rq->cq_idx = qidx;
+	rq->start_rbdr_qs = qs->vnic_id;
+	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
+	rq->cont_rbdr_qs = qs->vnic_id;
+	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
+	/* All writes of RBDR data to be loaded into L2 cache as well */
+	rq->caching = 1;
+
+	/* Send a mailbox msg to PF to config RQ */
+	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
+	mbx.rq.qs_num = qs->vnic_id;
+	mbx.rq.rq_num = qidx;
+	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
+			  (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
+			  (rq->cont_qs_rbdr_idx << 8) |
+			  (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
+	nicvf_send_msg_to_pf(nic, &mbx);
+
+	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
+	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
+	nicvf_send_msg_to_pf(nic, &mbx);
+
+	/* RQ drop config
+	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
+	 */
+	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
+	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
+	nicvf_send_msg_to_pf(nic, &mbx);
+
+	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00);
+
+	/* Enable Receive queue */
+	rq_cfg.ena = 1;
+	rq_cfg.tcp_ena = 0;
+	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
+}
+
+/* Configures completion queue */
+void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+			    int qidx, bool enable)
+{
+	struct cmp_queue *cq;
+	struct cq_cfg cq_cfg;
+
+	cq = &qs->cq[qidx];
+	cq->enable = enable;
+
+	if (!cq->enable) {
+		nicvf_reclaim_cmp_queue(nic, qs, qidx);
+		return;
+	}
+
+	/* Reset completion queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+
+	spin_lock_init(&cq->lock);
+	/* Set completion queue base address */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
+			      qidx, (u64)(cq->dmem.phys_base));
+
+	/* Enable Completion queue */
+	cq_cfg.ena = 1;
+	cq_cfg.reset = 0;
+	cq_cfg.caching = 0;
+	cq_cfg.qsize = CMP_QSIZE;
+	cq_cfg.avg_con = 0;
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
+
+	/* Set threshold value for interrupt generation */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
+			      qidx, nic->cq_coalesce_usecs);
+}
+
+/* Configures transmit queue */
+static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
+				   int qidx, bool enable)
+{
+	union nic_mbx mbx = {};
+	struct snd_queue *sq;
+	struct sq_cfg sq_cfg;
+
+	sq = &qs->sq[qidx];
+	sq->enable = enable;
+
+	if (!sq->enable) {
+		nicvf_reclaim_snd_queue(nic, qs, qidx);
+		return;
+	}
+
+	/* Reset send queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+
+	sq->cq_qs = qs->vnic_id;
+	sq->cq_idx = qidx;
+
+	/* Send a mailbox msg to PF to config SQ */
+	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
+	mbx.sq.qs_num = qs->vnic_id;
+	mbx.sq.sq_num = qidx;
+	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
+	nicvf_send_msg_to_pf(nic, &mbx);
+
+	/* Set queue base address */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
+			      qidx, (u64)(sq->dmem.phys_base));
+
+	/* Enable send queue  & set queue size */
+	sq_cfg.ena = 1;
+	sq_cfg.reset = 0;
+	sq_cfg.ldwb = 0;
+	sq_cfg.qsize = SND_QSIZE;
+	sq_cfg.tstmp_bgx_intf = 0;
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
+
+	/* Set threshold value for interrupt generation */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
+
+	/* Set queue:cpu affinity for better load distribution */
+	if (cpu_online(qidx)) {
+		cpumask_set_cpu(qidx, &sq->affinity_mask);
+		netif_set_xps_queue(nic->netdev,
+				    &sq->affinity_mask, qidx);
+	}
+}
+
+/* Configures receive buffer descriptor ring */
+static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
+			      int qidx, bool enable)
+{
+	struct rbdr *rbdr;
+	struct rbdr_cfg rbdr_cfg;
+
+	rbdr = &qs->rbdr[qidx];
+	nicvf_reclaim_rbdr(nic, rbdr, qidx);
+	if (!enable)
+		return;
+
+	/* Set descriptor base address */
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
+			      qidx, (u64)(rbdr->dmem.phys_base));
+
+	/* Enable RBDR  & set queue size */
+	/* Buffer size should be in multiples of 128 bytes */
+	rbdr_cfg.ena = 1;
+	rbdr_cfg.reset = 0;
+	rbdr_cfg.ldwb = 0;
+	rbdr_cfg.qsize = RBDR_SIZE;
+	rbdr_cfg.avg_con = 0;
+	rbdr_cfg.lines = rbdr->dma_size / 128;
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+			      qidx, *(u64 *)&rbdr_cfg);
+
+	/* Notify HW */
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
+			      qidx, qs->rbdr_len - 1);
+
+	/* Set threshold value for interrupt generation */
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
+			      qidx, rbdr->thresh - 1);
+}
+
+/* Requests PF to assign and enable Qset */
+void nicvf_qset_config(struct nicvf *nic, bool enable)
+{
+	union nic_mbx mbx = {};
+	struct queue_set *qs = nic->qs;
+	struct qs_cfg *qs_cfg;
+
+	if (!qs) {
+		netdev_warn(nic->netdev,
+			    "Qset not allocated, skipping queue init\n");
+		return;
+	}
+
+	qs->enable = enable;
+	qs->vnic_id = nic->vf_id;
+
+	/* Send a mailbox msg to PF to config Qset */
+	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
+	mbx.qs.num = qs->vnic_id;
+
+	mbx.qs.cfg = 0;
+	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
+	if (qs->enable) {
+		qs_cfg->ena = 1;
+#ifdef __BIG_ENDIAN
+		qs_cfg->be = 1;
+#endif
+		qs_cfg->vnic = qs->vnic_id;
+	}
+	nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_free_resources(struct nicvf *nic)
+{
+	int qidx;
+	struct queue_set *qs = nic->qs;
+
+	/* Free receive buffer descriptor ring */
+	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
+
+	/* Free completion queue */
+	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
+
+	/* Free send queue */
+	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
+}
+
+static int nicvf_alloc_resources(struct nicvf *nic)
+{
+	int qidx;
+	struct queue_set *qs = nic->qs;
+
+	/* Alloc receive buffer descriptor ring */
+	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
+				    DMA_BUFFER_LEN))
+			goto alloc_fail;
+	}
+
+	/* Alloc send queue */
+	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
+		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
+			goto alloc_fail;
+	}
+
+	/* Alloc completion queue */
+	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
+			goto alloc_fail;
+	}
+
+	return 0;
+alloc_fail:
+	nicvf_free_resources(nic);
+	return -ENOMEM;
+}
+
+int nicvf_set_qset_resources(struct nicvf *nic)
+{
+	struct queue_set *qs;
+
+	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
+	if (!qs)
+		return -ENOMEM;
+	nic->qs = qs;
+
+	/* Set count of each queue */
+	qs->rbdr_cnt = RBDR_CNT;
+	qs->rq_cnt = RCV_QUEUE_CNT;
+	qs->sq_cnt = SND_QUEUE_CNT;
+	qs->cq_cnt = CMP_QUEUE_CNT;
+
+	/* Set queue lengths */
+	qs->rbdr_len = RCV_BUF_COUNT;
+	qs->sq_len = SND_QUEUE_LEN;
+	qs->cq_len = CMP_QUEUE_LEN;
+	return 0;
+}
+
+int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
+{
+	bool disable = false;
+	struct queue_set *qs = nic->qs;
+	int qidx;
+
+	if (!qs)
+		return 0;
+
+	if (enable) {
+		if (nicvf_alloc_resources(nic))
+			return -ENOMEM;
+
+		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+			nicvf_snd_queue_config(nic, qs, qidx, enable);
+		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+			nicvf_cmp_queue_config(nic, qs, qidx, enable);
+		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+			nicvf_rbdr_config(nic, qs, qidx, enable);
+		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+			nicvf_rcv_queue_config(nic, qs, qidx, enable);
+	} else {
+		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+			nicvf_rcv_queue_config(nic, qs, qidx, disable);
+		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+			nicvf_rbdr_config(nic, qs, qidx, disable);
+		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+			nicvf_snd_queue_config(nic, qs, qidx, disable);
+		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+			nicvf_cmp_queue_config(nic, qs, qidx, disable);
+
+		nicvf_free_resources(nic);
+	}
+
+	return 0;
+}
+
+/* Reserve 'desc_cnt' descriptors in the SQ and return the index of the
+ * first one; the queue length is a power of two, so the tail wraps
+ * via masking.
+ */
+static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+	int qentry;
+
+	qentry = sq->tail;
+	atomic_sub(desc_cnt, &sq->free_cnt);
+	sq->tail += desc_cnt;
+	sq->tail &= (sq->dmem.q_len - 1);
+
+	return qentry;
+}
+
+/* Free descriptor back to SQ for future use */
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+	atomic_add(desc_cnt, &sq->free_cnt);
+	sq->head += desc_cnt;
+	sq->head &= (sq->dmem.q_len - 1);
+}
+
+static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
+{
+	qentry++;
+	qentry &= (sq->dmem.q_len - 1);
+	return qentry;
+}
+
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
+{
+	u64 sq_cfg;
+
+	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+	sq_cfg |= NICVF_SQ_EN;
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+	/* Ring doorbell so that H/W restarts processing SQEs */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
+}
+
+void nicvf_sq_disable(struct nicvf *nic, int qidx)
+{
+	u64 sq_cfg;
+
+	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+	sq_cfg &= ~NICVF_SQ_EN;
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+}
+
+void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
+			      int qidx)
+{
+	u64 head, tail;
+	struct sk_buff *skb;
+	struct nicvf *nic = netdev_priv(netdev);
+	struct sq_hdr_subdesc *hdr;
+
+	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
+	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
+	while (sq->head != head) {
+		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
+		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
+			nicvf_put_sq_desc(sq, 1);
+			continue;
+		}
+		skb = (struct sk_buff *)sq->skbuff[sq->head];
+		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
+		atomic64_add(hdr->tot_len,
+			     (atomic64_t *)&netdev->stats.tx_bytes);
+		dev_kfree_skb_any(skb);
+		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+	}
+}
+
+/* Calculate the number of SQ subdescriptors needed to transmit all
+ * segments of this TSO packet.
+ * Taken from 'Tilera network driver' with a minor modification.
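+ * Each segment needs one subdescriptor for its header copy plus one per
+ * contiguous piece of payload it consumes from the skb fragments.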
+ */
+static int nicvf_tso_count_subdescs(struct sk_buff *skb)
+{
+	struct skb_shared_info *sh = skb_shinfo(skb);
+	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	unsigned int data_len = skb->len - sh_len;
+	unsigned int p_len = sh->gso_size;
+	long f_id = -1;    /* id of the current fragment */
+	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
+	long f_used = 0;  /* bytes used from the current fragment */
+	long n;            /* size of the current piece of payload */
+	int num_edescs = 0;
+	int segment;
+
+	for (segment = 0; segment < sh->gso_segs; segment++) {
+		unsigned int p_used = 0;
+
+		/* One edesc for header and for each piece of the payload. */
+		for (num_edescs++; p_used < p_len; num_edescs++) {
+			/* Advance as needed. */
+			while (f_used >= f_size) {
+				f_id++;
+				f_size = skb_frag_size(&sh->frags[f_id]);
+				f_used = 0;
+			}
+
+			/* Use bytes from the current fragment. */
+			n = p_len - p_used;
+			if (n > f_size - f_used)
+				n = f_size - f_used;
+			f_used += n;
+			p_used += n;
+		}
+
+		/* The last segment may be less than gso_size. */
+		data_len -= p_len;
+		if (data_len < p_len)
+			p_len = data_len;
+	}
+
+	/* '+ gso_segs' for one SQ_HDR_SUBDESC per segment */
+	return num_edescs + sh->gso_segs;
+}
+
+/* Get the number of SQ descriptors needed to xmit this skb */
+static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
+{
+	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
+
+	if (skb_shinfo(skb)->gso_size) {
+		subdesc_cnt = nicvf_tso_count_subdescs(skb);
+		return subdesc_cnt;
+	}
+
+	if (skb_shinfo(skb)->nr_frags)
+		subdesc_cnt += skb_shinfo(skb)->nr_frags;
+
+	return subdesc_cnt;
+}
+
+/* Add SQ HEADER subdescriptor.
+ * First subdescriptor for every send descriptor.
+ */
+static inline void
+nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
+			 int subdesc_cnt, struct sk_buff *skb, int len)
+{
+	int proto;
+	struct sq_hdr_subdesc *hdr;
+
+	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+	sq->skbuff[qentry] = (u64)skb;
+
+	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+	/* Enable notification via CQE after processing SQE */
+	hdr->post_cqe = 1;
+	/* Number of subdescriptors following this one */
+	hdr->subdesc_cnt = subdesc_cnt;
+	hdr->tot_len = len;
+
+	/* Offload checksum calculation to HW (IPv4 only; for other
+	 * protocols no HW checksum is requested)
+	 */
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		if (skb->protocol != htons(ETH_P_IP))
+			return;
+
+		hdr->csum_l3 = 1; /* Enable IP csum calculation */
+		hdr->l3_offset = skb_network_offset(skb);
+		hdr->l4_offset = skb_transport_offset(skb);
+
+		proto = ip_hdr(skb)->protocol;
+		switch (proto) {
+		case IPPROTO_TCP:
+			hdr->csum_l4 = SEND_L4_CSUM_TCP;
+			break;
+		case IPPROTO_UDP:
+			hdr->csum_l4 = SEND_L4_CSUM_UDP;
+			break;
+		case IPPROTO_SCTP:
+			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
+			break;
+		}
+	}
+}
+
+/* SQ GATHER subdescriptor
+ * Must follow HDR descriptor
+ */
+static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
+					       int size, u64 data)
+{
+	struct sq_gather_subdesc *gather;
+
+	qentry &= (sq->dmem.q_len - 1);
+	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
+
+	memset(gather, 0, SND_QUEUE_DESC_SIZE);
+	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
+	gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB;
+	gather->size = size;
+	gather->addr = data;
+}
+
+/* Segment a TSO packet into 'gso_size' segments and append
+ * them to SQ for transfer
+ */
+static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
+			       int qentry, struct sk_buff *skb)
+{
+	struct tso_t tso;
+	int seg_subdescs = 0, desc_cnt = 0;
+	int seg_len, total_len, data_left;
+	int hdr_qentry = qentry;
+	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
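+	/* Per segment the SQ receives: a HDR subdesc, a GATHER pointing at
+	 * the prebuilt TSO header, then one GATHER per payload piece.
+	 */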
+	tso_start(skb, &tso);
+	total_len = skb->len - hdr_len;
+	while (total_len > 0) {
+		char *hdr;
+
+		/* Save Qentry for adding HDR_SUBDESC at the end */
+		hdr_qentry = qentry;
+
+		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+		total_len -= data_left;
+
+		/* Add segment's header */
+		qentry = nicvf_get_nxt_sqentry(sq, qentry);
+		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
+		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
+					    sq->tso_hdrs_phys +
+					    qentry * TSO_HEADER_SIZE);
+		/* HDR_SUBDESC + GATHER */
+		seg_subdescs = 2;
+		seg_len = hdr_len;
+
+		/* Add segment's payload fragments */
+		while (data_left > 0) {
+			int size;
+
+			size = min_t(int, tso.size, data_left);
+
+			qentry = nicvf_get_nxt_sqentry(sq, qentry);
+			nicvf_sq_add_gather_subdesc(sq, qentry, size,
+						    virt_to_phys(tso.data));
+			seg_subdescs++;
+			seg_len += size;
+
+			data_left -= size;
+			tso_build_data(skb, &tso, size);
+		}
+		nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
+					 seg_subdescs - 1, skb, seg_len);
+		sq->skbuff[hdr_qentry] = 0;
+		qentry = nicvf_get_nxt_sqentry(sq, qentry);
+
+		desc_cnt += seg_subdescs;
+	}
+	/* Save SKB in the last segment for freeing */
+	sq->skbuff[hdr_qentry] = (u64)skb;
+
+	/* make sure all memory stores are done before ringing doorbell */
+	smp_wmb();
+
+	/* Inform HW to xmit all TSO segments */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+			      skb_get_queue_mapping(skb), desc_cnt);
+	return 1;
+}
+
+/* Append an skb to a SQ for packet transfer. */
+int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
+{
+	int i, size;
+	int subdesc_cnt;
+	int sq_num, qentry;
+	struct queue_set *qs = nic->qs;
+	struct snd_queue *sq;
+
+	sq_num = skb_get_queue_mapping(skb);
+	sq = &qs->sq[sq_num];
+
+	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
+	if (subdesc_cnt > atomic_read(&sq->free_cnt))
+		goto append_fail;
+
+	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
+
+	/* Check if it's a TSO packet */
+	if (skb_shinfo(skb)->gso_size)
+		return nicvf_sq_append_tso(nic, sq, qentry, skb);
+
+	/* Add SQ header subdesc */
+	nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
+
+	/* Add SQ gather subdescs */
+	qentry = nicvf_get_nxt_sqentry(sq, qentry);
+	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
+	nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
+
+	/* Check for scattered buffer */
+	if (!skb_is_nonlinear(skb))
+		goto doorbell;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const struct skb_frag_struct *frag;
+
+		frag = &skb_shinfo(skb)->frags[i];
+
+		qentry = nicvf_get_nxt_sqentry(sq, qentry);
+		size = skb_frag_size(frag);
+		nicvf_sq_add_gather_subdesc(sq, qentry, size,
+					    virt_to_phys(
+					    skb_frag_address(frag)));
+	}
+
+doorbell:
+	/* make sure all memory stores are done before ringing doorbell */
+	smp_wmb();
+
+	/* Inform HW to xmit new packet */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+			      sq_num, subdesc_cnt);
+	return 1;
+
+append_fail:
+	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
+	return 0;
+}
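+
+/* Typical caller pattern (sketch only; the 'netdev' hook and 'qid' are
+ * illustrative, not part of this file):
+ *
+ *	if (!nicvf_sq_append_skb(nic, skb)) {
+ *		netif_stop_subqueue(netdev, qid);
+ *		return NETDEV_TX_BUSY;
+ *	}
+ *	return NETDEV_TX_OK;
+ */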
+
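+/* rb_lens packs four 16-bit receive-buffer sizes into each 64-bit CQE
+ * word; on big-endian hosts the u16 order within a word is reversed,
+ * so remap the index within each group of four.
+ */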
+static inline unsigned frag_num(unsigned i)
+{
+#ifdef __BIG_ENDIAN
+	return (i & ~3) + 3 - (i & 3);
+#else
+	return i;
+#endif
+}
+
+/* Returns SKB for a received packet */
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
+{
+	int frag;
+	int payload_len = 0;
+	struct sk_buff *skb = NULL;
+	struct sk_buff *skb_frag = NULL;
+	struct sk_buff *prev_frag = NULL;
+	u16 *rb_lens = NULL;
+	u64 *rb_ptrs = NULL;
+
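+	/* Per struct cqe_rx_t, buffer sizes (rb0_sz..) start at word 3 of
+	 * the CQE and buffer pointers (rb0_ptr..) at word 6.
+	 */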
+	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
+	rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
+
+	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
+		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
+
+	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
+		payload_len = rb_lens[frag_num(frag)];
+		if (!frag) {
+			/* First fragment */
+			skb = nicvf_rb_ptr_to_skb(nic,
+						  *rb_ptrs - cqe_rx->align_pad,
+						  payload_len);
+			if (!skb)
+				return NULL;
+			skb_reserve(skb, cqe_rx->align_pad);
+			skb_put(skb, payload_len);
+		} else {
+			/* Add fragments */
+			skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
+						       payload_len);
+			if (!skb_frag) {
+				dev_kfree_skb(skb);
+				return NULL;
+			}
+
+			if (!skb_shinfo(skb)->frag_list)
+				skb_shinfo(skb)->frag_list = skb_frag;
+			else
+				prev_frag->next = skb_frag;
+
+			prev_frag = skb_frag;
+			skb->len += payload_len;
+			skb->data_len += payload_len;
+			skb_frag->len = payload_len;
+		}
+		/* Next buffer pointer */
+		rb_ptrs++;
+	}
+	return skb;
+}
+
+/* Enable interrupt */
+void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+	u64 reg_val;
+
+	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+
+	switch (int_type) {
+	case NICVF_INTR_CQ:
+		reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+		break;
+	case NICVF_INTR_SQ:
+		reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+		break;
+	case NICVF_INTR_RBDR:
+		reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+		break;
+	case NICVF_INTR_PKT_DROP:
+		reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+		break;
+	case NICVF_INTR_TCP_TIMER:
+		reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+		break;
+	case NICVF_INTR_MBOX:
+		reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
+		break;
+	case NICVF_INTR_QS_ERR:
+		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+		break;
+	default:
+		netdev_err(nic->netdev,
+			   "Failed to enable interrupt: unknown type\n");
+		break;
+	}
+
+	nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
+}
+
+/* Disable interrupt */
+void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+	u64 reg_val = 0;
+
+	switch (int_type) {
+	case NICVF_INTR_CQ:
+		reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+		break;
+	case NICVF_INTR_SQ:
+		reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+		break;
+	case NICVF_INTR_RBDR:
+		reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+		break;
+	case NICVF_INTR_PKT_DROP:
+		reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+		break;
+	case NICVF_INTR_TCP_TIMER:
+		reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+		break;
+	case NICVF_INTR_MBOX:
+		reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
+		break;
+	case NICVF_INTR_QS_ERR:
+		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+		break;
+	default:
+		netdev_err(nic->netdev,
+			   "Failed to disable interrupt: unknown type\n");
+		break;
+	}
+
+	nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
+}
+
+/* Clear interrupt */
+void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+	u64 reg_val = 0;
+
+	switch (int_type) {
+	case NICVF_INTR_CQ:
+		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+		break;
+	case NICVF_INTR_SQ:
+		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+		break;
+	case NICVF_INTR_RBDR:
+		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+		break;
+	case NICVF_INTR_PKT_DROP:
+		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+		break;
+	case NICVF_INTR_TCP_TIMER:
+		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+		break;
+	case NICVF_INTR_MBOX:
+		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
+		break;
+	case NICVF_INTR_QS_ERR:
+		reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+		break;
+	default:
+		netdev_err(nic->netdev,
+			   "Failed to clear interrupt: unknown type\n");
+		break;
+	}
+
+	nicvf_reg_write(nic, NIC_VF_INT, reg_val);
+}
+
+/* Check if interrupt is enabled */
+int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
+{
+	u64 reg_val;
+	u64 mask = 0xff;
+
+	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+
+	switch (int_type) {
+	case NICVF_INTR_CQ:
+		mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+		break;
+	case NICVF_INTR_SQ:
+		mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+		break;
+	case NICVF_INTR_RBDR:
+		mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+		break;
+	case NICVF_INTR_PKT_DROP:
+		mask = NICVF_INTR_PKT_DROP_MASK;
+		break;
+	case NICVF_INTR_TCP_TIMER:
+		mask = NICVF_INTR_TCP_TIMER_MASK;
+		break;
+	case NICVF_INTR_MBOX:
+		mask = NICVF_INTR_MBOX_MASK;
+		break;
+	case NICVF_INTR_QS_ERR:
+		mask = NICVF_INTR_QS_ERR_MASK;
+		break;
+	default:
+		netdev_err(nic->netdev,
+			   "Failed to check interrupt enable: unknown type\n");
+		break;
+	}
+
+	return (reg_val & mask);
+}
+
+void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
+{
+	struct rcv_queue *rq;
+
+#define GET_RQ_STATS(reg) \
+	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
+			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
+
+	rq = &nic->qs->rq[rq_idx];
+	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
+	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
+}
+
+void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
+{
+	struct snd_queue *sq;
+
+#define GET_SQ_STATS(reg) \
+	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
+			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
+
+	sq = &nic->qs->sq[sq_idx];
+	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
+	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
+}
+
+/* Check for errors in the receive completion queue entry */
+int nicvf_check_cqe_rx_errs(struct nicvf *nic,
+			    struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
+{
+	struct cmp_queue_stats *stats = &cq->stats;
+
+	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
+		stats->rx.errop.good++;
+		return 0;
+	}
+
+	if (netif_msg_rx_err(nic))
+		netdev_err(nic->netdev,
+			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
+			   nic->netdev->name,
+			   cqe_rx->err_level, cqe_rx->err_opcode);
+
+	switch (cqe_rx->err_level) {
+	case CQ_ERRLVL_MAC:
+	if (sq->tso_hdrs)
+		dma_free_coherent(&nic->pdev->dev,
+				  sq->dmem.q_len * TSO_HEADER_SIZE,
+				  sq->tso_hdrs, sq->tso_hdrs_phys);
+		stats->rx.errlvl.l2_errs++;
+		break;
+	case CQ_ERRLVL_L3:
+		stats->rx.errlvl.l3_errs++;
+		break;
+	case CQ_ERRLVL_L4:
+		stats->rx.errlvl.l4_errs++;
+		break;
+	}
+
+	switch (cqe_rx->err_opcode) {
+	case CQ_RX_ERROP_RE_PARTIAL:
+		stats->rx.errop.partial_pkts++;
+		break;
+	case CQ_RX_ERROP_RE_JABBER:
+		stats->rx.errop.jabber_errs++;
+		break;
+	case CQ_RX_ERROP_RE_FCS:
+		stats->rx.errop.fcs_errs++;
+		break;
+	case CQ_RX_ERROP_RE_TERMINATE:
+		stats->rx.errop.terminate_errs++;
+		break;
+	case CQ_RX_ERROP_RE_RX_CTL:
+		stats->rx.errop.bgx_rx_errs++;
+		break;
+	case CQ_RX_ERROP_PREL2_ERR:
+		stats->rx.errop.prel2_errs++;
+		break;
+	case CQ_RX_ERROP_L2_FRAGMENT:
+		stats->rx.errop.l2_frags++;
+		break;
+	case CQ_RX_ERROP_L2_OVERRUN:
+		stats->rx.errop.l2_overruns++;
+		break;
+	case CQ_RX_ERROP_L2_PFCS:
+		stats->rx.errop.l2_pfcs++;
+		break;
+	case CQ_RX_ERROP_L2_PUNY:
+		stats->rx.errop.l2_puny++;
+		break;
+	case CQ_RX_ERROP_L2_MAL:
+		stats->rx.errop.l2_hdr_malformed++;
+		break;
+	case CQ_RX_ERROP_L2_OVERSIZE:
+		stats->rx.errop.l2_oversize++;
+		break;
+	case CQ_RX_ERROP_L2_UNDERSIZE:
+		stats->rx.errop.l2_undersize++;
+		break;
+	case CQ_RX_ERROP_L2_LENMISM:
+		stats->rx.errop.l2_len_mismatch++;
+		break;
+	case CQ_RX_ERROP_L2_PCLP:
+		stats->rx.errop.l2_pclp++;
+		break;
+	case CQ_RX_ERROP_IP_NOT:
+		stats->rx.errop.non_ip++;
+		break;
+	case CQ_RX_ERROP_IP_CSUM_ERR:
+		stats->rx.errop.ip_csum_err++;
+		break;
+	case CQ_RX_ERROP_IP_MAL:
+		stats->rx.errop.ip_hdr_malformed++;
+		break;
+	case CQ_RX_ERROP_IP_MALD:
+		stats->rx.errop.ip_payload_malformed++;
+		break;
+	case CQ_RX_ERROP_IP_HOP:
+		stats->rx.errop.ip_hop_errs++;
+		break;
+	case CQ_RX_ERROP_L3_ICRC:
+		stats->rx.errop.l3_icrc_errs++;
+		break;
+	case CQ_RX_ERROP_L3_PCLP:
+		stats->rx.errop.l3_pclp++;
+		break;
+	case CQ_RX_ERROP_L4_MAL:
+		stats->rx.errop.l4_malformed++;
+		break;
+	case CQ_RX_ERROP_L4_CHK:
+		stats->rx.errop.l4_csum_errs++;
+		break;
+	case CQ_RX_ERROP_UDP_LEN:
+		stats->rx.errop.udp_len_err++;
+		break;
+	case CQ_RX_ERROP_L4_PORT:
+		stats->rx.errop.bad_l4_port++;
+		break;
+	case CQ_RX_ERROP_TCP_FLAG:
+		stats->rx.errop.bad_tcp_flag++;
+		break;
+	case CQ_RX_ERROP_TCP_OFFSET:
+		stats->rx.errop.tcp_offset_errs++;
+		break;
+	case CQ_RX_ERROP_L4_PCLP:
+		stats->rx.errop.l4_pclp++;
+		break;
+	case CQ_RX_ERROP_RBDR_TRUNC:
+		stats->rx.errop.pkt_truncated++;
+		break;
+	}
+
+	return 1;
+}
+
+/* Check for errors in the send completion queue entry */
+int nicvf_check_cqe_tx_errs(struct nicvf *nic,
+			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
+{
+	struct cmp_queue_stats *stats = &cq->stats;
+
+	switch (cqe_tx->send_status) {
+	case CQ_TX_ERROP_GOOD:
+		stats->tx.good++;
+		return 0;
+	case CQ_TX_ERROP_DESC_FAULT:
+		stats->tx.desc_fault++;
+		break;
+	case CQ_TX_ERROP_HDR_CONS_ERR:
+		stats->tx.hdr_cons_err++;
+		break;
+	case CQ_TX_ERROP_SUBDC_ERR:
+		stats->tx.subdesc_err++;
+		break;
+	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
+		stats->tx.imm_size_oflow++;
+		break;
+	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
+		stats->tx.data_seq_err++;
+		break;
+	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
+		stats->tx.mem_seq_err++;
+		break;
+	case CQ_TX_ERROP_LOCK_VIOL:
+		stats->tx.lock_viol++;
+		break;
+	case CQ_TX_ERROP_DATA_FAULT:
+		stats->tx.data_fault++;
+		break;
+	case CQ_TX_ERROP_TSTMP_CONFLICT:
+		stats->tx.tstmp_conflict++;
+		break;
+	case CQ_TX_ERROP_TSTMP_TIMEOUT:
+		stats->tx.tstmp_timeout++;
+		break;
+	case CQ_TX_ERROP_MEM_FAULT:
+		stats->tx.mem_fault++;
+		break;
+	case CQ_TX_ERROP_CK_OVERLAP:
+		stats->tx.csum_overlap++;
+		break;
+	case CQ_TX_ERROP_CK_OFLOW:
+		stats->tx.csum_overflow++;
+		break;
+	}
+
+	return 1;
+}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
new file mode 100644
index 0000000..8341bdf
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef NICVF_QUEUES_H
+#define NICVF_QUEUES_H
+
+#include <linux/netdevice.h>
+#include "q_struct.h"
+
+#define MAX_QUEUE_SET			128
+#define MAX_RCV_QUEUES_PER_QS		8
+#define MAX_RCV_BUF_DESC_RINGS_PER_QS	2
+#define MAX_SND_QUEUES_PER_QS		8
+#define MAX_CMP_QUEUES_PER_QS		8
+
+/* VF's queue interrupt ranges */
+#define	NICVF_INTR_ID_CQ		0
+#define	NICVF_INTR_ID_SQ		8
+#define	NICVF_INTR_ID_RBDR		16
+#define	NICVF_INTR_ID_MISC		18
+#define	NICVF_INTR_ID_QS_ERR		19
+
+#define	for_each_cq_irq(irq)	\
+	for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
+#define	for_each_sq_irq(irq)	\
+	for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
+#define	for_each_rbdr_irq(irq)	\
+	for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)
+
+#define RBDR_SIZE0		0ULL /* 8K entries */
+#define RBDR_SIZE1		1ULL /* 16K entries */
+#define RBDR_SIZE2		2ULL /* 32K entries */
+#define RBDR_SIZE3		3ULL /* 64K entries */
+#define RBDR_SIZE4		4ULL /* 128K entries */
+#define RBDR_SIZE5		5ULL /* 256K entries */
+#define RBDR_SIZE6		6ULL /* 512K entries */
+
+#define SND_QUEUE_SIZE0		0ULL /* 1K entries */
+#define SND_QUEUE_SIZE1		1ULL /* 2K entries */
+#define SND_QUEUE_SIZE2		2ULL /* 4K entries */
+#define SND_QUEUE_SIZE3		3ULL /* 8K entries */
+#define SND_QUEUE_SIZE4		4ULL /* 16K entries */
+#define SND_QUEUE_SIZE5		5ULL /* 32K entries */
+#define SND_QUEUE_SIZE6		6ULL /* 64K entries */
+
+#define CMP_QUEUE_SIZE0		0ULL /* 1K entries */
+#define CMP_QUEUE_SIZE1		1ULL /* 2K entries */
+#define CMP_QUEUE_SIZE2		2ULL /* 4K entries */
+#define CMP_QUEUE_SIZE3		3ULL /* 8K entries */
+#define CMP_QUEUE_SIZE4		4ULL /* 16K entries */
+#define CMP_QUEUE_SIZE5		5ULL /* 32K entries */
+#define CMP_QUEUE_SIZE6		6ULL /* 64K entries */
+
+/* Default queue count per QS, its lengths and threshold values */
+#define RBDR_CNT		1
+#define RCV_QUEUE_CNT		8
+#define SND_QUEUE_CNT		8
+#define CMP_QUEUE_CNT		8 /* Max of RCV and SND qcount */
+
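+/* Queue sizes are log2-encoded: SIZEn selects 1K << n entries for
+ * SQ/CQ and 8K << n entries for RBDR (see the *_LEN macros below).
+ */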
+#define SND_QSIZE		SND_QUEUE_SIZE4
+#define SND_QUEUE_LEN		(1ULL << (SND_QSIZE + 10))
+#define MAX_SND_QUEUE_LEN	(1ULL << (SND_QUEUE_SIZE6 + 10))
+#define SND_QUEUE_THRESH	2ULL
+#define MIN_SQ_DESC_PER_PKT_XMIT	2
+/* Timestamping is not enabled, otherwise this would be 2 */
+#define MAX_CQE_PER_PKT_XMIT		1
+
+#define CMP_QSIZE		CMP_QUEUE_SIZE4
+#define CMP_QUEUE_LEN		(1ULL << (CMP_QSIZE + 10))
+#define CMP_QUEUE_CQE_THRESH	0
+#define CMP_QUEUE_TIMER_THRESH	220 /* 10usec */
+
+#define RBDR_SIZE		RBDR_SIZE0
+#define RCV_BUF_COUNT		(1ULL << (RBDR_SIZE + 13))
+#define MAX_RCV_BUF_COUNT	(1ULL << (RBDR_SIZE6 + 13))
+#define RBDR_THRESH		(RCV_BUF_COUNT / 2)
+#define DMA_BUFFER_LEN		2048 /* In multiples of 128 bytes */
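+/* Per-buffer allocation: room for the DMA area plus skb_shared_info
+ * (needed by build_skb()) and alignment slack for struct rbuf_info.
+ */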
+#define RCV_FRAG_LEN	(SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
+			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
+			 (NICVF_RCV_BUF_ALIGN_BYTES * 2))
+#define RCV_DATA_OFFSET		NICVF_RCV_BUF_ALIGN_BYTES
+
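+/* Reserve enough CQEs for worst-case TX completions; the remaining
+ * capacity, scaled down by 256, programs the RX drop level.
+ */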
+#define MAX_CQES_FOR_TX		((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
+				 MAX_CQE_PER_PKT_XMIT)
+#define RQ_CQ_DROP		((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256)
+
+/* Descriptor size in bytes */
+#define SND_QUEUE_DESC_SIZE	16
+#define CMP_QUEUE_DESC_SIZE	512
+
+/* Buffer / descriptor alignments */
+#define NICVF_RCV_BUF_ALIGN		7
+#define NICVF_RCV_BUF_ALIGN_BYTES	(1ULL << NICVF_RCV_BUF_ALIGN)
+#define NICVF_CQ_BASE_ALIGN_BYTES	512  /* 9 bits */
+#define NICVF_SQ_BASE_ALIGN_BYTES	128  /* 7 bits */
+
+#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES)	ALIGN(ADDR, ALIGN_BYTES)
+#define NICVF_ADDR_ALIGN_LEN(ADDR, BYTES)\
+	(NICVF_ALIGNED_ADDR(ADDR, BYTES) - BYTES)
+#define NICVF_RCV_BUF_ALIGN_LEN(X)\
+	(NICVF_ALIGNED_ADDR(X, NICVF_RCV_BUF_ALIGN_BYTES) - X)
+
+/* Queue enable/disable */
+#define NICVF_SQ_EN		BIT_ULL(19)
+
+/* Queue reset */
+#define NICVF_CQ_RESET		BIT_ULL(41)
+#define NICVF_SQ_RESET		BIT_ULL(17)
+#define NICVF_RBDR_RESET	BIT_ULL(43)
+
+enum CQ_RX_ERRLVL_E {
+	CQ_ERRLVL_MAC,
+	CQ_ERRLVL_L2,
+	CQ_ERRLVL_L3,
+	CQ_ERRLVL_L4,
+};
+
+enum CQ_RX_ERROP_E {
+	CQ_RX_ERROP_RE_NONE = 0x0,
+	CQ_RX_ERROP_RE_PARTIAL = 0x1,
+	CQ_RX_ERROP_RE_JABBER = 0x2,
+	CQ_RX_ERROP_RE_FCS = 0x7,
+	CQ_RX_ERROP_RE_TERMINATE = 0x9,
+	CQ_RX_ERROP_RE_RX_CTL = 0xb,
+	CQ_RX_ERROP_PREL2_ERR = 0x1f,
+	CQ_RX_ERROP_L2_FRAGMENT = 0x20,
+	CQ_RX_ERROP_L2_OVERRUN = 0x21,
+	CQ_RX_ERROP_L2_PFCS = 0x22,
+	CQ_RX_ERROP_L2_PUNY = 0x23,
+	CQ_RX_ERROP_L2_MAL = 0x24,
+	CQ_RX_ERROP_L2_OVERSIZE = 0x25,
+	CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
+	CQ_RX_ERROP_L2_LENMISM = 0x27,
+	CQ_RX_ERROP_L2_PCLP = 0x28,
+	CQ_RX_ERROP_IP_NOT = 0x41,
+	CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
+	CQ_RX_ERROP_IP_MAL = 0x43,
+	CQ_RX_ERROP_IP_MALD = 0x44,
+	CQ_RX_ERROP_IP_HOP = 0x45,
+	CQ_RX_ERROP_L3_ICRC = 0x46,
+	CQ_RX_ERROP_L3_PCLP = 0x47,
+	CQ_RX_ERROP_L4_MAL = 0x61,
+	CQ_RX_ERROP_L4_CHK = 0x62,
+	CQ_RX_ERROP_UDP_LEN = 0x63,
+	CQ_RX_ERROP_L4_PORT = 0x64,
+	CQ_RX_ERROP_TCP_FLAG = 0x65,
+	CQ_RX_ERROP_TCP_OFFSET = 0x66,
+	CQ_RX_ERROP_L4_PCLP = 0x67,
+	CQ_RX_ERROP_RBDR_TRUNC = 0x70,
+};
+
+enum CQ_TX_ERROP_E {
+	CQ_TX_ERROP_GOOD = 0x0,
+	CQ_TX_ERROP_DESC_FAULT = 0x10,
+	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
+	CQ_TX_ERROP_SUBDC_ERR = 0x12,
+	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
+	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
+	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
+	CQ_TX_ERROP_LOCK_VIOL = 0x83,
+	CQ_TX_ERROP_DATA_FAULT = 0x84,
+	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
+	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
+	CQ_TX_ERROP_MEM_FAULT = 0x87,
+	CQ_TX_ERROP_CK_OVERLAP = 0x88,
+	CQ_TX_ERROP_CK_OFLOW = 0x89,
+	CQ_TX_ERROP_ENUM_LAST = 0x8a,
+};
+
+struct cmp_queue_stats {
+	struct rx_stats {
+		struct {
+			u64 mac_errs;
+			u64 l2_errs;
+			u64 l3_errs;
+			u64 l4_errs;
+		} errlvl;
+		struct {
+			u64 good;
+			u64 partial_pkts;
+			u64 jabber_errs;
+			u64 fcs_errs;
+			u64 terminate_errs;
+			u64 bgx_rx_errs;
+			u64 prel2_errs;
+			u64 l2_frags;
+			u64 l2_overruns;
+			u64 l2_pfcs;
+			u64 l2_puny;
+			u64 l2_hdr_malformed;
+			u64 l2_oversize;
+			u64 l2_undersize;
+			u64 l2_len_mismatch;
+			u64 l2_pclp;
+			u64 non_ip;
+			u64 ip_csum_err;
+			u64 ip_hdr_malformed;
+			u64 ip_payload_malformed;
+			u64 ip_hop_errs;
+			u64 l3_icrc_errs;
+			u64 l3_pclp;
+			u64 l4_malformed;
+			u64 l4_csum_errs;
+			u64 udp_len_err;
+			u64 bad_l4_port;
+			u64 bad_tcp_flag;
+			u64 tcp_offset_errs;
+			u64 l4_pclp;
+			u64 pkt_truncated;
+		} errop;
+	} rx;
+	struct tx_stats {
+		u64 good;
+		u64 desc_fault;
+		u64 hdr_cons_err;
+		u64 subdesc_err;
+		u64 imm_size_oflow;
+		u64 data_seq_err;
+		u64 mem_seq_err;
+		u64 lock_viol;
+		u64 data_fault;
+		u64 tstmp_conflict;
+		u64 tstmp_timeout;
+		u64 mem_fault;
+		u64 csum_overlap;
+		u64 csum_overflow;
+	} tx;
+} ____cacheline_aligned_in_smp;
+
+enum RQ_SQ_STATS {
+	RQ_SQ_STATS_OCTS,
+	RQ_SQ_STATS_PKTS,
+};
+
+struct rx_tx_queue_stats {
+	u64	bytes;
+	u64	pkts;
+} ____cacheline_aligned_in_smp;
+
+struct q_desc_mem {
+	dma_addr_t	dma;
+	u64		size;
+	u16		q_len;
+	dma_addr_t	phys_base;
+	void		*base;
+	void		*unalign_base;
+};
+
+struct rbdr {
+	bool		enable;
+	u32		dma_size;
+	u32		frag_len;
+	u32		thresh;		/* Threshold level for interrupt */
+	void		*desc;
+	u32		head;
+	u32		tail;
+	struct q_desc_mem   dmem;
+} ____cacheline_aligned_in_smp;
+
+struct rcv_queue {
+	bool		enable;
+	struct	rbdr	*rbdr_start;
+	struct	rbdr	*rbdr_cont;
+	bool		en_tcp_reassembly;
+	u8		cq_qs;  /* CQ's QS to which this RQ is assigned */
+	u8		cq_idx; /* CQ index (0 to 7) in the QS */
+	u8		cont_rbdr_qs;      /* Continue buffer ptrs - QS num */
+	u8		cont_qs_rbdr_idx;  /* RBDR idx in the cont QS */
+	u8		start_rbdr_qs;     /* First buffer ptrs - QS num */
+	u8		start_qs_rbdr_idx; /* RBDR idx in the above QS */
+	u8		caching;
+	struct		rx_tx_queue_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct cmp_queue {
+	bool		enable;
+	u16		thresh;
+	spinlock_t	lock;  /* lock to serialize processing CQEs */
+	void		*desc;
+	struct q_desc_mem   dmem;
+	struct cmp_queue_stats	stats;
+} ____cacheline_aligned_in_smp;
+
+struct snd_queue {
+	bool		enable;
+	u8		cq_qs;  /* CQ's QS to which this SQ is pointing */
+	u8		cq_idx; /* CQ index (0 to 7) in the above QS */
+	u16		thresh;
+	atomic_t	free_cnt;
+	u32		head;
+	u32		tail;
+	u64		*skbuff;
+	void		*desc;
+
+#define	TSO_HEADER_SIZE	128
+	/* For TSO segment's header */
+	char		*tso_hdrs;
+	dma_addr_t	tso_hdrs_phys;
+
+	cpumask_t	affinity_mask;
+	struct q_desc_mem   dmem;
+	struct rx_tx_queue_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct queue_set {
+	bool		enable;
+	bool		be_en;
+	u8		vnic_id;
+	u8		rq_cnt;
+	u8		cq_cnt;
+	u64		cq_len;
+	u8		sq_cnt;
+	u64		sq_len;
+	u8		rbdr_cnt;
+	u64		rbdr_len;
+	struct	rcv_queue	rq[MAX_RCV_QUEUES_PER_QS];
+	struct	cmp_queue	cq[MAX_CMP_QUEUES_PER_QS];
+	struct	snd_queue	sq[MAX_SND_QUEUES_PER_QS];
+	struct	rbdr		rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
+} ____cacheline_aligned_in_smp;
+
+#define GET_RBDR_DESC(RING, idx)\
+		(&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
+#define GET_SQ_DESC(RING, idx)\
+		(&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
+#define GET_CQ_DESC(RING, idx)\
+		(&(((union cq_desc_t *)((RING)->desc))[idx]))
+
+/* CQ status bits */
+#define	CQ_WR_FULL	BIT(26)
+#define	CQ_WR_DISABLE	BIT(25)
+#define	CQ_WR_FAULT	BIT(24)
+#define	CQ_CQE_COUNT	(0xFFFF << 0)
+
+#define	CQ_ERR_MASK	(CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
+
+int nicvf_set_qset_resources(struct nicvf *nic);
+int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
+void nicvf_qset_config(struct nicvf *nic, bool enable);
+void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+			    int qidx, bool enable);
+
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
+void nicvf_sq_disable(struct nicvf *nic, int qidx);
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
+void nicvf_sq_free_used_descs(struct net_device *netdev,
+			      struct snd_queue *sq, int qidx);
+int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb);
+
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
+void nicvf_rbdr_task(unsigned long data);
+void nicvf_rbdr_work(struct work_struct *work);
+
+void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
+void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
+void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
+int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
+
+/* Register access APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
+u64  nicvf_reg_read(struct nicvf *nic, u64 offset);
+void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
+u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+			   u64 qidx, u64 val);
+u64  nicvf_queue_reg_read(struct nicvf *nic,
+			  u64 offset, u64 qidx);
+
+/* Stats */
+void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
+void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
+int nicvf_check_cqe_rx_errs(struct nicvf *nic,
+			    struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
+int nicvf_check_cqe_tx_errs(struct nicvf *nic,
+			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
+#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h
new file mode 100644
index 0000000..3c1de97
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/q_struct.h
@@ -0,0 +1,701 @@
+/*
+ * This file contains HW queue descriptor formats, config register
+ * structures, etc.
+ *
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef Q_STRUCT_H
+#define Q_STRUCT_H
+
+/* Load transaction types for reading segment bytes specified by
+ * NIC_SEND_GATHER_S[LD_TYPE].
+ */
+enum nic_send_ld_type_e {
+	NIC_SEND_LD_TYPE_E_LDD = 0x0,
+	NIC_SEND_LD_TYPE_E_LDT = 0x1,
+	NIC_SEND_LD_TYPE_E_LDWB = 0x2,
+	NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3,
+};
+
+enum ether_type_algorithm {
+	ETYPE_ALG_NONE = 0x0,
+	ETYPE_ALG_SKIP = 0x1,
+	ETYPE_ALG_ENDPARSE = 0x2,
+	ETYPE_ALG_VLAN = 0x3,
+	ETYPE_ALG_VLAN_STRIP = 0x4,
+};
+
+enum layer3_type {
+	L3TYPE_NONE = 0x00,
+	L3TYPE_GRH = 0x01,
+	L3TYPE_IPV4 = 0x04,
+	L3TYPE_IPV4_OPTIONS = 0x05,
+	L3TYPE_IPV6 = 0x06,
+	L3TYPE_IPV6_OPTIONS = 0x07,
+	L3TYPE_ET_STOP = 0x0D,
+	L3TYPE_OTHER = 0x0E,
+};
+
+enum layer4_type {
+	L4TYPE_NONE = 0x00,
+	L4TYPE_IPSEC_ESP = 0x01,
+	L4TYPE_IPFRAG = 0x02,
+	L4TYPE_IPCOMP = 0x03,
+	L4TYPE_TCP = 0x04,
+	L4TYPE_UDP = 0x05,
+	L4TYPE_SCTP = 0x06,
+	L4TYPE_GRE = 0x07,
+	L4TYPE_ROCE_BTH = 0x08,
+	L4TYPE_OTHER = 0x0E,
+};
+
+/* CPI and RSSI configuration */
+enum cpi_algorithm_type {
+	CPI_ALG_NONE = 0x0,
+	CPI_ALG_VLAN = 0x1,
+	CPI_ALG_VLAN16 = 0x2,
+	CPI_ALG_DIFF = 0x3,
+};
+
+enum rss_algorithm_type {
+	RSS_ALG_NONE = 0x00,
+	RSS_ALG_PORT = 0x01,
+	RSS_ALG_IP = 0x02,
+	RSS_ALG_TCP_IP = 0x03,
+	RSS_ALG_UDP_IP = 0x04,
+	RSS_ALG_SCTP_IP = 0x05,
+	RSS_ALG_GRE_IP = 0x06,
+	RSS_ALG_ROCE = 0x07,
+};
+
+enum rss_hash_cfg {
+	RSS_HASH_L2ETC = 0x00,
+	RSS_HASH_IP = 0x01,
+	RSS_HASH_TCP = 0x02,
+	RSS_HASH_TCP_SYN_DIS = 0x03,
+	RSS_HASH_UDP = 0x04,
+	RSS_HASH_L4ETC = 0x05,
+	RSS_HASH_ROCE = 0x06,
+	RSS_L3_BIDI = 0x07,
+	RSS_L4_BIDI = 0x08,
+};
+
+/* Completion queue entry types */
+enum cqe_type {
+	CQE_TYPE_INVALID = 0x0,
+	CQE_TYPE_RX = 0x2,
+	CQE_TYPE_RX_SPLIT = 0x3,
+	CQE_TYPE_RX_TCP = 0x4,
+	CQE_TYPE_SEND = 0x8,
+	CQE_TYPE_SEND_PTP = 0x9,
+};
+
+enum cqe_rx_tcp_status {
+	CQE_RX_STATUS_VALID_TCP_CNXT = 0x00,
+	CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
+};
+
+enum cqe_send_status {
+	CQE_SEND_STATUS_GOOD = 0x00,
+	CQE_SEND_STATUS_DESC_FAULT = 0x01,
+	CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
+	CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
+	CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
+	CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
+	CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
+	CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
+	CQE_SEND_STATUS_LOCK_VIOL = 0x84,
+	CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
+	CQE_SEND_STATUS_DATA_FAULT = 0x86,
+	CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
+	CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
+	CQE_SEND_STATUS_MEM_FAULT = 0x89,
+	CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
+	CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
+};
+
+enum cqe_rx_tcp_end_reason {
+	CQE_RX_TCP_END_FIN_FLAG_DET = 0,
+	CQE_RX_TCP_END_INVALID_FLAG = 1,
+	CQE_RX_TCP_END_TIMEOUT = 2,
+	CQE_RX_TCP_END_OUT_OF_SEQ = 3,
+	CQE_RX_TCP_END_PKT_ERR = 4,
+	CQE_RX_TCP_END_QS_DISABLED = 0x0F,
+};
+
+/* Packet protocol level error enumeration */
+enum cqe_rx_err_level {
+	CQE_RX_ERRLVL_RE = 0x0,
+	CQE_RX_ERRLVL_L2 = 0x1,
+	CQE_RX_ERRLVL_L3 = 0x2,
+	CQE_RX_ERRLVL_L4 = 0x3,
+};
+
+/* Packet protocol level error type enumeration */
+enum cqe_rx_err_opcode {
+	CQE_RX_ERR_RE_NONE = 0x0,
+	CQE_RX_ERR_RE_PARTIAL = 0x1,
+	CQE_RX_ERR_RE_JABBER = 0x2,
+	CQE_RX_ERR_RE_FCS = 0x7,
+	CQE_RX_ERR_RE_TERMINATE = 0x9,
+	CQE_RX_ERR_RE_RX_CTL = 0xb,
+	CQE_RX_ERR_PREL2_ERR = 0x1f,
+	CQE_RX_ERR_L2_FRAGMENT = 0x20,
+	CQE_RX_ERR_L2_OVERRUN = 0x21,
+	CQE_RX_ERR_L2_PFCS = 0x22,
+	CQE_RX_ERR_L2_PUNY = 0x23,
+	CQE_RX_ERR_L2_MAL = 0x24,
+	CQE_RX_ERR_L2_OVERSIZE = 0x25,
+	CQE_RX_ERR_L2_UNDERSIZE = 0x26,
+	CQE_RX_ERR_L2_LENMISM = 0x27,
+	CQE_RX_ERR_L2_PCLP = 0x28,
+	CQE_RX_ERR_IP_NOT = 0x41,
+	CQE_RX_ERR_IP_CHK = 0x42,
+	CQE_RX_ERR_IP_MAL = 0x43,
+	CQE_RX_ERR_IP_MALD = 0x44,
+	CQE_RX_ERR_IP_HOP = 0x45,
+	CQE_RX_ERR_L3_ICRC = 0x46,
+	CQE_RX_ERR_L3_PCLP = 0x47,
+	CQE_RX_ERR_L4_MAL = 0x61,
+	CQE_RX_ERR_L4_CHK = 0x62,
+	CQE_RX_ERR_UDP_LEN = 0x63,
+	CQE_RX_ERR_L4_PORT = 0x64,
+	CQE_RX_ERR_TCP_FLAG = 0x65,
+	CQE_RX_ERR_TCP_OFFSET = 0x66,
+	CQE_RX_ERR_L4_PCLP = 0x67,
+	CQE_RX_ERR_RBDR_TRUNC = 0x70,
+};
+
+struct cqe_rx_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64   cqe_type:4; /* W0 */
+	u64   stdn_fault:1;
+	u64   rsvd0:1;
+	u64   rq_qs:7;
+	u64   rq_idx:3;
+	u64   rsvd1:12;
+	u64   rss_alg:4;
+	u64   rsvd2:4;
+	u64   rb_cnt:4;
+	u64   vlan_found:1;
+	u64   vlan_stripped:1;
+	u64   vlan2_found:1;
+	u64   vlan2_stripped:1;
+	u64   l4_type:4;
+	u64   l3_type:4;
+	u64   l2_present:1;
+	u64   err_level:3;
+	u64   err_opcode:8;
+
+	u64   pkt_len:16; /* W1 */
+	u64   l2_ptr:8;
+	u64   l3_ptr:8;
+	u64   l4_ptr:8;
+	u64   cq_pkt_len:8;
+	u64   align_pad:3;
+	u64   rsvd3:1;
+	u64   chan:12;
+
+	u64   rss_tag:32; /* W2 */
+	u64   vlan_tci:16;
+	u64   vlan_ptr:8;
+	u64   vlan2_ptr:8;
+
+	u64   rb3_sz:16; /* W3 */
+	u64   rb2_sz:16;
+	u64   rb1_sz:16;
+	u64   rb0_sz:16;
+
+	u64   rb7_sz:16; /* W4 */
+	u64   rb6_sz:16;
+	u64   rb5_sz:16;
+	u64   rb4_sz:16;
+
+	u64   rb11_sz:16; /* W5 */
+	u64   rb10_sz:16;
+	u64   rb9_sz:16;
+	u64   rb8_sz:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64   err_opcode:8;
+	u64   err_level:3;
+	u64   l2_present:1;
+	u64   l3_type:4;
+	u64   l4_type:4;
+	u64   vlan2_stripped:1;
+	u64   vlan2_found:1;
+	u64   vlan_stripped:1;
+	u64   vlan_found:1;
+	u64   rb_cnt:4;
+	u64   rsvd2:4;
+	u64   rss_alg:4;
+	u64   rsvd1:12;
+	u64   rq_idx:3;
+	u64   rq_qs:7;
+	u64   rsvd0:1;
+	u64   stdn_fault:1;
+	u64   cqe_type:4; /* W0 */
+	u64   chan:12;
+	u64   rsvd3:1;
+	u64   align_pad:3;
+	u64   cq_pkt_len:8;
+	u64   l4_ptr:8;
+	u64   l3_ptr:8;
+	u64   l2_ptr:8;
+	u64   pkt_len:16; /* W1 */
+	u64   vlan2_ptr:8;
+	u64   vlan_ptr:8;
+	u64   vlan_tci:16;
+	u64   rss_tag:32; /* W2 */
+	u64   rb0_sz:16;
+	u64   rb1_sz:16;
+	u64   rb2_sz:16;
+	u64   rb3_sz:16; /* W3 */
+	u64   rb4_sz:16;
+	u64   rb5_sz:16;
+	u64   rb6_sz:16;
+	u64   rb7_sz:16; /* W4 */
+	u64   rb8_sz:16;
+	u64   rb9_sz:16;
+	u64   rb10_sz:16;
+	u64   rb11_sz:16; /* W5 */
+#endif
+	u64   rb0_ptr:64;
+	u64   rb1_ptr:64;
+	u64   rb2_ptr:64;
+	u64   rb3_ptr:64;
+	u64   rb4_ptr:64;
+	u64   rb5_ptr:64;
+	u64   rb6_ptr:64;
+	u64   rb7_ptr:64;
+	u64   rb8_ptr:64;
+	u64   rb9_ptr:64;
+	u64   rb10_ptr:64;
+	u64   rb11_ptr:64;
+};
+
+struct cqe_rx_tcp_err_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64   cqe_type:4; /* W0 */
+	u64   rsvd0:60;
+
+	u64   rsvd1:4; /* W1 */
+	u64   partial_first:1;
+	u64   rsvd2:27;
+	u64   rbdr_bytes:8;
+	u64   rsvd3:24;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64   rsvd0:60;
+	u64   cqe_type:4;
+
+	u64   rsvd3:24;
+	u64   rbdr_bytes:8;
+	u64   rsvd2:27;
+	u64   partial_first:1;
+	u64   rsvd1:4;
+#endif
+};
+
+struct cqe_rx_tcp_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64   cqe_type:4; /* W0 */
+	u64   rsvd0:52;
+	u64   cq_tcp_status:8;
+
+	u64   rsvd1:32; /* W1 */
+	u64   tcp_cntx_bytes:8;
+	u64   rsvd2:8;
+	u64   tcp_err_bytes:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64   cq_tcp_status:8;
+	u64   rsvd0:52;
+	u64   cqe_type:4; /* W0 */
+
+	u64   tcp_err_bytes:16;
+	u64   rsvd2:8;
+	u64   tcp_cntx_bytes:8;
+	u64   rsvd1:32; /* W1 */
+#endif
+};
+
+struct cqe_send_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64   cqe_type:4; /* W0 */
+	u64   rsvd0:4;
+	u64   sqe_ptr:16;
+	u64   rsvd1:4;
+	u64   rsvd2:10;
+	u64   sq_qs:7;
+	u64   sq_idx:3;
+	u64   rsvd3:8;
+	u64   send_status:8;
+
+	u64   ptp_timestamp:64; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64   send_status:8;
+	u64   rsvd3:8;
+	u64   sq_idx:3;
+	u64   sq_qs:7;
+	u64   rsvd2:10;
+	u64   rsvd1:4;
+	u64   sqe_ptr:16;
+	u64   rsvd0:4;
+	u64   cqe_type:4; /* W0 */
+
+	u64   ptp_timestamp:64; /* W1 */
+#endif
+};
+
+union cq_desc_t {
+	u64    u[64];
+	struct cqe_send_t snd_hdr;
+	struct cqe_rx_t rx_hdr;
+	struct cqe_rx_tcp_t rx_tcp_hdr;
+	struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
+};
+
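+/* Entries hold the 128-byte-aligned buffer address shifted right by
+ * NICVF_RCV_BUF_ALIGN (7); hence the 42-bit buf_addr field.
+ */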
+struct rbdr_entry_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64   rsvd0:15;
+	u64   buf_addr:42;
+	u64   cache_align:7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64   cache_align:7;
+	u64   buf_addr:42;
+	u64   rsvd0:15;
+#endif
+};
+
+/* TCP reassembly context */
+struct rbe_tcp_cnxt_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64   tcp_pkt_cnt:12;
+	u64   rsvd1:4;
+	u64   align_hdr_bytes:4;
+	u64   align_ptr_bytes:4;
+	u64   ptr_bytes:16;
+	u64   rsvd2:24;
+	u64   cqe_type:4;
+	u64   rsvd0:54;
+	u64   tcp_end_reason:2;
+	u64   tcp_status:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64   tcp_status:4;
+	u64   tcp_end_reason:2;
+	u64   rsvd0:54;
+	u64   cqe_type:4;
+	u64   rsvd2:24;
+	u64   ptr_bytes:16;
+	u64   align_ptr_bytes:4;
+	u64   align_hdr_bytes:4;
+	u64   rsvd1:4;
+	u64   tcp_pkt_cnt:12;
+#endif
+};
+
+/* Always Big endian */
+struct rx_hdr_t {
+	u64   opaque:32;
+	u64   rss_flow:8;
+	u64   skip_length:6;
+	u64   disable_rss:1;
+	u64   disable_tcp_reassembly:1;
+	u64   nodrop:1;
+	u64   dest_alg:2;
+	u64   rsvd0:2;
+	u64   dest_rq:11;
+};
+
+enum send_l4_csum_type {
+	SEND_L4_CSUM_DISABLE = 0x00,
+	SEND_L4_CSUM_UDP = 0x01,
+	SEND_L4_CSUM_TCP = 0x02,
+	SEND_L4_CSUM_SCTP = 0x03,
+};
+
+enum send_crc_alg {
+	SEND_CRCALG_CRC32 = 0x00,
+	SEND_CRCALG_CRC32C = 0x01,
+	SEND_CRCALG_ICRC = 0x02,
+};
+
+enum send_load_type {
+	SEND_LD_TYPE_LDD = 0x00,
+	SEND_LD_TYPE_LDT = 0x01,
+	SEND_LD_TYPE_LDWB = 0x02,
+};
+
+enum send_mem_alg_type {
+	SEND_MEMALG_SET = 0x00,
+	SEND_MEMALG_ADD = 0x08,
+	SEND_MEMALG_SUB = 0x09,
+	SEND_MEMALG_ADDLEN = 0x0A,
+	SEND_MEMALG_SUBLEN = 0x0B,
+};
+
+enum send_mem_dsz_type {
+	SEND_MEMDSZ_B64 = 0x00,
+	SEND_MEMDSZ_B32 = 0x01,
+	SEND_MEMDSZ_B8 = 0x03,
+};
+
+enum sq_subdesc_type {
+	SQ_DESC_TYPE_INVALID = 0x00,
+	SQ_DESC_TYPE_HEADER = 0x01,
+	SQ_DESC_TYPE_CRC = 0x02,
+	SQ_DESC_TYPE_IMMEDIATE = 0x03,
+	SQ_DESC_TYPE_GATHER = 0x04,
+	SQ_DESC_TYPE_MEMORY = 0x05,
+};
+
+struct sq_crc_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64    rsvd1:32;
+	u64    crc_ival:32;
+	u64    subdesc_type:4;
+	u64    crc_alg:2;
+	u64    rsvd0:10;
+	u64    crc_insert_pos:16;
+	u64    hdr_start:16;
+	u64    crc_len:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64    crc_len:16;
+	u64    hdr_start:16;
+	u64    crc_insert_pos:16;
+	u64    rsvd0:10;
+	u64    crc_alg:2;
+	u64    subdesc_type:4;
+	u64    crc_ival:32;
+	u64    rsvd1:32;
+#endif
+};
+
+struct sq_gather_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64    subdesc_type:4; /* W0 */
+	u64    ld_type:2;
+	u64    rsvd0:42;
+	u64    size:16;
+
+	u64    rsvd1:15; /* W1 */
+	u64    addr:49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64    size:16;
+	u64    rsvd0:42;
+	u64    ld_type:2;
+	u64    subdesc_type:4; /* W0 */
+
+	u64    addr:49;
+	u64    rsvd1:15; /* W1 */
+#endif
+};
+
+/* SQ immediate subdescriptor */
+struct sq_imm_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64    subdesc_type:4; /* W0 */
+	u64    rsvd0:46;
+	u64    len:14;
+
+	u64    data:64; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64    len:14;
+	u64    rsvd0:46;
+	u64    subdesc_type:4; /* W0 */
+
+	u64    data:64; /* W1 */
+#endif
+};
+
+struct sq_mem_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64    subdesc_type:4; /* W0 */
+	u64    mem_alg:4;
+	u64    mem_dsz:2;
+	u64    wmem:1;
+	u64    rsvd0:21;
+	u64    offset:32;
+
+	u64    rsvd1:15; /* W1 */
+	u64    addr:49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64    offset:32;
+	u64    rsvd0:21;
+	u64    wmem:1;
+	u64    mem_dsz:2;
+	u64    mem_alg:4;
+	u64    subdesc_type:4; /* W0 */
+
+	u64    addr:49;
+	u64    rsvd1:15; /* W1 */
+#endif
+};
+
+struct sq_hdr_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64    subdesc_type:4;
+	u64    tso:1;
+	u64    post_cqe:1; /* Post CQE on no error also */
+	u64    dont_send:1;
+	u64    tstmp:1;
+	u64    subdesc_cnt:8;
+	u64    csum_l4:2;
+	u64    csum_l3:1;
+	u64    rsvd0:5;
+	u64    l4_offset:8;
+	u64    l3_offset:8;
+	u64    rsvd1:4;
+	u64    tot_len:20; /* W0 */
+
+	u64    tso_sdc_cont:8;
+	u64    tso_sdc_first:8;
+	u64    tso_l4_offset:8;
+	u64    tso_flags_last:12;
+	u64    tso_flags_first:12;
+	u64    rsvd2:2;
+	u64    tso_max_paysize:14; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64    tot_len:20;
+	u64    rsvd1:4;
+	u64    l3_offset:8;
+	u64    l4_offset:8;
+	u64    rsvd0:5;
+	u64    csum_l3:1;
+	u64    csum_l4:2;
+	u64    subdesc_cnt:8;
+	u64    tstmp:1;
+	u64    dont_send:1;
+	u64    post_cqe:1; /* Post CQE on no error also */
+	u64    tso:1;
+	u64    subdesc_type:4; /* W0 */
+
+	u64    tso_max_paysize:14;
+	u64    rsvd2:2;
+	u64    tso_flags_first:12;
+	u64    tso_flags_last:12;
+	u64    tso_l4_offset:8;
+	u64    tso_sdc_first:8;
+	u64    tso_sdc_cont:8; /* W1 */
+#endif
+};
+
+/* Queue config register formats */
+struct rq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 reserved_2_63:62;
+	u64 ena:1;
+	u64 tcp_ena:1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64 tcp_ena:1;
+	u64 ena:1;
+	u64 reserved_2_63:62;
+#endif
+};
+
+struct cq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 reserved_43_63:21;
+	u64 ena:1;
+	u64 reset:1;
+	u64 caching:1;
+	u64 reserved_35_39:5;
+	u64 qsize:3;
+	u64 reserved_25_31:7;
+	u64 avg_con:9;
+	u64 reserved_0_15:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64 reserved_0_15:16;
+	u64 avg_con:9;
+	u64 reserved_25_31:7;
+	u64 qsize:3;
+	u64 reserved_35_39:5;
+	u64 caching:1;
+	u64 reset:1;
+	u64 ena:1;
+	u64 reserved_43_63:21;
+#endif
+};
+
+struct sq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 reserved_20_63:44;
+	u64 ena:1;
+	u64 reserved_18_18:1;
+	u64 reset:1;
+	u64 ldwb:1;
+	u64 reserved_11_15:5;
+	u64 qsize:3;
+	u64 reserved_3_7:5;
+	u64 tstmp_bgx_intf:3;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64 tstmp_bgx_intf:3;
+	u64 reserved_3_7:5;
+	u64 qsize:3;
+	u64 reserved_11_15:5;
+	u64 ldwb:1;
+	u64 reset:1;
+	u64 reserved_18_18:1;
+	u64 ena:1;
+	u64 reserved_20_63:44;
+#endif
+};
+
+struct rbdr_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 reserved_45_63:19;
+	u64 ena:1;
+	u64 reset:1;
+	u64 ldwb:1;
+	u64 reserved_36_41:6;
+	u64 qsize:4;
+	u64 reserved_25_31:7;
+	u64 avg_con:9;
+	u64 reserved_12_15:4;
+	u64 lines:12;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64 lines:12;
+	u64 reserved_12_15:4;
+	u64 avg_con:9;
+	u64 reserved_25_31:7;
+	u64 qsize:4;
+	u64 reserved_36_41:6;
+	u64 ldwb:1;
+	u64 reset:1;
+	u64 ena:1;
+	u64 reserved_45_63:19;
+#endif
+};
+
+struct qs_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 reserved_32_63:32;
+	u64 ena:1;
+	u64 reserved_27_30:4;
+	u64 sq_ins_ena:1;
+	u64 sq_ins_pos:6;
+	u64 lock_ena:1;
+	u64 lock_viol_cqe_ena:1;
+	u64 send_tstmp_ena:1;
+	u64 be:1;
+	u64 reserved_7_15:9;
+	u64 vnic:7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64 vnic:7;
+	u64 reserved_7_15:9;
+	u64 be:1;
+	u64 send_tstmp_ena:1;
+	u64 lock_viol_cqe_ena:1;
+	u64 lock_ena:1;
+	u64 sq_ins_pos:6;
+	u64 sq_ins_ena:1;
+	u64 reserved_27_30:4;
+	u64 ena:1;
+	u64 reserved_32_63:32;
+#endif
+};
+
+#endif /* Q_STRUCT_H */
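
An aside on the layout convention used throughout q_struct.h above: every
descriptor declares its bitfields twice, mirrored under
__BIG_ENDIAN_BITFIELD/__LITTLE_ENDIAN_BITFIELD, so each hardware field lands
on the same bits of the 64-bit word on either host.  A minimal userspace
sketch of the same trick (illustrative only; the struct and field names are
invented, and without the kernel's config macros it assumes a little-endian
host):

#include <stdint.h>
#include <stdio.h>

/* A 4-bit "cqe_type"-style field that hardware defines as bits 63:60. */
struct example_desc {
#if defined(__BIG_ENDIAN_BITFIELD)
	uint64_t type:4;	/* declared first: occupies bits 63:60 */
	uint64_t rsvd:60;
#else
	uint64_t rsvd:60;
	uint64_t type:4;	/* declared last: still bits 63:60 */
#endif
};

int main(void)
{
	union {
		struct example_desc desc;
		uint64_t raw;
	} u = { .raw = 0 };

	u.desc.type = 0x6;
	/* Both expressions print 0x6 on either endianness. */
	printf("type=%#x raw>>60=%#x\n",
	       (unsigned)u.desc.type, (unsigned)(u.raw >> 60));
	return 0;
}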
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
new file mode 100644
index 0000000..633ec05
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -0,0 +1,966 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME	"thunder-BGX"
+#define DRV_VERSION	"1.0"
+
+struct lmac {
+	struct bgx		*bgx;
+	int			dmac;
+	unsigned char		mac[ETH_ALEN];
+	bool			link_up;
+	int			lmacid; /* ID within BGX */
+	int			lmacid_bd; /* ID on board */
+	struct net_device       netdev;
+	struct phy_device       *phydev;
+	unsigned int            last_duplex;
+	unsigned int            last_link;
+	unsigned int            last_speed;
+	bool			is_sgmii;
+	struct delayed_work	dwork;
+	struct workqueue_struct *check_link;
+};
+
+struct bgx {
+	u8			bgx_id;
+	u8			qlm_mode;
+	struct	lmac		lmac[MAX_LMAC_PER_BGX];
+	int			lmac_count;
+	int                     lmac_type;
+	int                     lane_to_sds;
+	int			use_training;
+	void __iomem		*reg_base;
+	struct pci_dev		*pdev;
+};
+
+static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
+static int lmac_count; /* Total number of LMACs in the system */
+
+static int bgx_xaui_check_link(struct lmac *lmac);
+
+/* Supported devices */
+static const struct pci_device_id bgx_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
+	{ 0, }  /* end of table */
+};
+
+MODULE_AUTHOR("Cavium Inc");
+MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, bgx_id_table);
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver.  The readq()/writeq() functions add
+ * explicit ordering operations which in this case are redundant, and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
+{
+	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+	return readq_relaxed(addr);
+}
+
+static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
+{
+	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+	writeq_relaxed(val, addr);
+}
+
+static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
+{
+	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+	writeq_relaxed(val | readq_relaxed(addr), addr);
+}
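+
+/* Note: bgx_reg_modify() can only OR bits in; clearing a field takes an
+ * explicit read/mask/write sequence, as bgx_lmac_xaui_init() does below.
+ */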
+
+static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
+{
+	int timeout = 100;
+	u64 reg_val;
+
+	while (timeout) {
+		reg_val = bgx_reg_read(bgx, lmac, reg);
+		if (zero && !(reg_val & mask))
+			return 0;
+		if (!zero && (reg_val & mask))
+			return 0;
+		usleep_range(1000, 2000);
+		timeout--;
+	}
+	return 1;
+}
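+
+/* Note: bgx_poll_reg() returns 0 once the bits under "mask" reach the
+ * wanted state (all clear when "zero" is true, any set otherwise) and 1
+ * after roughly 100-200ms of polling, so callers below treat a nonzero
+ * result as a hard init failure.
+ */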
+
+/* Return a bitmap of the BGX instances present in HW */
+unsigned bgx_get_map(int node)
+{
+	int i;
+	unsigned map = 0;
+
+	for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
+		if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
+			map |= (1 << i);
+	}
+
+	return map;
+}
+EXPORT_SYMBOL(bgx_get_map);
+
+/* Return number of LMACs configured for this BGX */
+int bgx_get_lmac_count(int node, int bgx_idx)
+{
+	struct bgx *bgx;
+
+	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+	if (bgx)
+		return bgx->lmac_count;
+
+	return 0;
+}
+EXPORT_SYMBOL(bgx_get_lmac_count);
+
+/* Returns the current link status of LMAC */
+void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
+{
+	struct bgx_link_status *link = (struct bgx_link_status *)status;
+	struct bgx *bgx;
+	struct lmac *lmac;
+
+	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+	if (!bgx)
+		return;
+
+	lmac = &bgx->lmac[lmacid];
+	link->link_up = lmac->link_up;
+	link->duplex = lmac->last_duplex;
+	link->speed = lmac->last_speed;
+}
+EXPORT_SYMBOL(bgx_get_lmac_link_state);
+
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
+{
+	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+
+	if (bgx)
+		return bgx->lmac[lmacid].mac;
+
+	return NULL;
+}
+EXPORT_SYMBOL(bgx_get_lmac_mac);
+
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
+{
+	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+
+	if (!bgx)
+		return;
+
+	ether_addr_copy(bgx->lmac[lmacid].mac, mac);
+}
+EXPORT_SYMBOL(bgx_set_lmac_mac);
+
+static void bgx_sgmii_change_link_state(struct lmac *lmac)
+{
+	struct bgx *bgx = lmac->bgx;
+	u64 cmr_cfg;
+	u64 port_cfg = 0;
+	u64 misc_ctl = 0;
+
+	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
+	cmr_cfg &= ~CMR_EN;
+	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+
+	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
+	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);
+
+	if (lmac->link_up) {
+		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
+		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
+		port_cfg |=  (lmac->last_duplex << 2);
+	} else {
+		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
+	}
+
+	switch (lmac->last_speed) {
+	case 10:
+		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
+		port_cfg |= GMI_PORT_CFG_SPEED_MSB;  /* speed_msb 1 */
+		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
+		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+		misc_ctl |= 50; /* samp_pt */
+		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
+		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+		break;
+	case 100:
+		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
+		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
+		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
+		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+		misc_ctl |= 5; /* samp_pt */
+		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
+		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+		break;
+	case 1000:
+		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
+		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
+		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
+		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+		misc_ctl |= 1; /* samp_pt */
+		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
+		if (lmac->last_duplex)
+			bgx_reg_write(bgx, lmac->lmacid,
+				      BGX_GMP_GMI_TXX_BURST, 0);
+		else
+			bgx_reg_write(bgx, lmac->lmacid,
+				      BGX_GMP_GMI_TXX_BURST, 8192);
+		break;
+	default:
+		break;
+	}
+	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
+	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);
+
+	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
+
+	/* Re-enable lmac */
+	cmr_cfg |= CMR_EN;
+	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+}
+
+static void bgx_lmac_handler(struct net_device *netdev)
+{
+	struct lmac *lmac = container_of(netdev, struct lmac, netdev);
+	struct phy_device *phydev = lmac->phydev;
+	int link_changed = 0;
+
+	if (!phydev->link && lmac->last_link)
+		link_changed = -1;
+
+	if (phydev->link &&
+	    (lmac->last_duplex != phydev->duplex ||
+	     lmac->last_link != phydev->link ||
+	     lmac->last_speed != phydev->speed))
+		link_changed = 1;
+
+	lmac->last_link = phydev->link;
+	lmac->last_speed = phydev->speed;
+	lmac->last_duplex = phydev->duplex;
+
+	if (!link_changed)
+		return;
+
+	if (link_changed > 0)
+		lmac->link_up = true;
+	else
+		lmac->link_up = false;
+
+	if (lmac->is_sgmii)
+		bgx_sgmii_change_link_state(lmac);
+	else
+		bgx_xaui_check_link(lmac);
+}
+
+u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
+{
+	struct bgx *bgx;
+
+	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+	if (!bgx)
+		return 0;
+
+	if (idx > 8)
+		lmac = 0;
+	return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
+}
+EXPORT_SYMBOL(bgx_get_rx_stats);
+
+u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
+{
+	struct bgx *bgx;
+
+	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+	if (!bgx)
+		return 0;
+
+	return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
+}
+EXPORT_SYMBOL(bgx_get_tx_stats);
+
+static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
+{
+	u64 offset;
+
+	while (bgx->lmac[lmac].dmac > 0) {
+		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
+			(lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
+		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
+		bgx->lmac[lmac].dmac--;
+	}
+}
+
+static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
+{
+	u64 cfg;
+
+	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
+	/* max packet size */
+	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);
+
+	/* Disable frame alignment if using preamble */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
+	if (cfg & 1)
+		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);
+
+	/* Enable lmac */
+	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+	/* PCS reset */
+	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
+	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
+			 PCS_MRX_CTL_RESET, true)) {
+		dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
+		return -1;
+	}
+
+	/* Power up, reset autoneg, enable autoneg */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
+	cfg &= ~PCS_MRX_CTL_PWR_DN;
+	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
+	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
+
+	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
+			 PCS_MRX_STATUS_AN_CPT, false)) {
+		dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
+{
+	u64 cfg;
+
+	/* Reset SPU */
+	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
+	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
+		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
+		return -1;
+	}
+
+	/* Disable LMAC */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+	cfg &= ~CMR_EN;
+	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+	/* Disable RX; for RXAUI also set interleaved running disparity */
+	if (bgx->lmac_type != BGX_MODE_RXAUI)
+		bgx_reg_modify(bgx, lmacid,
+			       BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
+	else
+		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
+			       SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
+
+	/* clear all interrupts */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
+	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
+	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+
+	if (bgx->use_training) {
+		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
+		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
+		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
+		/* training enable */
+		bgx_reg_modify(bgx, lmacid,
+			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
+	}
+
+	/* Append FCS to each packet */
+	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);
+
+	/* Disable forward error correction */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
+	cfg &= ~SPU_FEC_CTL_FEC_EN;
+	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);
+
+	/* Disable autoneg */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
+	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
+	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);
+
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
+	if (bgx->lmac_type == BGX_MODE_10G_KR)
+		cfg |= (1 << 23);
+	else if (bgx->lmac_type == BGX_MODE_40G_KR)
+		cfg |= (1 << 24);
+	else
+		cfg &= ~((1 << 23) | (1 << 24));
+	cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
+	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);
+
+	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
+	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
+	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);
+
+	/* Enable lmac */
+	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
+	cfg &= ~SPU_CTL_LOW_POWER;
+	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);
+
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
+	cfg &= ~SMU_TX_CTL_UNI_EN;
+	cfg |= SMU_TX_CTL_DIC_EN;
+	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);
+
+	/* take lmac_count into account */
+	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
+	/* max packet size */
+	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);
+
+	return 0;
+}
+
+static int bgx_xaui_check_link(struct lmac *lmac)
+{
+	struct bgx *bgx = lmac->bgx;
+	int lmacid = lmac->lmacid;
+	int lmac_type = bgx->lmac_type;
+	u64 cfg;
+
+	bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
+	if (bgx->use_training) {
+		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+		if (!(cfg & (1ull << 13))) {
+			cfg = (1ull << 13) | (1ull << 14);
+			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
+			cfg |= (1ull << 0);
+			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
+			return -1;
+		}
+	}
+
+	/* wait for PCS to come out of reset */
+	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
+		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
+		return -1;
+	}
+
+	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
+	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
+		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
+				 SPU_BR_STATUS_BLK_LOCK, false)) {
+			dev_err(&bgx->pdev->dev,
+				"SPU_BR_STATUS_BLK_LOCK not completed\n");
+			return -1;
+		}
+	} else {
+		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
+				 SPU_BX_STATUS_RX_ALIGN, false)) {
+			dev_err(&bgx->pdev->dev,
+				"SPU_BX_STATUS_RX_ALIGN not completed\n");
+			return -1;
+		}
+	}
+
+	/* Clear rcvflt bit (latching high) and read it back */
+	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+		dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
+		if (bgx->use_training) {
+			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+			if (!(cfg & (1ull << 13))) {
+				cfg = (1ull << 13) | (1ull << 14);
+				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+				cfg = bgx_reg_read(bgx, lmacid,
+						   BGX_SPUX_BR_PMD_CRTL);
+				cfg |= (1ull << 0);
+				bgx_reg_write(bgx, lmacid,
+					      BGX_SPUX_BR_PMD_CRTL, cfg);
+				return -1;
+			}
+		}
+		return -1;
+	}
+
+	/* Wait for MAC RX to be ready */
+	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
+			 SMU_RX_CTL_STATUS, true)) {
+		dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
+		return -1;
+	}
+
+	/* Wait for BGX RX to be idle */
+	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
+		dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
+		return -1;
+	}
+
+	/* Wait for BGX TX to be idle */
+	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
+		dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
+		return -1;
+	}
+
+	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+		dev_err(&bgx->pdev->dev, "Receive fault\n");
+		return -1;
+	}
+
+	/* Receive link is latching low. Force it high and verify it */
+	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
+	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
+			 SPU_STATUS1_RCV_LNK, false)) {
+		dev_err(&bgx->pdev->dev, "SPU receive link down\n");
+		return -1;
+	}
+
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
+	cfg &= ~SPU_MISC_CTL_RX_DIS;
+	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
+	return 0;
+}
+
+static void bgx_poll_for_link(struct work_struct *work)
+{
+	struct lmac *lmac;
+	u64 link;
+
+	lmac = container_of(work, struct lmac, dwork.work);
+
+	/* Receive link is latching low. Force it high and verify it */
+	bgx_reg_modify(lmac->bgx, lmac->lmacid,
+		       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
+	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
+		     SPU_STATUS1_RCV_LNK, false);
+
+	link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+	if (link & SPU_STATUS1_RCV_LNK) {
+		lmac->link_up = 1;
+		if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
+			lmac->last_speed = 40000;
+		else
+			lmac->last_speed = 10000;
+		lmac->last_duplex = 1;
+	} else {
+		lmac->link_up = 0;
+	}
+
+	if (lmac->last_link != lmac->link_up) {
+		lmac->last_link = lmac->link_up;
+		if (lmac->link_up)
+			bgx_xaui_check_link(lmac);
+	}
+
+	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
+}
+
+static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
+{
+	struct lmac *lmac;
+	u64 cfg;
+
+	lmac = &bgx->lmac[lmacid];
+	lmac->bgx = bgx;
+
+	if (bgx->lmac_type == BGX_MODE_SGMII) {
+		lmac->is_sgmii = 1;
+		if (bgx_lmac_sgmii_init(bgx, lmacid))
+			return -1;
+	} else {
+		lmac->is_sgmii = 0;
+		if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
+			return -1;
+	}
+
+	if (lmac->is_sgmii) {
+		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
+		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
+		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
+		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
+	} else {
+		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
+		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
+		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
+		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
+	}
+
+	/* Enable lmac */
+	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
+		       CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+
+	/* Restore default cfg, in case low-level firmware changed it */
+	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
+
+	if ((bgx->lmac_type != BGX_MODE_XFI) &&
+	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
+	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
+	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
+		if (!lmac->phydev)
+			return -ENODEV;
+
+		lmac->phydev->dev_flags = 0;
+
+		if (phy_connect_direct(&lmac->netdev, lmac->phydev,
+				       bgx_lmac_handler,
+				       PHY_INTERFACE_MODE_SGMII))
+			return -ENODEV;
+
+		phy_start_aneg(lmac->phydev);
+	} else {
+		lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
+						   WQ_MEM_RECLAIM, 1);
+		if (!lmac->check_link)
+			return -ENOMEM;
+		INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
+		queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+	}
+
+	return 0;
+}
+
+static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
+{
+	struct lmac *lmac;
+	u64 cmrx_cfg;
+
+	lmac = &bgx->lmac[lmacid];
+	if (lmac->check_link) {
+		/* Destroy work queue */
+		cancel_delayed_work(&lmac->dwork);
+		flush_workqueue(lmac->check_link);
+		destroy_workqueue(lmac->check_link);
+	}
+
+	cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+	cmrx_cfg &= ~(1 << 15);
+	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+	bgx_flush_dmac_addrs(bgx, lmacid);
+
+	if (lmac->phydev)
+		phy_disconnect(lmac->phydev);
+
+	lmac->phydev = NULL;
+}
+
+static void bgx_set_num_ports(struct bgx *bgx)
+{
+	u64 fw_lmac_count;
+
+	switch (bgx->qlm_mode) {
+	case QLM_MODE_SGMII:
+		bgx->lmac_count = 4;
+		bgx->lmac_type = BGX_MODE_SGMII;
+		bgx->lane_to_sds = 0;
+		break;
+	case QLM_MODE_XAUI_1X4:
+		bgx->lmac_count = 1;
+		bgx->lmac_type = BGX_MODE_XAUI;
+		bgx->lane_to_sds = 0xE4;
+		break;
+	case QLM_MODE_RXAUI_2X2:
+		bgx->lmac_count = 2;
+		bgx->lmac_type = BGX_MODE_RXAUI;
+		bgx->lane_to_sds = 0xE4;
+		break;
+	case QLM_MODE_XFI_4X1:
+		bgx->lmac_count = 4;
+		bgx->lmac_type = BGX_MODE_XFI;
+		bgx->lane_to_sds = 0;
+		break;
+	case QLM_MODE_XLAUI_1X4:
+		bgx->lmac_count = 1;
+		bgx->lmac_type = BGX_MODE_XLAUI;
+		bgx->lane_to_sds = 0xE4;
+		break;
+	case QLM_MODE_10G_KR_4X1:
+		bgx->lmac_count = 4;
+		bgx->lmac_type = BGX_MODE_10G_KR;
+		bgx->lane_to_sds = 0;
+		bgx->use_training = 1;
+		break;
+	case QLM_MODE_40G_KR4_1X4:
+		bgx->lmac_count = 1;
+		bgx->lmac_type = BGX_MODE_40G_KR;
+		bgx->lane_to_sds = 0xE4;
+		bgx->use_training = 1;
+		break;
+	default:
+		bgx->lmac_count = 0;
+		break;
+	}
+
+	/* Check if low-level firmware has programmed the LMAC count
+	 * based on board type; if so, use it instead of the static
+	 * defaults set above.
+	 */
+	fw_lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
+	if (fw_lmac_count != 4)
+		bgx->lmac_count = fw_lmac_count;
+}
+
+static void bgx_init_hw(struct bgx *bgx)
+{
+	int i;
+
+	bgx_set_num_ports(bgx);
+
+	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
+	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
+		dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);
+
+	/* Set lmac type and lane2serdes mapping */
+	for (i = 0; i < bgx->lmac_count; i++) {
+		if (bgx->lmac_type == BGX_MODE_RXAUI) {
+			if (i)
+				bgx->lane_to_sds = 0x0e;
+			else
+				bgx->lane_to_sds = 0x04;
+			bgx_reg_write(bgx, i, BGX_CMRX_CFG,
+				      (bgx->lmac_type << 8) | bgx->lane_to_sds);
+			continue;
+		}
+		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
+			      (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
+		bgx->lmac[i].lmacid_bd = lmac_count;
+		lmac_count++;
+	}
+
+	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
+	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);
+
+	/* Set the backpressure AND mask */
+	for (i = 0; i < bgx->lmac_count; i++)
+		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
+			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
+			       (i * MAX_BGX_CHANS_PER_LMAC));
+
+	/* Disable all MAC filtering */
+	for (i = 0; i < RX_DMAC_COUNT; i++)
+		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);
+
+	/* Disable MAC steering (NCSI traffic) */
+	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
+		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
+}
+
+static void bgx_get_qlm_mode(struct bgx *bgx)
+{
+	struct device *dev = &bgx->pdev->dev;
+	int lmac_type;
+	int train_en;
+
+	/* Read LMAC0 type to figure out QLM mode
+	 * This is configured by low level firmware
+	 */
+	lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
+	lmac_type = (lmac_type >> 8) & 0x07;
+
+	train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
+				SPU_PMD_CRTL_TRAIN_EN;
+
+	switch (lmac_type) {
+	case BGX_MODE_SGMII:
+		bgx->qlm_mode = QLM_MODE_SGMII;
+		dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id);
+		break;
+	case BGX_MODE_XAUI:
+		bgx->qlm_mode = QLM_MODE_XAUI_1X4;
+		dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id);
+		break;
+	case BGX_MODE_RXAUI:
+		bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
+		dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id);
+		break;
+	case BGX_MODE_XFI:
+		if (!train_en) {
+			bgx->qlm_mode = QLM_MODE_XFI_4X1;
+			dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id);
+		} else {
+			bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
+			dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id);
+		}
+		break;
+	case BGX_MODE_XLAUI:
+		if (!train_en) {
+			bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
+			dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id);
+		} else {
+			bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
+			dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id);
+		}
+		break;
+	default:
+		bgx->qlm_mode = QLM_MODE_SGMII;
+		dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id);
+	}
+}
+
+static void bgx_init_of(struct bgx *bgx, struct device_node *np)
+{
+	struct device_node *np_child;
+	u8 lmac = 0;
+
+	for_each_child_of_node(np, np_child) {
+		struct device_node *phy_np;
+		const char *mac;
+
+		phy_np = of_parse_phandle(np_child, "phy-handle", 0);
+		if (phy_np)
+			bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
+
+		mac = of_get_mac_address(np_child);
+		if (mac)
+			ether_addr_copy(bgx->lmac[lmac].mac, mac);
+
+		SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
+		bgx->lmac[lmac].lmacid = lmac;
+		lmac++;
+		if (lmac == MAX_LMAC_PER_BGX)
+			break;
+	}
+}
+
+static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	int err;
+	struct device *dev = &pdev->dev;
+	struct bgx *bgx = NULL;
+	struct device_node *np;
+	char bgx_sel[5];
+	u8 lmac;
+
+	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
+	if (!bgx)
+		return -ENOMEM;
+	bgx->pdev = pdev;
+
+	pci_set_drvdata(pdev, bgx);
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(dev, "Failed to enable PCI device\n");
+		pci_set_drvdata(pdev, NULL);
+		return err;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		dev_err(dev, "PCI request regions failed 0x%x\n", err);
+		goto err_disable_device;
+	}
+
+	/* MAP configuration registers */
+	bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+	if (!bgx->reg_base) {
+		dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
+		err = -ENOMEM;
+		goto err_release_regions;
+	}
+	bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
+	bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX;
+
+	bgx_vnic[bgx->bgx_id] = bgx;
+	bgx_get_qlm_mode(bgx);
+
+	snprintf(bgx_sel, sizeof(bgx_sel), "bgx%d", bgx->bgx_id);
+	np = of_find_node_by_name(NULL, bgx_sel);
+	if (np)
+		bgx_init_of(bgx, np);
+
+	bgx_init_hw(bgx);
+
+	/* Enable all LMACs */
+	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+		err = bgx_lmac_enable(bgx, lmac);
+		if (err) {
+			dev_err(dev, "BGX%d failed to enable lmac%d\n",
+				bgx->bgx_id, lmac);
+			goto err_enable;
+		}
+	}
+
+	return 0;
+
+err_enable:
+	bgx_vnic[bgx->bgx_id] = NULL;
+err_release_regions:
+	pci_release_regions(pdev);
+err_disable_device:
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	return err;
+}
+
+static void bgx_remove(struct pci_dev *pdev)
+{
+	struct bgx *bgx = pci_get_drvdata(pdev);
+	u8 lmac;
+
+	/* Disable all LMACs */
+	for (lmac = 0; lmac < bgx->lmac_count; lmac++)
+		bgx_lmac_disable(bgx, lmac);
+
+	bgx_vnic[bgx->bgx_id] = NULL;
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver bgx_driver = {
+	.name = DRV_NAME,
+	.id_table = bgx_id_table,
+	.probe = bgx_probe,
+	.remove = bgx_remove,
+};
+
+static int __init bgx_init_module(void)
+{
+	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+	return pci_register_driver(&bgx_driver);
+}
+
+static void __exit bgx_cleanup_module(void)
+{
+	pci_unregister_driver(&bgx_driver);
+}
+
+module_init(bgx_init_module);
+module_exit(bgx_cleanup_module);
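
The register helpers above fold the LMAC index into the CSR address as
((u32)lmac << 20), i.e. each LMAC's register block sits at a 1 MiB stride
from the BGX base (the header below annotates per-LMAC registers with
"+(0..3) << 20").  A small sketch of that address arithmetic, assuming
nothing beyond what bgx_reg_read() shows (the helper and the standalone
program are illustrative):

#include <stdint.h>
#include <stdio.h>

#define BGX_SPUX_INT	0x10220	/* per-LMAC: "+(0..3) << 20" */

/* Byte offset of a per-LMAC register from the BGX BAR base. */
static uint64_t bgx_csr_offset(unsigned int lmac, uint64_t reg)
{
	return ((uint64_t)lmac << 20) + reg;	/* 1 MiB per-LMAC stride */
}

int main(void)
{
	unsigned int lmac;

	for (lmac = 0; lmac < 4; lmac++)
		printf("LMAC%u SPU_INT at base + 0x%llx\n", lmac,
		       (unsigned long long)bgx_csr_offset(lmac, BGX_SPUX_INT));
	return 0;
}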
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
new file mode 100644
index 0000000..ba4f53b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef THUNDER_BGX_H
+#define THUNDER_BGX_H
+
+#define    MAX_BGX_THUNDER			8 /* Max 4 nodes, 2 per node */
+#define    MAX_BGX_PER_CN88XX			2
+#define    MAX_LMAC_PER_BGX			4
+#define    MAX_BGX_CHANS_PER_LMAC		16
+#define    MAX_DMAC_PER_LMAC			8
+#define    MAX_FRAME_SIZE			9216
+
+#define    MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE	2
+
+#define    MAX_LMAC	(MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX)
+
+/* Registers */
+#define BGX_CMRX_CFG			0x00
+#define  CMR_PKT_TX_EN				BIT_ULL(13)
+#define  CMR_PKT_RX_EN				BIT_ULL(14)
+#define  CMR_EN					BIT_ULL(15)
+#define BGX_CMR_GLOBAL_CFG		0x08
+#define  CMR_GLOBAL_CFG_FCS_STRIP		BIT_ULL(6)
+#define BGX_CMRX_RX_ID_MAP		0x60
+#define BGX_CMRX_RX_STAT0		0x70
+#define BGX_CMRX_RX_STAT1		0x78
+#define BGX_CMRX_RX_STAT2		0x80
+#define BGX_CMRX_RX_STAT3		0x88
+#define BGX_CMRX_RX_STAT4		0x90
+#define BGX_CMRX_RX_STAT5		0x98
+#define BGX_CMRX_RX_STAT6		0xA0
+#define BGX_CMRX_RX_STAT7		0xA8
+#define BGX_CMRX_RX_STAT8		0xB0
+#define BGX_CMRX_RX_STAT9		0xB8
+#define BGX_CMRX_RX_STAT10		0xC0
+#define BGX_CMRX_RX_BP_DROP		0xC8
+#define BGX_CMRX_RX_DMAC_CTL		0x0E8
+#define BGX_CMR_RX_DMACX_CAM		0x200
+#define  RX_DMACX_CAM_EN			BIT_ULL(48)
+#define  RX_DMACX_CAM_LMACID(x)			((x) << 49)
+#define  RX_DMAC_COUNT				32
+#define BGX_CMR_RX_STREERING		0x300
+#define  RX_TRAFFIC_STEER_RULE_COUNT		8
+#define BGX_CMR_CHAN_MSK_AND		0x450
+#define BGX_CMR_BIST_STATUS		0x460
+#define BGX_CMR_RX_LMACS		0x468
+#define BGX_CMRX_TX_STAT0		0x600
+#define BGX_CMRX_TX_STAT1		0x608
+#define BGX_CMRX_TX_STAT2		0x610
+#define BGX_CMRX_TX_STAT3		0x618
+#define BGX_CMRX_TX_STAT4		0x620
+#define BGX_CMRX_TX_STAT5		0x628
+#define BGX_CMRX_TX_STAT6		0x630
+#define BGX_CMRX_TX_STAT7		0x638
+#define BGX_CMRX_TX_STAT8		0x640
+#define BGX_CMRX_TX_STAT9		0x648
+#define BGX_CMRX_TX_STAT10		0x650
+#define BGX_CMRX_TX_STAT11		0x658
+#define BGX_CMRX_TX_STAT12		0x660
+#define BGX_CMRX_TX_STAT13		0x668
+#define BGX_CMRX_TX_STAT14		0x670
+#define BGX_CMRX_TX_STAT15		0x678
+#define BGX_CMRX_TX_STAT16		0x680
+#define BGX_CMRX_TX_STAT17		0x688
+#define BGX_CMR_TX_LMACS		0x1000
+
+#define BGX_SPUX_CONTROL1		0x10000
+#define  SPU_CTL_LOW_POWER			BIT_ULL(11)
+#define  SPU_CTL_RESET				BIT_ULL(15)
+#define BGX_SPUX_STATUS1		0x10008
+#define  SPU_STATUS1_RCV_LNK			BIT_ULL(2)
+#define BGX_SPUX_STATUS2		0x10020
+#define  SPU_STATUS2_RCVFLT			BIT_ULL(10)
+#define BGX_SPUX_BX_STATUS		0x10028
+#define  SPU_BX_STATUS_RX_ALIGN			BIT_ULL(12)
+#define BGX_SPUX_BR_STATUS1		0x10030
+#define  SPU_BR_STATUS_BLK_LOCK			BIT_ULL(0)
+#define  SPU_BR_STATUS_RCV_LNK			BIT_ULL(12)
+#define BGX_SPUX_BR_PMD_CRTL		0x10068
+#define  SPU_PMD_CRTL_TRAIN_EN			BIT_ULL(1)
+#define BGX_SPUX_BR_PMD_LP_CUP		0x10078
+#define BGX_SPUX_BR_PMD_LD_CUP		0x10088
+#define BGX_SPUX_BR_PMD_LD_REP		0x10090
+#define BGX_SPUX_FEC_CONTROL		0x100A0
+#define  SPU_FEC_CTL_FEC_EN			BIT_ULL(0)
+#define  SPU_FEC_CTL_ERR_EN			BIT_ULL(1)
+#define BGX_SPUX_AN_CONTROL		0x100C8
+#define  SPU_AN_CTL_AN_EN			BIT_ULL(12)
+#define  SPU_AN_CTL_XNP_EN			BIT_ULL(13)
+#define BGX_SPUX_AN_ADV			0x100D8
+#define BGX_SPUX_MISC_CONTROL		0x10218
+#define  SPU_MISC_CTL_INTLV_RDISP		BIT_ULL(10)
+#define  SPU_MISC_CTL_RX_DIS			BIT_ULL(12)
+#define BGX_SPUX_INT			0x10220	/* +(0..3) << 20 */
+#define BGX_SPUX_INT_W1S		0x10228
+#define BGX_SPUX_INT_ENA_W1C		0x10230
+#define BGX_SPUX_INT_ENA_W1S		0x10238
+#define BGX_SPU_DBG_CONTROL		0x10300
+#define  SPU_DBG_CTL_AN_ARB_LINK_CHK_EN		BIT_ULL(18)
+#define  SPU_DBG_CTL_AN_NONCE_MCT_DIS		BIT_ULL(29)
+
+#define BGX_SMUX_RX_INT			0x20000
+#define BGX_SMUX_RX_JABBER		0x20030
+#define BGX_SMUX_RX_CTL			0x20048
+#define  SMU_RX_CTL_STATUS			(3ull << 0)
+#define BGX_SMUX_TX_APPEND		0x20100
+#define  SMU_TX_APPEND_FCS_D			BIT_ULL(2)
+#define BGX_SMUX_TX_MIN_PKT		0x20118
+#define BGX_SMUX_TX_INT			0x20140
+#define BGX_SMUX_TX_CTL			0x20178
+#define  SMU_TX_CTL_DIC_EN			BIT_ULL(0)
+#define  SMU_TX_CTL_UNI_EN			BIT_ULL(1)
+#define  SMU_TX_CTL_LNK_STATUS			(3ull << 4)
+#define BGX_SMUX_TX_THRESH		0x20180
+#define BGX_SMUX_CTL			0x20200
+#define  SMU_CTL_RX_IDLE			BIT_ULL(0)
+#define  SMU_CTL_TX_IDLE			BIT_ULL(1)
+
+#define BGX_GMP_PCS_MRX_CTL		0x30000
+#define	 PCS_MRX_CTL_RST_AN			BIT_ULL(9)
+#define	 PCS_MRX_CTL_PWR_DN			BIT_ULL(11)
+#define	 PCS_MRX_CTL_AN_EN			BIT_ULL(12)
+#define	 PCS_MRX_CTL_RESET			BIT_ULL(15)
+#define BGX_GMP_PCS_MRX_STATUS		0x30008
+#define	 PCS_MRX_STATUS_AN_CPT			BIT_ULL(5)
+#define BGX_GMP_PCS_ANX_AN_RESULTS	0x30020
+#define BGX_GMP_PCS_SGM_AN_ADV		0x30068
+#define BGX_GMP_PCS_MISCX_CTL		0x30078
+#define  PCS_MISC_CTL_GMX_ENO			BIT_ULL(11)
+#define  PCS_MISC_CTL_SAMP_PT_MASK	0x7Full
+#define BGX_GMP_GMI_PRTX_CFG		0x38020
+#define  GMI_PORT_CFG_SPEED			BIT_ULL(1)
+#define  GMI_PORT_CFG_DUPLEX			BIT_ULL(2)
+#define  GMI_PORT_CFG_SLOT_TIME			BIT_ULL(3)
+#define  GMI_PORT_CFG_SPEED_MSB			BIT_ULL(8)
+#define BGX_GMP_GMI_RXX_JABBER		0x38038
+#define BGX_GMP_GMI_TXX_THRESH		0x38210
+#define BGX_GMP_GMI_TXX_APPEND		0x38218
+#define BGX_GMP_GMI_TXX_SLOT		0x38220
+#define BGX_GMP_GMI_TXX_BURST		0x38228
+#define BGX_GMP_GMI_TXX_MIN_PKT		0x38240
+#define BGX_GMP_GMI_TXX_SGMII_CTL	0x38300
+
+#define BGX_MSIX_VEC_0_29_ADDR		0x400000 /* +(0..29) << 4 */
+#define BGX_MSIX_VEC_0_29_CTL		0x400008
+#define BGX_MSIX_PBA_0			0x4F0000
+
+/* MSI-X interrupts */
+#define BGX_MSIX_VECTORS	30
+#define BGX_LMAC_VEC_OFFSET	7
+#define BGX_MSIX_VEC_SHIFT	4
+
+#define CMRX_INT		0
+#define SPUX_INT		1
+#define SMUX_RX_INT		2
+#define SMUX_TX_INT		3
+#define GMPX_PCS_INT		4
+#define GMPX_GMI_RX_INT		5
+#define GMPX_GMI_TX_INT		6
+#define CMR_MEM_INT		28
+#define SPU_MEM_INT		29
+
+#define LMAC_INTR_LINK_UP	BIT(0)
+#define LMAC_INTR_LINK_DOWN	BIT(1)
+
+/* RX_DMAC_CTL configuration */
+enum MCAST_MODE {
+	MCAST_MODE_REJECT,
+	MCAST_MODE_ACCEPT,
+	MCAST_MODE_CAM_FILTER,
+	RSVD
+};
+
+#define BCAST_ACCEPT	1
+#define CAM_ACCEPT	1
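+
+/* Assumed layout of BGX_CMRX_RX_DMAC_CTL (not spelled out in this patch):
+ * bit 0 is BCAST accept, bits 2:1 are MCAST_MODE, bit 3 is CAM accept.
+ * On that reading, the default 0x03 written by bgx_lmac_enable() is
+ * (MCAST_MODE_ACCEPT << 1) | BCAST_ACCEPT.
+ */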
+
+void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
+unsigned bgx_get_map(int node);
+int bgx_get_lmac_count(int node, int bgx_idx);
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
+void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
+u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
+u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
+#define BGX_RX_STATS_COUNT 11
+#define BGX_TX_STATS_COUNT 18
+
+struct bgx_stats {
+	u64 rx_stats[BGX_RX_STATS_COUNT];
+	u64 tx_stats[BGX_TX_STATS_COUNT];
+};
+
+enum LMAC_TYPE {
+	BGX_MODE_SGMII = 0, /* 1 lane, 1.250 Gbaud */
+	BGX_MODE_XAUI = 1,  /* 4 lanes, 3.125 Gbaud */
+	BGX_MODE_DXAUI = 1, /* 4 lanes, 6.250 Gbaud */
+	BGX_MODE_RXAUI = 2, /* 2 lanes, 6.250 Gbaud */
+	BGX_MODE_XFI = 3,   /* 1 lane, 10.3125 Gbaud */
+	BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */
+	BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */
+	BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */
+};
+
+enum qlm_mode {
+	QLM_MODE_SGMII,         /* SGMII, each lane independent */
+	QLM_MODE_XAUI_1X4,      /* 1 XAUI or DXAUI, 4 lanes */
+	QLM_MODE_RXAUI_2X2,     /* 2 RXAUI, 2 lanes each */
+	QLM_MODE_XFI_4X1,       /* 4 XFI, 1 lane each */
+	QLM_MODE_XLAUI_1X4,     /* 1 XLAUI, 4 lanes each */
+	QLM_MODE_10G_KR_4X1,    /* 4 10GBASE-KR, 1 lane each */
+	QLM_MODE_40G_KR4_1X4,   /* 1 40GBASE-KR4, 4 lanes each */
+};
+
+#endif /* THUNDER_BGX_H */
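
Throughout thunder_bgx.c the (node, bgx_idx) pair indexes the flat
bgx_vnic[] table as (node * MAX_BGX_PER_CN88XX) + bgx_idx, which is what
bounds MAX_BGX_THUNDER at 8 (four nodes, two BGX each).  A compile-time
sketch of that mapping (the helper name is invented):

#define MAX_BGX_PER_CN88XX	2
#define MAX_BGX_THUNDER		8	/* max 4 nodes, 2 BGX per node */

/* Flat index into bgx_vnic[]: node-major, BGX-minor. */
static inline int bgx_vnic_index(int node, int bgx_idx)
{
	return (node * MAX_BGX_PER_CN88XX) + bgx_idx;
}

/* e.g. node 3, bgx_idx 1 -> slot 7 == MAX_BGX_THUNDER - 1 */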
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 7daa088..a79813a 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index b96e4bf..8f7aa53 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1025,19 +1025,19 @@
 {
 	struct adapter *adapter = phy->adapter;
 	const struct firmware *fw;
-	char buf[64];
+	const char *fw_name;
 	u32 csum;
 	const __be32 *p;
 	u16 *cache = phy->phy_cache;
-	int i, ret;
+	int i, ret = -EINVAL;
 
-	snprintf(buf, sizeof(buf), get_edc_fw_name(edc_idx));
-
-	ret = request_firmware(&fw, buf, &adapter->pdev->dev);
+	fw_name = get_edc_fw_name(edc_idx);
+	if (fw_name)
+		ret = request_firmware(&fw, fw_name, &adapter->pdev->dev);
 	if (ret < 0) {
 		dev_err(&adapter->pdev->dev,
 			"could not upgrade firmware: unable to load %s\n",
-			buf);
+			fw_name);
 		return ret;
 	}
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index e052f05..629f75d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -46,17 +46,19 @@
 #include <linux/spinlock.h>
 #include <linux/timer.h>
 #include <linux/vmalloc.h>
+#include <linux/etherdevice.h>
 #include <asm/io.h>
 #include "cxgb4_uld.h"
 
 #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
 
 enum {
-	MAX_NPORTS = 4,     /* max # of ports */
-	SERNUM_LEN = 24,    /* Serial # length */
-	EC_LEN     = 16,    /* E/C length */
-	ID_LEN     = 16,    /* ID length */
-	PN_LEN     = 16,    /* Part Number length */
+	MAX_NPORTS	= 4,     /* max # of ports */
+	SERNUM_LEN	= 24,    /* Serial # length */
+	EC_LEN		= 16,    /* E/C length */
+	ID_LEN		= 16,    /* ID length */
+	PN_LEN		= 16,    /* Part Number length */
+	MACADDR_LEN	= 12,    /* MAC Address length */
 };
 
 enum {
@@ -198,23 +200,45 @@
 };
 
 struct tp_tcp_stats {
-	u32 tcpOutRsts;
-	u64 tcpInSegs;
-	u64 tcpOutSegs;
-	u64 tcpRetransSegs;
+	u32 tcp_out_rsts;
+	u64 tcp_in_segs;
+	u64 tcp_out_segs;
+	u64 tcp_retrans_segs;
+};
+
+struct tp_usm_stats {
+	u32 frames;
+	u32 drops;
+	u64 octets;
+};
+
+struct tp_fcoe_stats {
+	u32 frames_ddp;
+	u32 frames_drop;
+	u64 octets_ddp;
 };
 
 struct tp_err_stats {
-	u32 macInErrs[4];
-	u32 hdrInErrs[4];
-	u32 tcpInErrs[4];
-	u32 tnlCongDrops[4];
-	u32 ofldChanDrops[4];
-	u32 tnlTxDrops[4];
-	u32 ofldVlanDrops[4];
-	u32 tcp6InErrs[4];
-	u32 ofldNoNeigh;
-	u32 ofldCongDefer;
+	u32 mac_in_errs[4];
+	u32 hdr_in_errs[4];
+	u32 tcp_in_errs[4];
+	u32 tnl_cong_drops[4];
+	u32 ofld_chan_drops[4];
+	u32 tnl_tx_drops[4];
+	u32 ofld_vlan_drops[4];
+	u32 tcp6_in_errs[4];
+	u32 ofld_no_neigh;
+	u32 ofld_cong_defer;
+};
+
+struct tp_cpl_stats {
+	u32 req[4];
+	u32 rsp[4];
+};
+
+struct tp_rdma_stats {
+	u32 rqe_dfr_pkt;
+	u32 rqe_dfr_mod;
 };
 
 struct sge_params {
@@ -224,7 +248,6 @@
 };
 
 struct tp_params {
-	unsigned int ntxchan;        /* # of Tx channels */
 	unsigned int tre;            /* log2 of core clocks per TP tick */
 	unsigned int la_mask;        /* what events are recorded by TP LA */
 	unsigned short tx_modq_map;  /* TX modulation scheduler queue to */
@@ -259,6 +282,7 @@
 	u8 sn[SERNUM_LEN + 1];
 	u8 id[ID_LEN + 1];
 	u8 pn[PN_LEN + 1];
+	u8 na[MACADDR_LEN + 1];
 };
 
 struct pci_params {
@@ -273,6 +297,7 @@
 
 #define CHELSIO_T4		0x4
 #define CHELSIO_T5		0x5
+#define CHELSIO_T6		0x6
 
 enum chip_type {
 	T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
@@ -284,6 +309,10 @@
 	T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
 	T5_FIRST_REV	= T5_A0,
 	T5_LAST_REV	= T5_A1,
+
+	T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0),
+	T6_FIRST_REV    = T6_A0,
+	T6_LAST_REV     = T6_A0,
 };
 
 struct devlog_params {
@@ -292,6 +321,15 @@
 	u32 size;                       /* size of log */
 };
 
+/* Stores chip specific parameters */
+struct arch_specific_params {
+	u8 nchan;
+	u16 mps_rplc_size;
+	u16 vfcount;
+	u32 sge_fl_db;
+	u16 mps_tcam_size;
+};
+
 struct adapter_params {
 	struct sge_params sge;
 	struct tp_params  tp;
@@ -317,6 +355,7 @@
 	unsigned char nports;             /* # of ethernet ports */
 	unsigned char portvec;
 	enum chip_type chip;               /* chip code */
+	struct arch_specific_params arch;  /* chip specific params */
 	unsigned char offload;
 
 	unsigned char bypass;
@@ -328,6 +367,17 @@
 	unsigned int max_ird_adapter;     /* Max read depth per adapter */
 };
 
+/* State needed to monitor the forward progress of SGE Ingress DMA activities
+ * and possible hangs.
+ */
+struct sge_idma_monitor_state {
+	unsigned int idma_1s_thresh;	/* 1s threshold in Core Clock ticks */
+	unsigned int idma_stalled[2];	/* synthesized stalled timers in HZ */
+	unsigned int idma_state[2];	/* IDMA Hang detect state */
+	unsigned int idma_qid[2];	/* IDMA Hung Ingress Queue ID */
+	unsigned int idma_warn[2];	/* time to warning in HZ */
+};
+
 #include "t4fw_api.h"
 
 #define FW_VERSION(chip) ( \
@@ -421,6 +471,7 @@
 	u8     rss_mode;
 	struct link_config link_cfg;
 	u16   *rss;
+	struct port_stats stats_base;
 #ifdef CONFIG_CHELSIO_T4_DCB
 	struct port_dcb_info dcb;     /* Data Center Bridging support */
 #endif
@@ -630,12 +681,7 @@
 	u32 fl_align;               /* response queue message alignment */
 	u32 fl_starve_thres;        /* Free List starvation threshold */
 
-	/* State variables for detecting an SGE Ingress DMA hang */
-	unsigned int idma_1s_thresh;/* SGE same State Counter 1s threshold */
-	unsigned int idma_stalled[2];/* SGE synthesized stalled timers in HZ */
-	unsigned int idma_state[2]; /* SGE IDMA Hang detect state */
-	unsigned int idma_qid[2];   /* SGE IDMA Hung Ingress Queue ID */
-
+	struct sge_idma_monitor_state idma_monitor;
 	unsigned int egr_start;
 	unsigned int egr_sz;
 	unsigned int ingr_start;
@@ -644,6 +690,7 @@
 	struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
 	unsigned long *starving_fl;
 	unsigned long *txq_maperr;
+	unsigned long *blocked_fl;
 	struct timer_list rx_timer; /* refills starving FLs */
 	struct timer_list tx_timer; /* checks Tx queues */
 };
@@ -665,6 +712,12 @@
 
 #endif
 
+struct doorbell_stats {
+	u32 db_drop;
+	u32 db_empty;
+	u32 db_full;
+};
+
 struct adapter {
 	void __iomem *regs;
 	void __iomem *bar2;
@@ -672,7 +725,7 @@
 	struct pci_dev *pdev;
 	struct device *pdev_dev;
 	unsigned int mbox;
-	unsigned int fn;
+	unsigned int pf;
 	unsigned int flags;
 	enum chip_type chip;
 
@@ -682,13 +735,12 @@
 	struct cxgb4_virt_res vres;
 	unsigned int swintr;
 
-	unsigned int wol;
-
 	struct {
 		unsigned short vec;
 		char desc[IFNAMSIZ + 10];
 	} msix_info[MAX_INGQ + 1];
 
+	struct doorbell_stats db_stats;
 	struct sge sge;
 
 	struct net_device *port[MAX_NPORTS];
@@ -843,6 +895,16 @@
 	VLAN_REWRITE
 };
 
+static inline int is_offload(const struct adapter *adap)
+{
+	return adap->params.offload;
+}
+
+static inline int is_t6(enum chip_type chip)
+{
+	return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T6;
+}
+
 static inline int is_t5(enum chip_type chip)
 {
 	return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5;
@@ -887,6 +949,22 @@
 }
 
 /**
+ * t4_set_hw_addr - store a port's MAC address in SW
+ * @adapter: the adapter
+ * @port_idx: the port index
+ * @hw_addr: the Ethernet address
+ *
+ * Store the Ethernet address of the given port in SW.  Called by the common
+ * code when it retrieves a port's Ethernet address from EEPROM.
+ */
+static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx,
+				  u8 hw_addr[])
+{
+	ether_addr_copy(adapter->port[port_idx]->dev_addr, hw_addr);
+	ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr);
+}
+
+/**
  * netdev2pinfo - return the port_info structure associated with a net_device
  * @dev: the netdev
  *
@@ -1055,7 +1133,7 @@
 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 		     struct net_device *dev, int intr_idx,
-		     struct sge_fl *fl, rspq_handler_t hnd);
+		     struct sge_fl *fl, rspq_handler_t hnd, int cong);
 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
 			 struct net_device *dev, struct netdev_queue *netdevq,
 			 unsigned int iqid);
@@ -1095,6 +1173,19 @@
 	}
 }
 
+static inline int is_10gbt_device(int device)
+{
+	/* this should be set based upon device capabilities */
+	switch (device) {
+	case 0x4409:
+	case 0x4486:
+		return 1;
+
+	default:
+		return 0;
+	}
+}
+
 static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
 {
 	return adap->params.vpd.cclk / 1000;
@@ -1117,9 +1208,19 @@
 void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
 		      u32 val);
 
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+			    int size, void *rpl, bool sleep_ok, int timeout);
 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
 		    void *rpl, bool sleep_ok);
 
+static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox,
+				     const void *cmd, int size, void *rpl,
+				     int timeout)
+{
+	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
+				       timeout);
+}
+
 static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
 			     int size, void *rpl)
 {
@@ -1147,10 +1248,14 @@
 int t4_slow_intr_handler(struct adapter *adapter);
 
 int t4_wait_dev_ready(void __iomem *regs);
-int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
 		  struct link_config *lc);
 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
 
+u32 t4_read_pcie_cfg4(struct adapter *adap, int reg);
+u32 t4_get_util_window(struct adapter *adap);
+void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window);
+
 #define T4_MEMORY_WRITE	0
 #define T4_MEMORY_READ	1
 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
@@ -1165,10 +1270,16 @@
 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
 
 int t4_seeprom_wp(struct adapter *adapter, bool enable);
-int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
 int t4_read_flash(struct adapter *adapter, unsigned int addr,
 		  unsigned int nwords, u32 *data, int byte_oriented);
 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
+int t4_load_phy_fw(struct adapter *adap,
+		   int win, spinlock_t *lock,
+		   int (*phy_fw_version)(const u8 *, size_t),
+		   const u8 *phy_fw_data, size_t phy_fw_size);
+int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver);
 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
 		  const u8 *fw_data, unsigned int size, int force);
@@ -1182,7 +1293,7 @@
 int t4_prep_adapter(struct adapter *adapter);
 
 enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
-int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
+int t4_bar2_sge_qregs(struct adapter *adapter,
 		      unsigned int qid,
 		      enum t4_bar2_qtype qtype,
 		      int user,
@@ -1196,12 +1307,15 @@
 int t4_init_sge_params(struct adapter *adapter);
 int t4_init_tp_params(struct adapter *adap);
 int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
+int t4_init_rss_mode(struct adapter *adap, int mbox);
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
 void t4_fatal_err(struct adapter *adapter);
 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
 			int start, int n, const u16 *rspq, unsigned int nrspq);
 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
 		       unsigned int flags);
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+		     unsigned int flags, unsigned int defq);
 int t4_read_rss(struct adapter *adapter, u16 *entries);
 void t4_read_rss_key(struct adapter *adapter, u32 *key);
 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx);
@@ -1212,10 +1326,7 @@
 u32 t4_read_rss_pf_map(struct adapter *adapter);
 u32 t4_read_rss_pf_mask(struct adapter *adapter);
 
-int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
-	       u64 *parity);
-int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
-		u64 *parity);
+unsigned int t4_get_mps_bg_map(struct adapter *adapter, int idx);
 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
@@ -1227,21 +1338,36 @@
 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
 		 const unsigned int *valp);
 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
+void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
+			unsigned int *pif_req_wrptr,
+			unsigned int *pif_rsp_wrptr);
+void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp);
 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
 const char *t4_get_port_type_description(enum fw_port_type port_type);
 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+			      struct port_stats *stats,
+			      struct port_stats *offset);
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
 			    unsigned int mask, unsigned int val);
 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st);
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st);
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st);
 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
 			 struct tp_tcp_stats *v6);
+void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
+		       struct tp_fcoe_stats *st);
 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
 		  const unsigned short *alpha, const unsigned short *beta);
 
 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);
 
+void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate);
 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
 
 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
@@ -1260,13 +1386,16 @@
 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		    unsigned int vf, unsigned int nparams, const u32 *params,
 		    u32 *val);
+int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		       unsigned int vf, unsigned int nparams, const u32 *params,
+		       u32 *val, int rw);
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
+			  unsigned int pf, unsigned int vf,
+			  unsigned int nparams, const u32 *params,
+			  const u32 *val, int timeout);
 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		  unsigned int vf, unsigned int nparams, const u32 *params,
 		  const u32 *val);
-int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
-			  unsigned int pf, unsigned int vf,
-			  unsigned int nparams, const u32 *params,
-			  const u32 *val);
 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
@@ -1275,6 +1404,9 @@
 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
 		unsigned int *rss_size);
+int t4_free_vi(struct adapter *adap, unsigned int mbox,
+	       unsigned int pf, unsigned int vf,
+	       unsigned int viid);
 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
 		int mtu, int promisc, int all_multi, int bcast, int vlanex,
 		bool sleep_ok);
@@ -1304,6 +1436,7 @@
 		    unsigned int vf, unsigned int eqid);
 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		    unsigned int vf, unsigned int eqid);
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
 void t4_db_full(struct adapter *adapter);
 void t4_db_dropped(struct adapter *adapter);
@@ -1311,4 +1444,9 @@
 			 u32 addr, u32 val);
 void t4_sge_decode_idma_state(struct adapter *adapter, int state);
 void t4_free_mem(void *addr);
+void t4_idma_monitor_init(struct adapter *adapter,
+			  struct sge_idma_monitor_state *idma);
+void t4_idma_monitor(struct adapter *adapter,
+		     struct sge_idma_monitor_state *idma,
+		     int hz, int ticks);
 #endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 371f75e..484eb8c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -182,6 +182,95 @@
 	.release = seq_release_private
 };
 
+static int cim_pif_la_show(struct seq_file *seq, void *v, int idx)
+{
+	const u32 *p = v;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "Cntl ID DataBE   Addr                 Data\n");
+	} else if (idx < CIM_PIFLA_SIZE) {
+		seq_printf(seq, " %02x  %02x  %04x  %08x %08x%08x%08x%08x\n",
+			   (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f,
+			   p[5] & 0xffff, p[4], p[3], p[2], p[1], p[0]);
+	} else {
+		if (idx == CIM_PIFLA_SIZE)
+			seq_puts(seq, "\nCntl ID               Data\n");
+		seq_printf(seq, " %02x  %02x %08x%08x%08x%08x\n",
+			   (p[4] >> 6) & 0xff, p[4] & 0x3f,
+			   p[3], p[2], p[1], p[0]);
+	}
+	return 0;
+}
+
+static int cim_pif_la_open(struct inode *inode, struct file *file)
+{
+	struct seq_tab *p;
+	struct adapter *adap = inode->i_private;
+
+	p = seq_open_tab(file, 2 * CIM_PIFLA_SIZE, 6 * sizeof(u32), 1,
+			 cim_pif_la_show);
+	if (!p)
+		return -ENOMEM;
+
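+	/* Two tables of CIM_PIFLA_SIZE entries, six 32-bit words each;
+	 * t4_cim_read_pif_la() fills the first half with the PIF request LA
+	 * and the second half with the PIF response LA.
+	 */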
+	t4_cim_read_pif_la(adap, (u32 *)p->data,
+			   (u32 *)p->data + 6 * CIM_PIFLA_SIZE, NULL, NULL);
+	return 0;
+}
+
+static const struct file_operations cim_pif_la_fops = {
+	.owner   = THIS_MODULE,
+	.open    = cim_pif_la_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private
+};
+
+static int cim_ma_la_show(struct seq_file *seq, void *v, int idx)
+{
+	const u32 *p = v;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "\n");
+	} else if (idx < CIM_MALA_SIZE) {
+		seq_printf(seq, "%02x%08x%08x%08x%08x\n",
+			   p[4], p[3], p[2], p[1], p[0]);
+	} else {
+		if (idx == CIM_MALA_SIZE)
+			seq_puts(seq,
+				 "\nCnt ID Tag UE       Data       RDY VLD\n");
+		seq_printf(seq, "%3u %2u  %x   %u %08x%08x  %u   %u\n",
+			   (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
+			   (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
+			   (p[1] >> 2) | ((p[2] & 3) << 30),
+			   (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
+			   p[0] & 1);
+	}
+	return 0;
+}
+
+static int cim_ma_la_open(struct inode *inode, struct file *file)
+{
+	struct seq_tab *p;
+	struct adapter *adap = inode->i_private;
+
+	p = seq_open_tab(file, 2 * CIM_MALA_SIZE, 5 * sizeof(u32), 1,
+			 cim_ma_la_show);
+	if (!p)
+		return -ENOMEM;
+
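+	/* Two tables of CIM_MALA_SIZE entries, five 32-bit words each;
+	 * cim_ma_la_show() prints the first half raw and decodes the
+	 * second.
+	 */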
+	t4_cim_read_ma_la(adap, (u32 *)p->data,
+			  (u32 *)p->data + 5 * CIM_MALA_SIZE);
+	return 0;
+}
+
+static const struct file_operations cim_ma_la_fops = {
+	.owner   = THIS_MODULE,
+	.open    = cim_ma_la_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private
+};
+
 static int cim_qcfg_show(struct seq_file *seq, void *v)
 {
 	static const char * const qname[] = {
@@ -663,6 +752,39 @@
 	.write   = pm_stats_clear
 };
 
+static int tx_rate_show(struct seq_file *seq, void *v)
+{
+	u64 nrate[NCHAN], orate[NCHAN];
+	struct adapter *adap = seq->private;
+
+	t4_get_chan_txrate(adap, nrate, orate);
+	if (adap->params.arch.nchan == NCHAN) {
+		seq_puts(seq, "              channel 0   channel 1   "
+			 "channel 2   channel 3\n");
+		seq_printf(seq, "NIC B/s:     %10llu  %10llu  %10llu  %10llu\n",
+			   (unsigned long long)nrate[0],
+			   (unsigned long long)nrate[1],
+			   (unsigned long long)nrate[2],
+			   (unsigned long long)nrate[3]);
+		seq_printf(seq, "Offload B/s: %10llu  %10llu  %10llu  %10llu\n",
+			   (unsigned long long)orate[0],
+			   (unsigned long long)orate[1],
+			   (unsigned long long)orate[2],
+			   (unsigned long long)orate[3]);
+	} else {
+		seq_puts(seq, "              channel 0   channel 1\n");
+		seq_printf(seq, "NIC B/s:     %10llu  %10llu\n",
+			   (unsigned long long)nrate[0],
+			   (unsigned long long)nrate[1]);
+		seq_printf(seq, "Offload B/s: %10llu  %10llu\n",
+			   (unsigned long long)orate[0],
+			   (unsigned long long)orate[1]);
+	}
+	return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tx_rate);
+
 static int cctrl_tbl_show(struct seq_file *seq, void *v)
 {
 	static const char * const dec_fac[] = {
@@ -1084,41 +1206,89 @@
 
 static int mps_tcam_show(struct seq_file *seq, void *v)
 {
-	if (v == SEQ_START_TOKEN)
-		seq_puts(seq, "Idx  Ethernet address     Mask     Vld Ports PF"
-			 "  VF              Replication             "
-			 "P0 P1 P2 P3  ML\n");
-	else {
+	struct adapter *adap = seq->private;
+	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+	if (v == SEQ_START_TOKEN) {
+		if (adap->params.arch.mps_rplc_size > 128)
+			seq_puts(seq, "Idx  Ethernet address     Mask     "
+				 "Vld Ports PF  VF                           "
+				 "Replication                                "
+				 "    P0 P1 P2 P3  ML\n");
+		else
+			seq_puts(seq, "Idx  Ethernet address     Mask     "
+				 "Vld Ports PF  VF              Replication"
+				 "             P0 P1 P2 P3  ML\n");
+	} else {
 		u64 mask;
 		u8 addr[ETH_ALEN];
-		struct adapter *adap = seq->private;
+		bool replicate;
 		unsigned int idx = (uintptr_t)v - 2;
-		u64 tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
-		u64 tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
-		u32 cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
-		u32 cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
-		u32 rplc[4] = {0, 0, 0, 0};
+		u64 tcamy, tcamx, val;
+		u32 cls_lo, cls_hi, ctl;
+		u32 rplc[8] = {0};
+
+		if (chip_ver > CHELSIO_T5) {
+			/* CtlCmdType - 0: Read, 1: Write
+			 * CtlTcamSel - 0: TCAM0, 1: TCAM1
+			 * CtlXYBitSel- 0: Y bit, 1: X bit
+			 */
+
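+			/* TCAM_DATA1 returns the upper (DMACH) bits of the
+			 * 48-bit match value, TCAM_DATA0 the low 32 bits.
+			 */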
+			/* Read tcamy */
+			ctl = CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
+			if (idx < 256)
+				ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
+			else
+				ctl |= CTLTCAMINDEX_V(idx - 256) |
+				       CTLTCAMSEL_V(1);
+			t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
+			val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
+			tcamy = DMACH_G(val) << 32;
+			tcamy |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
+
+			/* Read tcamx. Change the control param */
+			ctl |= CTLXYBITSEL_V(1);
+			t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
+			val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
+			tcamx = DMACH_G(val) << 32;
+			tcamx |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
+		} else {
+			tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
+			tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
+		}
+
+		cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
+		cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
 
 		if (tcamx & tcamy) {
 			seq_printf(seq, "%3u         -\n", idx);
 			goto out;
 		}
 
-		if (cls_lo & REPLICATE_F) {
+		rplc[0] = rplc[1] = rplc[2] = rplc[3] = 0;
+		if (chip_ver > CHELSIO_T5)
+			replicate = (cls_lo & T6_REPLICATE_F);
+		else
+			replicate = (cls_lo & REPLICATE_F);
+
+		if (replicate) {
 			struct fw_ldst_cmd ldst_cmd;
 			int ret;
+			struct fw_ldst_mps_rplc mps_rplc;
+			u32 ldst_addrspc;
 
 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+			ldst_addrspc =
+				FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS);
 			ldst_cmd.op_to_addrspace =
 				htonl(FW_CMD_OP_V(FW_LDST_CMD) |
 				      FW_CMD_REQUEST_F |
 				      FW_CMD_READ_F |
-				      FW_LDST_CMD_ADDRSPACE_V(
-					      FW_LDST_ADDRSPC_MPS));
+				      ldst_addrspc);
 			ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
-			ldst_cmd.u.mps.fid_ctl =
+			ldst_cmd.u.mps.rplc.fid_idx =
 				htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
-				      FW_LDST_CMD_CTL_V(idx));
+				      FW_LDST_CMD_IDX_V(idx));
 			ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd,
 					 sizeof(ldst_cmd), &ldst_cmd);
 			if (ret)
@@ -1126,30 +1296,69 @@
 					 "replication map for idx %d: %d\n",
 					 idx, -ret);
 			else {
-				rplc[0] = ntohl(ldst_cmd.u.mps.rplc31_0);
-				rplc[1] = ntohl(ldst_cmd.u.mps.rplc63_32);
-				rplc[2] = ntohl(ldst_cmd.u.mps.rplc95_64);
-				rplc[3] = ntohl(ldst_cmd.u.mps.rplc127_96);
+				mps_rplc = ldst_cmd.u.mps.rplc;
+				rplc[0] = ntohl(mps_rplc.rplc31_0);
+				rplc[1] = ntohl(mps_rplc.rplc63_32);
+				rplc[2] = ntohl(mps_rplc.rplc95_64);
+				rplc[3] = ntohl(mps_rplc.rplc127_96);
+				if (adap->params.arch.mps_rplc_size > 128) {
+					rplc[4] = ntohl(mps_rplc.rplc159_128);
+					rplc[5] = ntohl(mps_rplc.rplc191_160);
+					rplc[6] = ntohl(mps_rplc.rplc223_192);
+					rplc[7] = ntohl(mps_rplc.rplc255_224);
+				}
 			}
 		}
 
 		tcamxy2valmask(tcamx, tcamy, addr, &mask);
-		seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x %012llx"
-			   "%3c   %#x%4u%4d",
-			   idx, addr[0], addr[1], addr[2], addr[3], addr[4],
-			   addr[5], (unsigned long long)mask,
-			   (cls_lo & SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi),
-			   PF_G(cls_lo),
-			   (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
-		if (cls_lo & REPLICATE_F)
-			seq_printf(seq, " %08x %08x %08x %08x",
-				   rplc[3], rplc[2], rplc[1], rplc[0]);
+		if (chip_ver > CHELSIO_T5)
+			seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
+				   "%012llx%3c   %#x%4u%4d",
+				   idx, addr[0], addr[1], addr[2], addr[3],
+				   addr[4], addr[5], (unsigned long long)mask,
+				   (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
+				   PORTMAP_G(cls_hi),
+				   T6_PF_G(cls_lo),
+				   (cls_lo & T6_VF_VALID_F) ?
+				   T6_VF_G(cls_lo) : -1);
 		else
-			seq_printf(seq, "%36c", ' ');
-		seq_printf(seq, "%4u%3u%3u%3u %#x\n",
-			   SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
-			   SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
-			   (cls_lo >> MULTILISTEN0_S) & 0xf);
+			seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
+				   "%012llx%3c   %#x%4u%4d",
+				   idx, addr[0], addr[1], addr[2], addr[3],
+				   addr[4], addr[5], (unsigned long long)mask,
+				   (cls_lo & SRAM_VLD_F) ? 'Y' : 'N',
+				   PORTMAP_G(cls_hi),
+				   PF_G(cls_lo),
+				   (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
+
+		if (replicate) {
+			if (adap->params.arch.mps_rplc_size > 128)
+				seq_printf(seq, " %08x %08x %08x %08x "
+					   "%08x %08x %08x %08x",
+					   rplc[7], rplc[6], rplc[5], rplc[4],
+					   rplc[3], rplc[2], rplc[1], rplc[0]);
+			else
+				seq_printf(seq, " %08x %08x %08x %08x",
+					   rplc[3], rplc[2], rplc[1], rplc[0]);
+		} else {
+			if (adap->params.arch.mps_rplc_size > 128)
+				seq_printf(seq, "%72c", ' ');
+			else
+				seq_printf(seq, "%36c", ' ');
+		}
+
+		if (chip_ver > CHELSIO_T5)
+			seq_printf(seq, "%4u%3u%3u%3u %#x\n",
+				   T6_SRAM_PRIO0_G(cls_lo),
+				   T6_SRAM_PRIO1_G(cls_lo),
+				   T6_SRAM_PRIO2_G(cls_lo),
+				   T6_SRAM_PRIO3_G(cls_lo),
+				   (cls_lo >> T6_MULTILISTEN0_S) & 0xf);
+		else
+			seq_printf(seq, "%4u%3u%3u%3u %#x\n",
+				   SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
+				   SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
+				   (cls_lo >> MULTILISTEN0_S) & 0xf);
 	}
 out:	return 0;
 }
@@ -1222,7 +1431,7 @@
 	param[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
 		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
 		    FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_VDD));
-	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
 			      param, val);
 
 	if (ret < 0 || val[0] == 0)
@@ -1416,6 +1625,9 @@
 	seq_printf(seq, "  HashDelay:     %3d\n", HASHDELAY_G(rssconf));
 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
 		seq_printf(seq, "  VfWrAddr:      %3d\n", VFWRADDR_G(rssconf));
+	else
+		seq_printf(seq, "  VfWrAddr:      %3d\n",
+			   T6_VFWRADDR_G(rssconf));
 	seq_printf(seq, "  KeyMode:       %s\n", keymode[KEYMODE_G(rssconf)]);
 	seq_printf(seq, "  VfWrEn:        %3s\n", yesno(rssconf & VFWREN_F));
 	seq_printf(seq, "  KeyWrEn:       %3s\n", yesno(rssconf & KEYWREN_F));
@@ -1634,14 +1846,14 @@
 	struct adapter *adapter = inode->i_private;
 	struct seq_tab *p;
 	struct rss_vf_conf *vfconf;
-	int vf;
+	int vf, vfcount = adapter->params.arch.vfcount;
 
-	p = seq_open_tab(file, 128, sizeof(*vfconf), 1, rss_vf_config_show);
+	p = seq_open_tab(file, vfcount, sizeof(*vfconf), 1, rss_vf_config_show);
 	if (!p)
 		return -ENOMEM;
 
 	vfconf = (struct rss_vf_conf *)p->data;
-	for (vf = 0; vf < 128; vf++) {
+	for (vf = 0; vf < vfcount; vf++) {
 		t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl,
 				      &vfconf[vf].rss_vf_vfh);
 	}
@@ -1959,6 +2171,61 @@
 				 size_mb << 20);
 }
 
+static int blocked_fl_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf,
+			       size_t count, loff_t *ppos)
+{
+	int len;
+	const struct adapter *adap = filp->private_data;
+	char *buf;
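+	/* "%*pb" prints the bitmap as comma-separated groups of eight hex
+	 * digits: budget one hex digit per 4 bits, one ',' per 32 bits,
+	 * plus room for the two newlines and the terminating NUL.
+	 */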
+	ssize_t size = (adap->sge.egr_sz + 3) / 4 +
+			adap->sge.egr_sz / 32 + 3; /* includes ,/\n/\n/\0 */
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len = snprintf(buf, size - 1, "%*pb\n",
+		       adap->sge.egr_sz, adap->sge.blocked_fl);
+	len += sprintf(buf + len, "\n");
+	size = simple_read_from_buffer(ubuf, count, ppos, buf, len);
+	t4_free_mem(buf);
+	return size;
+}
+
+static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	int err;
+	unsigned long *t;
+	struct adapter *adap = filp->private_data;
+
+	t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL);
+	if (!t)
+		return -ENOMEM;
+
+	err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
+	if (err) {
+		t4_free_mem(t);
+		return err;
+	}
+
+	bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
+	t4_free_mem(t);
+	return count;
+}
+
+static const struct file_operations blocked_fl_fops = {
+	.owner   = THIS_MODULE,
+	.open    = blocked_fl_open,
+	.read    = blocked_fl_read,
+	.write   = blocked_fl_write,
+	.llseek  = generic_file_llseek,
+};
+
 /* Add an array of Debug FS files.
  */
 void add_debugfs_files(struct adapter *adap,
@@ -1978,11 +2245,13 @@
 int t4_setup_debugfs(struct adapter *adap)
 {
 	int i;
-	u32 size;
+	u32 size = 0;
 	struct dentry *de;
 
 	static struct t4_debugfs_entry t4_debugfs_files[] = {
 		{ "cim_la", &cim_la_fops, S_IRUSR, 0 },
+		{ "cim_pif_la", &cim_pif_la_fops, S_IRUSR, 0 },
+		{ "cim_ma_la", &cim_ma_la_fops, S_IRUSR, 0 },
 		{ "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 },
 		{ "clk", &clk_debugfs_fops, S_IRUSR, 0 },
 		{ "devlog", &devlog_fops, S_IRUSR, 0 },
@@ -2018,10 +2287,12 @@
 		{ "ulprx_la", &ulprx_la_fops, S_IRUSR, 0 },
 		{ "sensors", &sensors_debugfs_fops, S_IRUSR, 0 },
 		{ "pm_stats", &pm_stats_debugfs_fops, S_IRUSR, 0 },
+		{ "tx_rate", &tx_rate_debugfs_fops, S_IRUSR, 0 },
 		{ "cctrl", &cctrl_tbl_debugfs_fops, S_IRUSR, 0 },
 #if IS_ENABLED(CONFIG_IPV6)
 		{ "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
 #endif
+		{ "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
 	};
 
 	/* Debug FS nodes common to all T5 and later adapters.
@@ -2048,12 +2319,7 @@
 		size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
 		add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM1_SIZE_G(size));
 	}
-	if (is_t4(adap->params.chip)) {
-		size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
-		if (i & EXT_MEM_ENABLE_F)
-			add_debugfs_mem(adap, "mc", MEM_MC,
-					EXT_MEM_SIZE_G(size));
-	} else {
+	if (is_t5(adap->params.chip)) {
 		if (i & EXT_MEM0_ENABLE_F) {
 			size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
 			add_debugfs_mem(adap, "mc0", MEM_MC0,
@@ -2064,6 +2330,11 @@
 			add_debugfs_mem(adap, "mc1", MEM_MC1,
 					EXT_MEM1_SIZE_G(size));
 		}
+	} else {
+		if (i & EXT_MEM_ENABLE_F) {
+			size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
+			add_debugfs_mem(adap, "mc", MEM_MC,
+					EXT_MEM_SIZE_G(size));
+		}
+	}
 
 	de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 10d82b5..687acf7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -108,15 +108,82 @@
 	"VLANinsertions     ",
 	"GROpackets         ",
 	"GROmerged          ",
-	"WriteCoalSuccess   ",
-	"WriteCoalFail      ",
+};
+
+static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
+	"db_drop                ",
+	"db_full                ",
+	"db_empty               ",
+	"tcp_ipv4_out_rsts      ",
+	"tcp_ipv4_in_segs       ",
+	"tcp_ipv4_out_segs      ",
+	"tcp_ipv4_retrans_segs  ",
+	"tcp_ipv6_out_rsts      ",
+	"tcp_ipv6_in_segs       ",
+	"tcp_ipv6_out_segs      ",
+	"tcp_ipv6_retrans_segs  ",
+	"usm_ddp_frames         ",
+	"usm_ddp_octets         ",
+	"usm_ddp_drops          ",
+	"rdma_no_rqe_mod_defer  ",
+	"rdma_no_rqe_pkt_defer  ",
+	"tp_err_ofld_no_neigh   ",
+	"tp_err_ofld_cong_defer ",
+	"write_coal_success     ",
+	"write_coal_fail        ",
+};
+
+static char channel_stats_strings[][ETH_GSTRING_LEN] = {
+	"--------Channel--------- ",
+	"tp_cpl_requests        ",
+	"tp_cpl_responses       ",
+	"tp_mac_in_errs         ",
+	"tp_hdr_in_errs         ",
+	"tp_tcp_in_errs         ",
+	"tp_tcp6_in_errs        ",
+	"tp_tnl_cong_drops      ",
+	"tp_tnl_tx_drops        ",
+	"tp_ofld_vlan_drops     ",
+	"tp_ofld_chan_drops     ",
+	"fcoe_octets_ddp        ",
+	"fcoe_frames_ddp        ",
+	"fcoe_frames_drop       ",
+};
+
+static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
+	"-------Loopback----------- ",
+	"octets_ok              ",
+	"frames_ok              ",
+	"bcast_frames           ",
+	"mcast_frames           ",
+	"ucast_frames           ",
+	"error_frames           ",
+	"frames_64              ",
+	"frames_65_to_127       ",
+	"frames_128_to_255      ",
+	"frames_256_to_511      ",
+	"frames_512_to_1023     ",
+	"frames_1024_to_1518    ",
+	"frames_1519_to_max     ",
+	"frames_dropped         ",
+	"bg0_frames_dropped     ",
+	"bg1_frames_dropped     ",
+	"bg2_frames_dropped     ",
+	"bg3_frames_dropped     ",
+	"bg0_frames_trunc       ",
+	"bg1_frames_trunc       ",
+	"bg2_frames_trunc       ",
+	"bg3_frames_trunc       ",
 };
 
 static int get_sset_count(struct net_device *dev, int sset)
 {
 	switch (sset) {
 	case ETH_SS_STATS:
-		return ARRAY_SIZE(stats_strings);
+		return ARRAY_SIZE(stats_strings) +
+		       ARRAY_SIZE(adapter_stats_strings) +
+		       ARRAY_SIZE(channel_stats_strings) +
+		       ARRAY_SIZE(loopback_stats_strings);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -168,8 +235,18 @@
 
 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
-	if (stringset == ETH_SS_STATS)
+	if (stringset == ETH_SS_STATS) {
 		memcpy(data, stats_strings, sizeof(stats_strings));
+		data += sizeof(stats_strings);
+		memcpy(data, adapter_stats_strings,
+		       sizeof(adapter_stats_strings));
+		data += sizeof(adapter_stats_strings);
+		memcpy(data, channel_stats_strings,
+		       sizeof(channel_stats_strings));
+		data += sizeof(channel_stats_strings);
+		memcpy(data, loopback_stats_strings,
+		       sizeof(loopback_stats_strings));
+	}
 }
 
 /* port stats maintained per queue of the port. They should be in the same
@@ -185,6 +262,45 @@
 	u64 gro_merged;
 };
 
+struct adapter_stats {
+	u64 db_drop;
+	u64 db_full;
+	u64 db_empty;
+	u64 tcp_v4_out_rsts;
+	u64 tcp_v4_in_segs;
+	u64 tcp_v4_out_segs;
+	u64 tcp_v4_retrans_segs;
+	u64 tcp_v6_out_rsts;
+	u64 tcp_v6_in_segs;
+	u64 tcp_v6_out_segs;
+	u64 tcp_v6_retrans_segs;
+	u64 frames;
+	u64 octets;
+	u64 drops;
+	u64 rqe_dfr_mod;
+	u64 rqe_dfr_pkt;
+	u64 ofld_no_neigh;
+	u64 ofld_cong_defer;
+	u64 wc_success;
+	u64 wc_fail;
+};
+
+struct channel_stats {
+	u64 cpl_req;
+	u64 cpl_rsp;
+	u64 mac_in_errs;
+	u64 hdr_in_errs;
+	u64 tcp_in_errs;
+	u64 tcp6_in_errs;
+	u64 tnl_cong_drops;
+	u64 tnl_tx_drops;
+	u64 ofld_vlan_drops;
+	u64 ofld_chan_drops;
+	u64 octets_ddp;
+	u64 frames_ddp;
+	u64 frames_drop;
+};
+
 static void collect_sge_port_stats(const struct adapter *adap,
 				   const struct port_info *p,
 				   struct queue_port_stats *s)
@@ -205,30 +321,121 @@
 	}
 }
 
+static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
+{
+	struct tp_tcp_stats v4, v6;
+	struct tp_rdma_stats rdma_stats;
+	struct tp_err_stats err_stats;
+	struct tp_usm_stats usm_stats;
+	u64 val1, val2;
+
+	memset(s, 0, sizeof(*s));
+
+	spin_lock(&adap->stats_lock);
+	t4_tp_get_tcp_stats(adap, &v4, &v6);
+	t4_tp_get_rdma_stats(adap, &rdma_stats);
+	t4_get_usm_stats(adap, &usm_stats);
+	t4_tp_get_err_stats(adap, &err_stats);
+	spin_unlock(&adap->stats_lock);
+
+	s->db_drop = adap->db_stats.db_drop;
+	s->db_full = adap->db_stats.db_full;
+	s->db_empty = adap->db_stats.db_empty;
+
+	s->tcp_v4_out_rsts = v4.tcp_out_rsts;
+	s->tcp_v4_in_segs = v4.tcp_in_segs;
+	s->tcp_v4_out_segs = v4.tcp_out_segs;
+	s->tcp_v4_retrans_segs = v4.tcp_retrans_segs;
+	s->tcp_v6_out_rsts = v6.tcp_out_rsts;
+	s->tcp_v6_in_segs = v6.tcp_in_segs;
+	s->tcp_v6_out_segs = v6.tcp_out_segs;
+	s->tcp_v6_retrans_segs = v6.tcp_retrans_segs;
+
+	if (is_offload(adap)) {
+		s->frames = usm_stats.frames;
+		s->octets = usm_stats.octets;
+		s->drops = usm_stats.drops;
+		s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod;
+		s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt;
+	}
+
+	s->ofld_no_neigh = err_stats.ofld_no_neigh;
+	s->ofld_cong_defer = err_stats.ofld_cong_defer;
+
+	if (!is_t4(adap->params.chip)) {
+		int v;
+
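+		/* The probe path points SGE_STAT_CFG at stat source 7 (the
+		 * write-coalescing counters); only report them if that
+		 * selection is still in effect.
+		 */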
+		v = t4_read_reg(adap, SGE_STAT_CFG_A);
+		if (STATSOURCE_T5_G(v) == 7) {
+			val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
+			val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
+			s->wc_success = val1 - val2;
+			s->wc_fail = val2;
+		}
+	}
+}
+
+static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
+				  u8 i)
+{
+	struct tp_cpl_stats cpl_stats;
+	struct tp_err_stats err_stats;
+	struct tp_fcoe_stats fcoe_stats;
+
+	memset(s, 0, sizeof(*s));
+
+	spin_lock(&adap->stats_lock);
+	t4_tp_get_cpl_stats(adap, &cpl_stats);
+	t4_tp_get_err_stats(adap, &err_stats);
+	t4_get_fcoe_stats(adap, i, &fcoe_stats);
+	spin_unlock(&adap->stats_lock);
+
+	s->cpl_req = cpl_stats.req[i];
+	s->cpl_rsp = cpl_stats.rsp[i];
+	s->mac_in_errs = err_stats.mac_in_errs[i];
+	s->hdr_in_errs = err_stats.hdr_in_errs[i];
+	s->tcp_in_errs = err_stats.tcp_in_errs[i];
+	s->tcp6_in_errs = err_stats.tcp6_in_errs[i];
+	s->tnl_cong_drops = err_stats.tnl_cong_drops[i];
+	s->tnl_tx_drops = err_stats.tnl_tx_drops[i];
+	s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i];
+	s->ofld_chan_drops = err_stats.ofld_chan_drops[i];
+	s->octets_ddp = fcoe_stats.octets_ddp;
+	s->frames_ddp = fcoe_stats.frames_ddp;
+	s->frames_drop = fcoe_stats.frames_drop;
+}
+
 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
 		      u64 *data)
 {
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adapter = pi->adapter;
-	u32 val1, val2;
+	struct lb_port_stats s;
+	int i;
+	u64 *p0;
 
-	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
+	t4_get_port_stats_offset(adapter, pi->tx_chan,
+				 (struct port_stats *)data,
+				 &pi->stats_base);
 
 	data += sizeof(struct port_stats) / sizeof(u64);
 	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
 	data += sizeof(struct queue_port_stats) / sizeof(u64);
-	if (!is_t4(adapter->params.chip)) {
-		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
-		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
-		val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
-		*data = val1 - val2;
-		data++;
-		*data = val2;
-		data++;
-	} else {
-		memset(data, 0, 2 * sizeof(u64));
-		*data += 2;
-	}
+	collect_adapter_stats(adapter, (struct adapter_stats *)data);
+	data += sizeof(struct adapter_stats) / sizeof(u64);
+
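+	/* The "--------Channel---" and "-------Loopback----" banner slots
+	 * in the string tables are backed by the port id so the values stay
+	 * aligned with the strings in "ethtool -S" output.
+	 */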
+	*data++ = (u64)pi->port_id;
+	collect_channel_stats(adapter, (struct channel_stats *)data,
+			      pi->port_id);
+	data += sizeof(struct channel_stats) / sizeof(u64);
+
+	*data++ = (u64)pi->port_id;
+	memset(&s, 0, sizeof(s));
+	t4_get_lb_stats(adapter, pi->port_id, &s);
+
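+	/* lb_port_stats is a run of u64 counters starting at ->octets, in
+	 * the same order as loopback_stats_strings[] less its banner entry,
+	 * so walk it as a flat u64 array.
+	 */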
+	p0 = &s.octets;
+	for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++)
+		*data++ = (unsigned long long)*p0++;
 }
 
 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
@@ -250,7 +457,7 @@
 		return -EAGAIN;
 	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
 		return -EINVAL;
-	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
+	t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
 	return 0;
 }
 
@@ -267,7 +474,7 @@
 	else
 		return -EINVAL;
 
-	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
+	return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
 }
 
 static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
@@ -439,7 +646,7 @@
 	lc->autoneg = cmd->autoneg;
 
 	if (netif_running(dev))
-		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+		return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
 				     lc);
 	return 0;
 }
@@ -472,7 +679,7 @@
 	if (epause->tx_pause)
 		lc->requested_fc |= PAUSE_TX;
 	if (netif_running(dev))
-		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+		return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
 				     lc);
 	return 0;
 }
@@ -578,7 +785,7 @@
 	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
 
 	c->rx_coalesce_usecs = qtimer_val(adap, rq);
-	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
+	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
 		adap->sge.counter_val[rq->pktcnt_idx] : 0;
 	c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
 	return 0;
@@ -617,7 +824,7 @@
  */
 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
 {
-	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
 
 	if (vaddr >= 0)
 		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
@@ -626,7 +833,7 @@
 
 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
 {
-	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
 
 	if (vaddr >= 0)
 		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
@@ -669,8 +876,8 @@
 	aligned_offset = eeprom->offset & ~3;
 	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
 
-	if (adapter->fn > 0) {
-		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
+	if (adapter->pf > 0) {
+		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
 
 		if (aligned_offset < start ||
 		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
@@ -740,37 +947,6 @@
 	return ret;
 }
 
-#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
-#define BCAST_CRC 0xa0ccc1a6
-
-static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
-	wol->supported = WAKE_BCAST | WAKE_MAGIC;
-	wol->wolopts = netdev2adap(dev)->wol;
-	memset(&wol->sopass, 0, sizeof(wol->sopass));
-}
-
-static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
-	int err = 0;
-	struct port_info *pi = netdev_priv(dev);
-
-	if (wol->wolopts & ~WOL_SUPPORTED)
-		return -EINVAL;
-	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
-			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
-	if (wol->wolopts & WAKE_BCAST) {
-		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
-					~0ULL, 0, false);
-		if (!err)
-			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
-						~6ULL, ~0ULL, BCAST_CRC, true);
-	} else {
-		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
-	}
-	return err;
-}
-
 static u32 get_rss_table_size(struct net_device *dev)
 {
 	const struct port_info *pi = netdev_priv(dev);
@@ -900,8 +1076,6 @@
 	.get_ethtool_stats = get_stats,
 	.get_regs_len      = get_regs_len,
 	.get_regs          = get_regs,
-	.get_wol           = get_wol,
-	.set_wol           = set_wol,
 	.get_rxnfc         = get_rxnfc,
 	.get_rxfh_indir_size = get_rss_table_size,
 	.get_rxfh	   = get_rss_table,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index a935559..c64b5a9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -135,8 +135,14 @@
 
 #define FW4_FNAME "cxgb4/t4fw.bin"
 #define FW5_FNAME "cxgb4/t5fw.bin"
+#define FW6_FNAME "cxgb4/t6fw.bin"
 #define FW4_CFNAME "cxgb4/t4-config.txt"
 #define FW5_CFNAME "cxgb4/t5-config.txt"
+#define FW6_CFNAME "cxgb4/t6-config.txt"
+#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
+#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
+#define PHY_AQ1202_DEVICEID 0x4409
+#define PHY_BCM84834_DEVICEID 0x4486
 
 MODULE_DESCRIPTION(DRV_DESC);
 MODULE_AUTHOR("Chelsio Communications");
@@ -318,8 +324,9 @@
 		 * level") we need to issue the Set Parameters Commannd
 		 * without sleeping (timeout < 0).
 		 */
-		err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
-					    &name, &value);
+		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
+					    &name, &value,
+					    -FW_CMD_MAX_TIMEOUT);
 
 		if (err)
 			dev_err(adap->pdev_dev,
@@ -382,7 +389,7 @@
 	int uc_cnt = netdev_uc_count(dev);
 	int mc_cnt = netdev_mc_count(dev);
 	const struct port_info *pi = netdev_priv(dev);
-	unsigned int mb = pi->adapter->fn;
+	unsigned int mb = pi->adapter->pf;
 
 	/* first do the secondary unicast addresses */
 	netdev_for_each_uc_addr(ha, dev) {
@@ -439,7 +446,7 @@
 
 	ret = set_addr_filters(dev, sleep_ok);
 	if (ret == 0)
-		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
+		ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, mtu,
 				    (dev->flags & IFF_PROMISC) ? 1 : 0,
 				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
 				    sleep_ok);
@@ -456,7 +463,7 @@
 {
 	int ret;
 	struct port_info *pi = netdev_priv(dev);
-	unsigned int mb = pi->adapter->fn;
+	unsigned int mb = pi->adapter->pf;
 
 	/*
 	 * We do not set address filters and promiscuity here, the stack does
@@ -474,7 +481,7 @@
 		}
 	}
 	if (ret == 0)
-		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
+		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
 				    &pi->link_cfg);
 	if (ret == 0) {
 		local_bh_disable();
@@ -856,23 +863,39 @@
  *
  *	Sets up the portion of the HW RSS table for the port's VI to distribute
  *	packets to the Rx queues in @queues.
+ *	Must not be called before the SGE Ethernet Rx queues have been set up.
  */
 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
 {
 	u16 *rss;
 	int i, err;
-	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
+	struct adapter *adapter = pi->adapter;
+	const struct sge_eth_rxq *rxq;
 
+	rxq = &adapter->sge.ethrxq[pi->first_qset];
 	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
 	if (!rss)
 		return -ENOMEM;
 
 	/* map the queue indices to queue ids */
 	for (i = 0; i < pi->rss_size; i++, queues++)
-		rss[i] = q[*queues].rspq.abs_id;
+		rss[i] = rxq[*queues].rspq.abs_id;
 
-	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
+	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
 				  pi->rss_size, rss, pi->rss_size);
+	/* If Tunnel All Lookup isn't specified in the global RSS
+	 * Configuration, then we need to specify a default Ingress
+	 * Queue for any ingress packets which aren't hashed.  We'll
+	 * use our first ingress queue ...
+	 */
+	if (!err)
+		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
+				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
+				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
+				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
+				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
+				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
+				       rss[0]);
 	kfree(rss);
 	return err;
 }
@@ -885,11 +908,15 @@
  */
 static int setup_rss(struct adapter *adap)
 {
-	int i, err;
+	int i, j, err;
 
 	for_each_port(adap, i) {
 		const struct port_info *pi = adap2pinfo(adap, i);
 
+		/* Fill default values with equal distribution */
+		for (j = 0; j < pi->rss_size; j++)
+			pi->rss[j] = j % pi->nqsets;
+
 		err = cxgb4_write_rss(pi, pi->rss);
 		if (err)
 			return err;
@@ -977,7 +1004,7 @@
 		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
 				       adap->port[i / per_chan],
 				       msi_idx, q->fl.size ? &q->fl : NULL,
-				       uldrx_handler);
+				       uldrx_handler, 0);
 		if (err)
 			return err;
 		memset(&q->stats, 0, sizeof(q->stats));
@@ -1007,7 +1034,7 @@
 		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
 	else {
 		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
-				       NULL, NULL);
+				       NULL, NULL, -1);
 		if (err)
 			return err;
 		msi_idx = -((int)s->intrq.abs_id + 1);
@@ -1027,7 +1054,7 @@
 	 *    new/deleted queues.
 	 */
 	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
-			       msi_idx, NULL, fwevtq_handler);
+			       msi_idx, NULL, fwevtq_handler, -1);
 	if (err) {
 freeout:	t4_free_sge_resources(adap);
 		return err;
@@ -1044,7 +1071,9 @@
 				msi_idx++;
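+			/* The trailing argument ties the queue to the port's
+			 * MPS buffer group map for congestion tracking; the
+			 * interrupt and FW event queues above pass -1 to opt
+			 * out.
+			 */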
 			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
 					       msi_idx, &q->fl,
-					       t4_ethrx_handler);
+					       t4_ethrx_handler,
+					       t4_get_mps_bg_map(adap,
+								 pi->tx_chan));
 			if (err)
 				goto freeout;
 			q->rspq.idx = j;
@@ -1324,11 +1353,6 @@
 	return fallback(dev, skb) % dev->real_num_tx_queues;
 }
 
-static inline int is_offload(const struct adapter *adap)
-{
-	return adap->params.offload;
-}
-
 static int closest_timer(const struct sge *s, int time)
 {
 	int i, delta, match = 0, min_delta = INT_MAX;
@@ -1389,8 +1413,8 @@
 			    FW_PARAMS_PARAM_X_V(
 					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
 			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
-			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
-					    &new_idx);
+			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+					    &v, &new_idx);
 			if (err)
 				return err;
 		}
@@ -1398,7 +1422,7 @@
 	}
 
 	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
-	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
+	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
 	return 0;
 }
 
@@ -1411,7 +1435,7 @@
 	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
 		return 0;
 
-	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
+	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
 			    -1, -1, -1,
 			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
 	if (unlikely(err))
@@ -1694,7 +1718,7 @@
 	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
 	/* Reserve stid 0 for T4/T5 adapters */
 	if (!t->stid_base &&
-	    (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
+	    (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
 		__set_bit(0, t->stid_bmap);
 
 	return 0;
@@ -1983,11 +2007,8 @@
 int cxgb4_flush_eq_cache(struct net_device *dev)
 {
 	struct adapter *adap = netdev2adap(dev);
-	int ret;
 
-	ret = t4_fwaddrspace_write(adap, adap->mbox,
-				   0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
-	return ret;
+	return t4_sge_ctxt_flush(adap, adap->mbox);
 }
 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
 
@@ -2042,25 +2063,6 @@
 }
 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
 
-void cxgb4_disable_db_coalescing(struct net_device *dev)
-{
-	struct adapter *adap;
-
-	adap = netdev2adap(dev);
-	t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
-			 NOCOALESCE_F);
-}
-EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
-
-void cxgb4_enable_db_coalescing(struct net_device *dev)
-{
-	struct adapter *adap;
-
-	adap = netdev2adap(dev);
-	t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
-}
-EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
-
 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
 {
 	struct adapter *adap;
@@ -2100,10 +2102,7 @@
 		if (offset < mc0_end) {
 			memtype = MEM_MC0;
 			memaddr = offset - edc1_end;
-		} else if (is_t4(adap->params.chip)) {
-			/* T4 only has a single memory channel */
-			goto err;
-		} else {
+		} else if (is_t5(adap->params.chip)) {
 			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
 			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
 			mc1_end = mc0_end + mc1_size;
@@ -2114,6 +2113,9 @@
 				/* offset beyond the end of any memory */
 				goto err;
 			}
+		} else {
+			/* T4/T6 only has a single memory channel */
+			goto err;
 		}
 	}
 
@@ -2149,7 +2151,7 @@
 			 u64 *pbar2_qoffset,
 			 unsigned int *pbar2_qid)
 {
-	return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev),
+	return t4_bar2_sge_qregs(netdev2adap(dev),
 				 qid,
 				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
 				  ? T4_BAR2_QTYPE_EGRESS
@@ -2280,9 +2282,13 @@
 	drain_db_fifo(adap, dbfifo_drain_delay);
 	enable_dbs(adap);
 	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
-	t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
-			 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
-			 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
+	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
+				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
+	else
+		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
 }
 
 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -2344,7 +2350,7 @@
 		drain_db_fifo(adap, dbfifo_drain_delay);
 		enable_dbs(adap);
 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
-	} else {
+	} else if (is_t5(adap->params.chip)) {
 		u32 dropped_db = t4_read_reg(adap, 0x010ac);
 		u16 qid = (dropped_db >> 15) & 0x1ffff;
 		u16 pidx_inc = dropped_db & 0x1fff;
@@ -2352,8 +2358,8 @@
 		unsigned int bar2_qid;
 		int ret;
 
-		ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
-					      0, &bar2_qoffset, &bar2_qid);
+		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
+					0, &bar2_qoffset, &bar2_qid);
 		if (ret)
 			dev_err(adap->pdev_dev, "doorbell drop recovery: "
 				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
@@ -2365,7 +2371,8 @@
 		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
 	}
 
-	t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
+	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
 }
 
 void t4_db_full(struct adapter *adap)
@@ -2395,7 +2402,7 @@
 	unsigned short i;
 
 	lli.pdev = adap->pdev;
-	lli.pf = adap->fn;
+	lli.pf = adap->pf;
 	lli.l2t = adap->l2t;
 	lli.tids = &adap->tids;
 	lli.ports = adap->port;
@@ -2434,6 +2441,7 @@
 	lli.max_ordird_qp = adap->params.max_ordird_qp;
 	lli.max_ird_adapter = adap->params.max_ird_adapter;
 	lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
+	lli.nodeid = dev_to_node(adap->pdev_dev);
 
 	handle = ulds[uld].add(&lli);
 	if (IS_ERR(handle)) {
@@ -2731,7 +2739,7 @@
 
 	netif_tx_stop_all_queues(dev);
 	netif_carrier_off(dev);
-	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
+	return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
 }
 
 /* Return an error number if the indicated filter isn't writable ...
@@ -2875,7 +2883,8 @@
 		spin_unlock(&adapter->stats_lock);
 		return ns;
 	}
-	t4_get_port_stats(adapter, p->tx_chan, &stats);
+	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
+				 &p->stats_base);
 	spin_unlock(&adapter->stats_lock);
 
 	ns->tx_bytes   = stats.tx_octets;
@@ -2934,7 +2943,7 @@
 		} else
 			return -EINVAL;
 
-		mbox = pi->adapter->fn;
+		mbox = pi->adapter->pf;
 		if (cmd == SIOCGMIIREG)
 			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
 					 data->reg_num, &data->val_out);
@@ -2961,7 +2970,7 @@
 
 	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
 		return -EINVAL;
-	ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
+	ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
 			    -1, -1, -1, true);
 	if (!ret)
 		dev->mtu = new_mtu;
@@ -2977,7 +2986,7 @@
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
-	ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
+	ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
 			    pi->xact_addr_filt, addr->sa_data, true, true);
 	if (ret < 0)
 		return ret;
@@ -3036,86 +3045,11 @@
 	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
 }
 
-/* Return the specified PCI-E Configuration Space register from our Physical
- * Function.  We try first via a Firmware LDST Command since we prefer to let
- * the firmware own all of these registers, but if that fails we go for it
- * directly ourselves.
- */
-static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
-{
-	struct fw_ldst_cmd ldst_cmd;
-	u32 val;
-	int ret;
-
-	/* Construct and send the Firmware LDST Command to retrieve the
-	 * specified PCI-E Configuration Space register.
-	 */
-	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
-	ldst_cmd.op_to_addrspace =
-		htonl(FW_CMD_OP_V(FW_LDST_CMD) |
-		      FW_CMD_REQUEST_F |
-		      FW_CMD_READ_F |
-		      FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
-	ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
-	ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
-	ldst_cmd.u.pcie.ctrl_to_fn =
-		(FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn));
-	ldst_cmd.u.pcie.r = reg;
-	ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
-			 &ldst_cmd);
-
-	/* If the LDST Command suucceeded, exctract the returned register
-	 * value.  Otherwise read it directly ourself.
-	 */
-	if (ret == 0)
-		val = ntohl(ldst_cmd.u.pcie.data[0]);
-	else
-		t4_hw_pci_read_cfg4(adap, reg, &val);
-
-	return val;
-}
-
 static void setup_memwin(struct adapter *adap)
 {
-	u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
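+	/* On T4 the memory window base is an absolute BAR0-based address
+	 * while on T5+ it is an offset within the BAR; t4_get_util_window()
+	 * hides that difference.
+	 */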
+	u32 nic_win_base = t4_get_util_window(adap);
 
-	if (is_t4(adap->params.chip)) {
-		u32 bar0;
-
-		/* Truncation intentional: we only read the bottom 32-bits of
-		 * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
-		 * mechanism to read BAR0 instead of using
-		 * pci_resource_start() because we could be operating from
-		 * within a Virtual Machine which is trapping our accesses to
-		 * our Configuration Space and we need to set up the PCI-E
-		 * Memory Window decoders with the actual addresses which will
-		 * be coming across the PCI-E link.
-		 */
-		bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
-		bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
-		adap->t4_bar0 = bar0;
-
-		mem_win0_base = bar0 + MEMWIN0_BASE;
-		mem_win1_base = bar0 + MEMWIN1_BASE;
-		mem_win2_base = bar0 + MEMWIN2_BASE;
-		mem_win2_aperture = MEMWIN2_APERTURE;
-	} else {
-		/* For T5, only relative offset inside the PCIe BAR is passed */
-		mem_win0_base = MEMWIN0_BASE;
-		mem_win1_base = MEMWIN1_BASE;
-		mem_win2_base = MEMWIN2_BASE_T5;
-		mem_win2_aperture = MEMWIN2_APERTURE_T5;
-	}
-	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
-		     mem_win0_base | BIR_V(0) |
-		     WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
-	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
-		     mem_win1_base | BIR_V(0) |
-		     WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
-	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
-		     mem_win2_base | BIR_V(0) |
-		     WINDOW_V(ilog2(mem_win2_aperture) - 10));
-	t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
+	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
 }
 
 static void setup_memwin_rdma(struct adapter *adap)
@@ -3149,7 +3083,7 @@
 	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
 			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
 	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
-	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
+	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
 	if (ret < 0)
 		return ret;
 
@@ -3165,18 +3099,18 @@
 	}
 	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
 			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
-	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
+	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
 	if (ret < 0)
 		return ret;
 
-	ret = t4_config_glbl_rss(adap, adap->fn,
+	ret = t4_config_glbl_rss(adap, adap->pf,
 				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
 				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
 				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
 	if (ret < 0)
 		return ret;
 
-	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64,
+	ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
 			  MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
 			  FW_CMD_CAP_PF);
 	if (ret < 0)
@@ -3220,7 +3154,7 @@
 	}
 
 	/* get basic stuff going */
-	return t4_early_init(adap, adap->fn);
+	return t4_early_init(adap, adap->pf);
 }
 
 /*
@@ -3276,6 +3210,142 @@
 	return 0;
 }
 
+/* 10Gb/s-BT PHY support. Chip-external 10Gb/s-BT PHYs are complex devices
+ * in their own right and contain their own firmware which they need in
+ * order to perform their tasks ...
+ */
+static int phy_aq1202_version(const u8 *phy_fw_data,
+			      size_t phy_fw_size)
+{
+	int offset;
+
+	/* At offset 0x8 is the 3-byte starting offset of the primary
+	 * image.
+	 *
+	 * At offset 0xa of the primary image is the 3-byte offset of
+	 * the DRAM segment.
+	 *
+	 * The 2-byte FW version is at offset 0x27e of the DRAM segment.
+	 */
+	#define be16(__p) (((__p)[0] << 8) | (__p)[1])
+	#define le16(__p) ((__p)[0] | ((__p)[1] << 8))
+	#define le24(__p) (le16(__p) | ((__p)[2] << 16))
+
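+	/* The offset at 0x8 is stored in units of 4KB blocks, hence the
+	 * shift by 12 to convert it to a byte offset.
+	 */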
+	offset = le24(phy_fw_data + 0x8) << 12;
+	offset = le24(phy_fw_data + offset + 0xa);
+	return be16(phy_fw_data + offset + 0x27e);
+
+	#undef be16
+	#undef le16
+	#undef le24
+}
+
+static struct info_10gbt_phy_fw {
+	unsigned int phy_fw_id;		/* PCI Device ID */
+	char *phy_fw_file;		/* /lib/firmware/ PHY Firmware file */
+	int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
+	int phy_flash;			/* Has FLASH for PHY Firmware */
+} phy_info_array[] = {
+	{
+		PHY_AQ1202_DEVICEID,
+		PHY_AQ1202_FIRMWARE,
+		phy_aq1202_version,
+		1,
+	},
+	{
+		PHY_BCM84834_DEVICEID,
+		PHY_BCM84834_FIRMWARE,
+		NULL,
+		0,
+	},
+	{ 0, NULL, NULL },
+};
+
+static struct info_10gbt_phy_fw *find_phy_info(int devid)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
+		if (phy_info_array[i].phy_fw_id == devid)
+			return &phy_info_array[i];
+	}
+	return NULL;
+}
+
+/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
+ * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
+ * we return a negative error number.  If we transfer new firmware we return 1
+ * (from t4_load_phy_fw()).  If we don't do anything we return 0.
+ */
+static int adap_init0_phy(struct adapter *adap)
+{
+	const struct firmware *phyf;
+	int ret;
+	struct info_10gbt_phy_fw *phy_info;
+
+	/* Use the device ID to determine which PHY file to flash.
+	 */
+	phy_info = find_phy_info(adap->pdev->device);
+	if (!phy_info) {
+		dev_warn(adap->pdev_dev,
+			 "No PHY Firmware file found for this PHY\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
+	 * use that. The adapter firmware provides us with a memory buffer
+	 * where we can load a PHY firmware file from the host if we want to
+	 * override the PHY firmware File in flash.
+	 */
+	ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
+				      adap->pdev_dev);
+	if (ret < 0) {
+		/* For adapters without FLASH attached to PHY for their
+		 * firmware, it's obviously a fatal error if we can't get the
+		 * firmware to the adapter.  For adapters with PHY firmware
+		 * FLASH storage, it's worth a warning if we can't find the
+		 * PHY Firmware but we'll neuter the error ...
+		 */
+		dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
+			"/lib/firmware/%s, error %d\n",
+			phy_info->phy_fw_file, -ret);
+		if (phy_info->phy_flash) {
+			int cur_phy_fw_ver = 0;
+
+			t4_phy_fw_ver(adap, &cur_phy_fw_ver);
+			dev_warn(adap->pdev_dev, "continuing with on-adapter "
+				 "FLASH copy, version %#x\n", cur_phy_fw_ver);
+			ret = 0;
+		}
+
+		return ret;
+	}
+
+	/* Load PHY Firmware onto adapter.
+	 */
+	ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
+			     phy_info->phy_fw_version,
+			     (u8 *)phyf->data, phyf->size);
+	if (ret < 0)
+		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
+			-ret);
+	else if (ret > 0) {
+		int new_phy_fw_ver = 0;
+
+		if (phy_info->phy_fw_version)
+			new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
+								  phyf->size);
+		dev_info(adap->pdev_dev, "Successfully transferred PHY "
+			 "Firmware /lib/firmware/%s, version %#x\n",
+			 phy_info->phy_fw_file, new_phy_fw_ver);
+	}
+
+	release_firmware(phyf);
+
+	return ret;
+}
+
 /*
  * Attempt to initialize the adapter via a Firmware Configuration File.
  */
@@ -3300,6 +3370,16 @@
 			goto bye;
 	}
 
+	/* If this is a 10Gb/s-BT adapter make sure the chip-external
+	 * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
+	 * to be performed after any global adapter RESET above since some
+	 * PHYs only have local RAM copies of the PHY firmware.
+	 */
+	if (is_10gbt_device(adapter->pdev->device)) {
+		ret = adap_init0_phy(adapter);
+		if (ret < 0)
+			goto bye;
+	}
 	/*
 	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
 	 * then use that.  Otherwise, use the configuration file stored
@@ -3312,6 +3392,9 @@
 	case CHELSIO_T5:
 		fw_config_file = FW5_CFNAME;
 		break;
+	case CHELSIO_T6:
+		fw_config_file = FW6_CFNAME;
+		break;
 	default:
 		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
 		       adapter->pdev->device);
@@ -3337,7 +3420,7 @@
 			params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
 			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
 			ret = t4_query_params(adapter, adapter->mbox,
-					      adapter->fn, 0, 1, params, val);
+					      adapter->pf, 0, 1, params, val);
 			if (ret == 0) {
 				/*
 				 * For t4_memory_rw() below addresses and
@@ -3508,7 +3591,24 @@
 			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
 			.intfver_fcoe = FW_INTFVER(T5, FCOE),
 		},
+	}, {
+		.chip = CHELSIO_T6,
+		.fs_name = FW6_CFNAME,
+		.fw_mod_name = FW6_FNAME,
+		.fw_hdr = {
+			.chip = FW_HDR_CHIP_T6,
+			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
+			.intfver_nic = FW_INTFVER(T6, NIC),
+			.intfver_vnic = FW_INTFVER(T6, VNIC),
+			.intfver_ofld = FW_INTFVER(T6, OFLD),
+			.intfver_ri = FW_INTFVER(T6, RI),
+			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
+			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
+			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
+			.intfver_fcoe = FW_INTFVER(T6, FCOE),
+		},
 	}
 };
 
 static struct fw_info *find_fw_info(int chip)
@@ -3614,7 +3714,7 @@
 	 * the firmware.  On the other hand, we need these fairly early on
 	 * so we do this right after getting ahold of the firmware.
 	 */
-	ret = get_vpd_params(adap, &adap->params.vpd);
+	ret = t4_get_vpd_params(adap, &adap->params.vpd);
 	if (ret < 0)
 		goto bye;
 
@@ -3626,7 +3726,7 @@
 	v =
 	    FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
 	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
-	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
+	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
 	if (ret < 0)
 		goto bye;
 
@@ -3649,7 +3749,7 @@
 		 */
 		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
 			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
-		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
+		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
 				      params, val);
 
 		/* If the firmware doesn't support Configuration Files,
@@ -3708,7 +3808,7 @@
 	params[3] = FW_PARAM_PFVF(FILTER_START);
 	params[4] = FW_PARAM_PFVF(FILTER_END);
 	params[5] = FW_PARAM_PFVF(IQFLINT_START);
-	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
+	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
 	if (ret < 0)
 		goto bye;
 	adap->sge.egr_start = val[0];
@@ -3726,7 +3826,7 @@
 	 */
 	params[0] = FW_PARAM_PFVF(EQ_END);
 	params[1] = FW_PARAM_PFVF(IQFLINT_END);
-	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
 	if (ret < 0)
 		goto bye;
 	adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
@@ -3747,7 +3847,7 @@
 	}
 
	/* Allocate the memory for the various egress queue bitmaps
-	 * ie starving_fl and txq_maperr.
+	 * ie starving_fl, txq_maperr and blocked_fl.
 	 */
 	adap->sge.starving_fl =	kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
 					sizeof(long), GFP_KERNEL);
@@ -3763,9 +3863,18 @@
 		goto bye;
 	}
 
+#ifdef CONFIG_DEBUG_FS
+	adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+				       sizeof(long), GFP_KERNEL);
+	if (!adap->sge.blocked_fl) {
+		ret = -ENOMEM;
+		goto bye;
+	}
+#endif
+
 	params[0] = FW_PARAM_PFVF(CLIP_START);
 	params[1] = FW_PARAM_PFVF(CLIP_END);
-	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
 	if (ret < 0)
 		goto bye;
 	adap->clipt_start = val[0];
@@ -3774,7 +3883,7 @@
 	/* query params related to active filter region */
 	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
 	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
-	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
 	/* If Active filter size is set we enable establishing
 	 * offload connection through firmware work request
 	 */
@@ -3791,7 +3900,7 @@
 	 */
 	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
 	val[0] = 1;
-	(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
+	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
 
 	/*
 	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
@@ -3803,7 +3912,7 @@
 		adap->params.ulptx_memwrite_dsgl = false;
 	} else {
 		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
-		ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
+		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
 				      1, params, val);
 		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
 	}
@@ -3829,7 +3938,7 @@
 		params[3] = FW_PARAM_PFVF(TDDP_START);
 		params[4] = FW_PARAM_PFVF(TDDP_END);
 		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
-		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
+		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
 				      params, val);
 		if (ret < 0)
 			goto bye;
@@ -3867,7 +3976,7 @@
 		params[3] = FW_PARAM_PFVF(RQ_END);
 		params[4] = FW_PARAM_PFVF(PBL_START);
 		params[5] = FW_PARAM_PFVF(PBL_END);
-		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
+		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
 				      params, val);
 		if (ret < 0)
 			goto bye;
@@ -3884,7 +3993,7 @@
 		params[3] = FW_PARAM_PFVF(CQ_END);
 		params[4] = FW_PARAM_PFVF(OCQ_START);
 		params[5] = FW_PARAM_PFVF(OCQ_END);
-		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
+		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
 				      val);
 		if (ret < 0)
 			goto bye;
@@ -3897,7 +4006,7 @@
 
 		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
 		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
-		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
+		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
 				      val);
 		if (ret < 0) {
 			adap->params.max_ordird_qp = 8;
@@ -3915,7 +4024,7 @@
 	if (caps_cmd.iscsicaps) {
 		params[0] = FW_PARAM_PFVF(ISCSI_START);
 		params[1] = FW_PARAM_PFVF(ISCSI_END);
-		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
 				      params, val);
 		if (ret < 0)
 			goto bye;
@@ -3961,8 +4070,8 @@
 			     adap->params.b_wnd);
 	}
 	t4_init_sge_params(adap);
-	t4_init_tp_params(adap);
 	adap->flags |= FW_OK;
+	t4_init_tp_params(adap);
 	return 0;
 
 	/*
@@ -3975,6 +4084,9 @@
 	kfree(adap->sge.ingr_map);
 	kfree(adap->sge.starving_fl);
 	kfree(adap->sge.txq_maperr);
+#ifdef CONFIG_DEBUG_FS
+	kfree(adap->sge.blocked_fl);
+#endif
 	if (ret != -ETIMEDOUT && ret != -EIO)
 		t4_fw_bye(adap, adap->mbox);
 	return ret;
@@ -4042,7 +4154,7 @@
 
 	if (t4_wait_dev_ready(adap->regs) < 0)
 		return PCI_ERS_RESULT_DISCONNECT;
-	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
+	if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
 		return PCI_ERS_RESULT_DISCONNECT;
 	adap->flags |= FW_OK;
 	if (adap_init1(adap, &c))
@@ -4051,7 +4163,7 @@
 	for_each_port(adap, i) {
 		struct port_info *p = adap2pinfo(adap, i);
 
-		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
+		ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
 				  NULL, NULL);
 		if (ret < 0)
 			return PCI_ERS_RESULT_DISCONNECT;
@@ -4342,7 +4454,12 @@
 
 static int init_rss(struct adapter *adap)
 {
-	unsigned int i, j;
+	unsigned int i;
+	int err;
+
+	err = t4_init_rss_mode(adap, adap->mbox);
+	if (err)
+		return err;
 
 	for_each_port(adap, i) {
 		struct port_info *pi = adap2pinfo(adap, i);
@@ -4350,8 +4467,6 @@
 		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
 		if (!pi->rss)
 			return -ENOMEM;
-		for (j = 0; j < pi->rss_size; j++)
-			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
 	}
 	return 0;
 }
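
The dropped loop relied on ethtool_rxfh_indir_default() for the initial RSS indirection table fill, which simply spreads table slots across the port's queue sets round-robin. A minimal standalone sketch of that default fill (names here are illustrative, not the driver's):

#include <stdint.h>

/* Round-robin default for an RSS indirection table: slot i maps to
 * RX queue i % nqsets, which is what ethtool_rxfh_indir_default()
 * computes.
 */
static void rss_fill_default(uint16_t *rss, unsigned int rss_size,
			     unsigned int nqsets)
{
	unsigned int i;

	for (i = 0; i < rss_size; i++)
		rss[i] = i % nqsets;
}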
@@ -4415,15 +4530,23 @@
 	kfree(adapter->sge.ingr_map);
 	kfree(adapter->sge.starving_fl);
 	kfree(adapter->sge.txq_maperr);
+#ifdef CONFIG_DEBUG_FS
+	kfree(adapter->sge.blocked_fl);
+#endif
 	disable_msi(adapter);
 
 	for_each_port(adapter, i)
 		if (adapter->port[i]) {
+			struct port_info *pi = adap2pinfo(adapter, i);
+
+			if (pi->viid != 0)
+				t4_free_vi(adapter, adapter->mbox, adapter->pf,
+					   0, pi->viid);
 			kfree(adap2pinfo(adapter, i)->rss);
 			free_netdev(adapter->port[i]);
 		}
 	if (adapter->flags & FW_OK)
-		t4_fw_bye(adapter, adapter->fn);
+		t4_fw_bye(adapter, adapter->pf);
 }
 
 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
@@ -4514,7 +4637,7 @@
 	adapter->pdev = pdev;
 	adapter->pdev_dev = &pdev->dev;
 	adapter->mbox = func;
-	adapter->fn = func;
+	adapter->pf = func;
 	adapter->msg_enable = dflt_msg_enable;
 	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
 
@@ -4534,7 +4657,7 @@
 	if (!is_t4(adapter->params.chip)) {
 		s_qpp = (QUEUESPERPAGEPF0_S +
 			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
-			adapter->fn);
+			adapter->pf);
 		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
 		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
 		num_seg = PAGE_SIZE / SEGMENT_SIZE;
@@ -4557,10 +4680,15 @@
 			err = -ENOMEM;
 			goto out_free_adapter;
 		}
+		t4_write_reg(adapter, SGE_STAT_CFG_A,
+			     STATSOURCE_T5_V(7) | STATMODE_V(0));
 	}
 
 	setup_memwin(adapter);
 	err = adap_init0(adapter);
+#ifdef CONFIG_DEBUG_FS
+	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
+#endif
 	setup_memwin_rdma(adapter);
 	if (err)
 		goto out_unmap_bar;
@@ -4609,10 +4737,25 @@
 		err = t4_port_init(adapter, func, func, 0);
 		if (err)
 			goto out_free_dev;
+	} else if (adapter->params.nports == 1) {
+		/* If we don't have a connection to the firmware -- possibly
+		 * because of an error -- grab the raw VPD parameters so we
+		 * can set the proper MAC Address on the debug network
+		 * interface that we've created.
+		 */
+		u8 hw_addr[ETH_ALEN];
+		u8 *na = adapter->params.vpd.na;
+
+		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
+		if (!err) {
+			for (i = 0; i < ETH_ALEN; i++)
+				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
+					      hex2val(na[2 * i + 1]));
+			t4_set_hw_addr(adapter, 0, hw_addr);
+		}
 	}
 
-	/*
-	 * Configure queues and allocate tables now, they can be needed as
+	/* Configure queues and allocate tables now, they can be needed as
 	 * soon as the first register_netdev completes.
 	 */
 	cfg_queues(adapter);
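
The hex2val() loop above recovers the MAC address from the VPD's ASCII "NA" field, two hex digits per byte. A self-contained sketch of the same conversion; hex2val_sketch() is a hypothetical stand-in for the driver's helper:

#include <ctype.h>
#include <stdint.h>

#define ETH_ALEN 6

/* One ASCII hex digit to its numeric value. */
static uint8_t hex2val_sketch(char c)
{
	return isdigit((unsigned char)c) ? c - '0'
					 : tolower((unsigned char)c) - 'a' + 10;
}

/* "0007430a1b2c" -> { 0x00, 0x07, 0x43, 0x0a, 0x1b, 0x2c } */
static void na_to_mac(const char *na, uint8_t hw_addr[ETH_ALEN])
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		hw_addr[i] = hex2val_sketch(na[2 * i + 0]) * 16 +
			     hex2val_sketch(na[2 * i + 1]);
}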
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index e33934a..b27897d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -264,6 +264,7 @@
 	unsigned int max_ordird_qp;          /* Max ORD/IRD depth per RDMA QP */
 	unsigned int max_ird_adapter;        /* Max IRD memory per adapter */
 	bool ulptx_memwrite_dsgl;            /* use of T5 DSGL allowed */
+	int nodeid;			     /* device numa node id */
 };
 
 struct cxgb4_uld_info {
@@ -297,8 +298,6 @@
 				   unsigned int skb_len, unsigned int pull_len);
 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
 int cxgb4_flush_eq_cache(struct net_device *dev);
-void cxgb4_disable_db_coalescing(struct net_device *dev);
-void cxgb4_enable_db_coalescing(struct net_device *dev);
 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte);
 u64 cxgb4_read_sge_timestamp(struct net_device *dev);
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 1b99aec..942db07 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -100,16 +100,6 @@
  */
 #define TX_QCHECK_PERIOD (HZ / 2)
 
-/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
- * (in RX_QCHECK_PERIOD multiples).  If we find one of the SGE Ingress DMA
- * State Machines in the same state for this amount of time (in HZ) then we'll
- * issue a warning about a potential hang.  We'll repeat the warning as the
- * SGE Ingress DMA Channel appears to be hung every N RX_QCHECK_PERIODs till
- * the situation clears.  If the situation clears, we'll note that as well.
- */
-#define SGE_IDMA_WARN_THRESH (1 * HZ)
-#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
-
 /*
  * Max number of Tx descriptors to be reclaimed by the Tx timer.
  */
@@ -532,14 +522,17 @@
 
 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
 {
-	u32 val;
 	if (q->pend_cred >= 8) {
+		u32 val = adap->params.arch.sge_fl_db;
+
 		if (is_t4(adap->params.chip))
-			val = PIDX_V(q->pend_cred / 8);
+			val |= PIDX_V(q->pend_cred / 8);
 		else
-			val = PIDX_T5_V(q->pend_cred / 8) |
-				DBTYPE_F;
-		val |= DBPRIO_F;
+			val |= PIDX_T5_V(q->pend_cred / 8);
+
+		/* Make sure all memory writes to the Free List queue are
+		 * committed before we tell the hardware about them.
+		 */
 		wmb();
 
 		/* If we don't have access to the new User Doorbell (T5+), use
@@ -594,6 +587,11 @@
 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
 	int node;
 
+#ifdef CONFIG_DEBUG_FS
+	if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
+		goto out;
+#endif
+
 	gfp |= __GFP_NOWARN;
 	node = dev_to_node(adap->pdev_dev);
 
@@ -930,7 +928,10 @@
  */
 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
 {
-	wmb();            /* write descriptors before telling HW */
+	/* Make sure that all writes to the TX Descriptors are committed
+	 * before we tell the hardware about them.
+	 */
+	wmb();
 
 	/* If we don't have access to the new User Doorbell (T5+), use the old
 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
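
Both ring_fl_db() and ring_tx_db() follow the same ordering discipline: every descriptor and buffer write must be globally visible before the producer index reaches the chip, because the doorbell can trigger DMA immediately. A minimal sketch of the pattern, with a userspace full fence standing in for the kernel's wmb():

#include <stdint.h>

#define wmb() __sync_synchronize()	/* stand-in for the kernel barrier */

/* Producer-side doorbell: publish descriptors, fence, then tell the
 * hardware how far the producer index advanced.
 */
static void ring_db_sketch(volatile uint32_t *db_reg, uint32_t pidx_inc)
{
	/* ... descriptor/buffer writes happen before this point ... */

	wmb();			/* order them before the doorbell write */

	*db_reg = pidx_inc;	/* hardware may start DMA right away */
}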
@@ -1032,7 +1033,7 @@
  * Figure out what HW csum a packet wants and return the appropriate control
  * bits.
  */
-static u64 hwcsum(const struct sk_buff *skb)
+static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
 {
 	int csum_type;
 	const struct iphdr *iph = ip_hdr(skb);
@@ -1047,7 +1048,7 @@
 			 * unknown protocol, disable HW csum
 			 * and hope a bad packet is detected
 			 */
-			return TXPKT_L4CSUM_DIS;
+			return TXPKT_L4CSUM_DIS_F;
 		}
 	} else {
 		/*
@@ -1063,15 +1064,21 @@
 			goto nocsum;
 	}
 
-	if (likely(csum_type >= TX_CSUM_TCPIP))
-		return TXPKT_CSUM_TYPE(csum_type) |
-			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
-			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
-	else {
+	if (likely(csum_type >= TX_CSUM_TCPIP)) {
+		u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
+		int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
+
+		if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
+			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+		else
+			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
+	} else {
 		int start = skb_transport_offset(skb);
 
-		return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
-			TXPKT_CSUM_LOC(start + skb->csum_offset);
+		return TXPKT_CSUM_TYPE_V(csum_type) |
+			TXPKT_CSUM_START_V(start) |
+			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
 	}
 }
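
For the fallback branch above, the offsets handed to the hardware come straight from the CHECKSUM_PARTIAL contract: checksumming starts at the transport header and the 16-bit result is stored csum_offset bytes into it. A worked sketch of that arithmetic; the example numbers assume TCP over IPv4 with plain Ethernet framing:

/* With CHECKSUM_PARTIAL, hardware sums from skb_transport_offset(skb)
 * and writes the result at start + skb->csum_offset.  For TCP/IPv4
 * with a 14-byte Ethernet and 20-byte IP header:
 *
 *   start = 14 + 20                = 34
 *   loc   = 34 + 16 (tcphdr.check) = 50
 */
struct csum_hint {
	unsigned int start;	/* first byte the hardware sums */
	unsigned int loc;	/* where it writes the checksum */
};

static struct csum_hint csum_hint_sketch(unsigned int transport_offset,
					 unsigned int csum_offset)
{
	struct csum_hint h = { transport_offset,
			       transport_offset + csum_offset };
	return h;
}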
 
@@ -1112,11 +1119,11 @@
 		return -ENOTSUPP;
 
 	/* FC CRC offload */
-	*cntrl = TXPKT_CSUM_TYPE(TX_CSUM_FCOE) |
-		     TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS |
-		     TXPKT_CSUM_START(CXGB_FCOE_TXPKT_CSUM_START) |
-		     TXPKT_CSUM_END(CXGB_FCOE_TXPKT_CSUM_END) |
-		     TXPKT_CSUM_LOC(CXGB_FCOE_TXPKT_CSUM_END);
+	*cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
+		     TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
+		     TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
+		     TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
+		     TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
 	return 0;
 }
 #endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1130,7 +1137,6 @@
  */
 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	int len;
 	u32 wr_mid;
 	u64 cntrl, *end;
 	int qidx, credits;
@@ -1143,6 +1149,7 @@
 	const struct skb_shared_info *ssi;
 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
 	bool immediate = false;
+	int len, max_pkt_len;
 #ifdef CONFIG_CHELSIO_T4_FCOE
 	int err;
 #endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1156,13 +1163,20 @@
 		return NETDEV_TX_OK;
 	}
 
+	/* Discard the packet if the length is greater than mtu */
+	max_pkt_len = ETH_HLEN + dev->mtu;
+	if (skb_vlan_tag_present(skb))
+		max_pkt_len += VLAN_HLEN;
+	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+		goto out_free;
+
 	pi = netdev_priv(dev);
 	adap = pi->adapter;
 	qidx = skb_get_queue_mapping(skb);
 	q = &adap->sge.ethtxq[qidx + pi->first_qset];
 
 	reclaim_completed_tx(adap, &q->q, true);
-	cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
 
 #ifdef CONFIG_CHELSIO_T4_FCOE
 	err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
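
The new length guard amounts to: a frame that is not being segmented by GSO may not exceed the MTU plus its link-layer headers. A standalone sketch, assuming the usual 14-byte Ethernet and 4-byte 802.1Q header sizes:

#include <stdbool.h>

#define ETH_HLEN_SKETCH  14	/* Ethernet header */
#define VLAN_HLEN_SKETCH  4	/* 802.1Q tag */

/* True if a non-GSO frame should be dropped as oversized. */
static bool drop_oversized(unsigned int skb_len, unsigned int mtu,
			   bool has_vlan_tag, bool is_gso)
{
	unsigned int max_pkt_len = ETH_HLEN_SKETCH + mtu;

	if (has_vlan_tag)
		max_pkt_len += VLAN_HLEN_SKETCH;
	return !is_gso && skb_len > max_pkt_len;
}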
@@ -1213,23 +1227,29 @@
 		len += sizeof(*lso);
 		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
 				       FW_WR_IMMDLEN_V(len));
-		lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
-					LSO_FIRST_SLICE | LSO_LAST_SLICE |
-					LSO_IPV6(v6) |
-					LSO_ETHHDR_LEN(eth_xtra_len / 4) |
-					LSO_IPHDR_LEN(l3hdr_len / 4) |
-					LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+		lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+					LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+					LSO_IPV6_V(v6) |
+					LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+					LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+					LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
 		lso->c.ipid_ofst = htons(0);
 		lso->c.mss = htons(ssi->gso_size);
 		lso->c.seqno_offset = htonl(0);
 		if (is_t4(adap->params.chip))
 			lso->c.len = htonl(skb->len);
 		else
-			lso->c.len = htonl(LSO_T5_XFER_SIZE(skb->len));
+			lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
 		cpl = (void *)(lso + 1);
-		cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
-			TXPKT_IPHDR_LEN(l3hdr_len) |
-			TXPKT_ETHHDR_LEN(eth_xtra_len);
+
+		if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+			cntrl =	TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+		else
+			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+			 TXPKT_IPHDR_LEN_V(l3hdr_len);
 		q->tso++;
 		q->tx_cso += ssi->gso_segs;
 	} else {
@@ -1238,23 +1258,25 @@
 				       FW_WR_IMMDLEN_V(len));
 		cpl = (void *)(wr + 1);
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+			cntrl = hwcsum(adap->params.chip, skb) |
+				TXPKT_IPCSUM_DIS_F;
 			q->tx_cso++;
 		}
 	}
 
 	if (skb_vlan_tag_present(skb)) {
 		q->vlan_ins++;
-		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
+		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
 #ifdef CONFIG_CHELSIO_T4_FCOE
 		if (skb->protocol == htons(ETH_P_FCOE))
-			cntrl |= TXPKT_VLAN(
+			cntrl |= TXPKT_VLAN_V(
 				 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
 #endif /* CONFIG_CHELSIO_T4_FCOE */
 	}
 
-	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
-			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
+	cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+			   TXPKT_INTF_V(pi->tx_chan) |
+			   TXPKT_PF_V(adap->pf));
 	cpl->pack = htons(0);
 	cpl->len = htons(skb->len);
 	cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1964,7 +1986,7 @@
 static inline bool is_new_response(const struct rsp_ctrl *r,
 				   const struct sge_rspq *q)
 {
-	return RSPD_GEN(r->type_gen) == q->gen;
+	return (r->type_gen >> RSPD_GEN_S) == q->gen;
 }
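
is_new_response() is the classic generation-bit handshake for a hardware-produced ring: the producer writes a gen bit into every entry and flips it each time it wraps, so an entry is valid exactly while its gen bit matches the consumer's expectation. A compact consumer-side sketch:

#include <stdbool.h>

struct ring_sketch {
	unsigned int cidx;	/* consumer index */
	unsigned int size;	/* number of entries */
	unsigned int gen;	/* expected generation bit */
};

static bool entry_is_new(unsigned int entry_gen, const struct ring_sketch *r)
{
	return entry_gen == r->gen;
}

static void consume_one(struct ring_sketch *r)
{
	if (++r->cidx == r->size) {
		r->cidx = 0;
		r->gen ^= 1;	/* wrapped: valid entries now carry the
				 * opposite generation bit */
	}
}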
 
 /**
@@ -2011,19 +2033,19 @@
 			break;
 
 		dma_rmb();
-		rsp_type = RSPD_TYPE(rc->type_gen);
-		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+		rsp_type = RSPD_TYPE_G(rc->type_gen);
+		if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
 			struct page_frag *fp;
 			struct pkt_gl si;
 			const struct rx_sw_desc *rsd;
 			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
 
-			if (len & RSPD_NEWBUF) {
+			if (len & RSPD_NEWBUF_F) {
 				if (likely(q->offset > 0)) {
 					free_rx_bufs(q->adap, &rxq->fl, 1);
 					q->offset = 0;
 				}
-				len = RSPD_LEN(len);
+				len = RSPD_LEN_G(len);
 			}
 			si.tot_len = len;
 
@@ -2058,7 +2080,7 @@
 				q->offset += ALIGN(fp->size, s->fl_align);
 			else
 				restore_rx_bufs(&si, &rxq->fl, frags);
-		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
+		} else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
 			ret = q->handler(q, q->cur_desc, NULL);
 		} else {
 			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
@@ -2066,7 +2088,7 @@
 
 		if (unlikely(ret)) {
 			/* couldn't process descriptor, back off for recovery */
-			q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
+			q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
 			break;
 		}
 
@@ -2090,7 +2112,7 @@
 		return LL_FLUSH_BUSY;
 
 	work_done = process_responses(q, 4);
-	params = QINTR_TIMER_IDX(TIMERREG_COUNTER0_X) | QINTR_CNT_EN;
+	params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1);
 	q->next_intr_params = params;
 	val = CIDXINC_V(work_done) | SEINTARM_V(params);
 
@@ -2137,7 +2159,7 @@
 		int timer_index;
 
 		napi_complete(napi);
-		timer_index = QINTR_TIMER_IDX_GET(q->next_intr_params);
+		timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
 
 		if (q->adaptive_rx) {
 			if (work_done > max(timer_pkt_quota[timer_index],
@@ -2147,15 +2169,16 @@
 				timer_index = timer_index - 1;
 
 			timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
-			q->next_intr_params = QINTR_TIMER_IDX(timer_index) |
-							      V_QINTR_CNT_EN;
+			q->next_intr_params =
+					QINTR_TIMER_IDX_V(timer_index) |
+					QINTR_CNT_EN_V(0);
 			params = q->next_intr_params;
 		} else {
 			params = q->next_intr_params;
 			q->next_intr_params = q->intr_params;
 		}
 	} else
-		params = QINTR_TIMER_IDX(7);
+		params = QINTR_TIMER_IDX_V(7);
 
 	val = CIDXINC_V(work_done) | SEINTARM_V(params);
 
@@ -2203,7 +2226,7 @@
 			break;
 
 		dma_rmb();
-		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
+		if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
 			unsigned int qid = ntohl(rc->pldbuflen_qid);
 
 			qid -= adap->sge.ingr_start;
@@ -2279,7 +2302,7 @@
 static void sge_rx_timer_cb(unsigned long data)
 {
 	unsigned long m;
-	unsigned int i, idma_same_state_cnt[2];
+	unsigned int i;
 	struct adapter *adap = (struct adapter *)data;
 	struct sge *s = &adap->sge;
 
@@ -2300,67 +2323,16 @@
 					set_bit(id, s->starving_fl);
 			}
 		}
+	/* The remainder of the SGE RX Timer Callback routine is dedicated to
+	 * global Master PF activities like checking for chip ingress stalls,
+	 * etc.
+	 */
+	if (!(adap->flags & MASTER_PF))
+		goto done;
 
-	t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13);
-	idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A);
-	idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
+	t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
 
-	for (i = 0; i < 2; i++) {
-		u32 debug0, debug11;
-
-		/* If the Ingress DMA Same State Counter ("timer") is less
-		 * than 1s, then we can reset our synthesized Stall Timer and
-		 * continue.  If we have previously emitted warnings about a
-		 * potential stalled Ingress Queue, issue a note indicating
-		 * that the Ingress Queue has resumed forward progress.
-		 */
-		if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
-			if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
-				CH_WARN(adap, "SGE idma%d, queue%u,resumed after %d sec\n",
-					i, s->idma_qid[i],
-					s->idma_stalled[i]/HZ);
-			s->idma_stalled[i] = 0;
-			continue;
-		}
-
-		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
-		 * domain.  The first time we get here it'll be because we
-		 * passed the 1s Threshold; each additional time it'll be
-		 * because the RX Timer Callback is being fired on its regular
-		 * schedule.
-		 *
-		 * If the stall is below our Potential Hung Ingress Queue
-		 * Warning Threshold, continue.
-		 */
-		if (s->idma_stalled[i] == 0)
-			s->idma_stalled[i] = HZ;
-		else
-			s->idma_stalled[i] += RX_QCHECK_PERIOD;
-
-		if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
-			continue;
-
-		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
-		if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
-			continue;
-
-		/* Read and save the SGE IDMA State and Queue ID information.
-		 * We do this every time in case it changes across time ...
-		 */
-		t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0);
-		debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
-		s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
-
-		t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11);
-		debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
-		s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
-
-		CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
-			i, s->idma_qid[i], s->idma_state[i],
-			s->idma_stalled[i]/HZ, debug0, debug11);
-		t4_sge_decode_idma_state(adap, s->idma_state[i]);
-	}
-
+done:
 	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
 }
 
@@ -2429,17 +2401,20 @@
 	u64 bar2_qoffset;
 	int ret;
 
-	ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype, 0,
-				      &bar2_qoffset, pbar2_qid);
+	ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
+				&bar2_qoffset, pbar2_qid);
 	if (ret)
 		return NULL;
 
 	return adapter->bar2 + bar2_qoffset;
 }
 
+/* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
+ * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
+ */
 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 		     struct net_device *dev, int intr_idx,
-		     struct sge_fl *fl, rspq_handler_t hnd)
+		     struct sge_fl *fl, rspq_handler_t hnd, int cong)
 {
 	int ret, flsz = 0;
 	struct fw_iq_cmd c;
@@ -2457,12 +2432,13 @@
 	memset(&c, 0, sizeof(c));
 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
-			    FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0));
+			    FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
 	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
 				 FW_LEN16(c));
 	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
 		FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
-		FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) |
+		FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
+		FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
 		FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
 							-intr_idx - 1));
 	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
@@ -2471,8 +2447,21 @@
 		FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
 	c.iqsize = htons(iq->size);
 	c.iqaddr = cpu_to_be64(iq->phys_addr);
+	if (cong >= 0)
+		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
 
 	if (fl) {
+		enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+		/* Allocate the ring for the hardware free list (with space
+		 * for its status page) along with the associated software
+		 * descriptor ring.  The free list size needs to be a multiple
+		 * of the Egress Queue Unit and at least 2 Egress Units larger
+		 * than the SGE's Egress Congestion Threshold
+		 * (fl_starve_thres - 1).
+		 */
+		if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
+			fl->size = s->fl_starve_thres - 1 + 2 * 8;
 		fl->size = roundup(fl->size, 8);
 		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
 				      sizeof(struct rx_sw_desc), &fl->addr,
@@ -2481,17 +2470,25 @@
 			goto fl_nomem;
 
 		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
-		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN_F |
-					    FW_IQ_CMD_FL0FETCHRO_F |
-					    FW_IQ_CMD_FL0DATARO_F |
-					    FW_IQ_CMD_FL0PADEN_F);
-		c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) |
-				FW_IQ_CMD_FL0FBMAX_V(3));
+		c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
+					     FW_IQ_CMD_FL0FETCHRO_F |
+					     FW_IQ_CMD_FL0DATARO_F |
+					     FW_IQ_CMD_FL0PADEN_F);
+		if (cong >= 0)
+			c.iqns_to_fl0congen |=
+				htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
+				      FW_IQ_CMD_FL0CONGCIF_F |
+				      FW_IQ_CMD_FL0CONGEN_F);
+		c.fl0dcaen_to_fl0cidxfthresh =
+			htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) |
+			      FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
+						   FETCHBURSTMAX_512B_X :
+						   FETCHBURSTMAX_256B_X));
 		c.fl0size = htons(flsz);
 		c.fl0addr = cpu_to_be64(fl->addr);
 	}
 
-	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
 	if (ret)
 		goto err;
 
@@ -2532,6 +2529,41 @@
 					     &fl->bar2_qid);
 		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
 	}
+
+	/* For T5 and later we attempt to set up the Congestion Manager values
+	 * of the new RX Ethernet Queue.  This should really be handled by
+	 * firmware because it's more complex than any host driver wants to
+	 * get involved with and it's different per chip and this is almost
+	 * certainly wrong.  Firmware would be wrong as well, but it would be
+	 * a lot easier to fix in one place ...  For now we do something very
+	 * simple (and hopefully less wrong).
+	 */
+	if (!is_t4(adap->params.chip) && cong >= 0) {
+		u32 param, val;
+		int i;
+
+		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
+			 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
+		if (cong == 0) {
+			val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
+		} else {
+			val =
+			    CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
+			for (i = 0; i < 4; i++) {
+				if (cong & (1 << i))
+					val |=
+					     CONMCTXT_CNGCHMAP_V(1 << (i << 2));
+			}
+		}
+		ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+				    &param, &val);
+		if (ret)
+			dev_warn(adap->pdev_dev,
+				 "Failed to set Congestion Manager Context for Ingress Queue %d: %d\n",
+				 iq->cntxt_id, -ret);
+	}
+
 	return 0;
 
 fl_nomem:
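
In the channel-mapped case above, each set bit i of @cong names one of four channels, and the context wants bit (i * 4) of the CNGCHMAP field set, hence the 1 << (i << 2). Worked out for a hypothetical cong = 0x5 (channels 0 and 2):

/* CNGCHMAP carries one flag per channel at 4-bit spacing.  For
 * cong = 0x5:
 *
 *   i = 0: 1 << (0 << 2) = 0x001
 *   i = 2: 1 << (2 << 2) = 0x100
 *   map                 -> 0x101
 */
static unsigned int cng_chmap_sketch(unsigned int cong)
{
	unsigned int map = 0;
	int i;

	for (i = 0; i < 4; i++)
		if (cong & (1 << i))
			map |= 1u << (i << 2);
	return map;
}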
@@ -2589,23 +2621,24 @@
 	memset(&c, 0, sizeof(c));
 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
-			    FW_EQ_ETH_CMD_PFN_V(adap->fn) |
+			    FW_EQ_ETH_CMD_PFN_V(adap->pf) |
 			    FW_EQ_ETH_CMD_VFN_V(0));
 	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
 				 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
 	c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
 			   FW_EQ_ETH_CMD_VIID_V(pi->viid));
-	c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) |
-				   FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
-				   FW_EQ_ETH_CMD_FETCHRO_V(1) |
-				   FW_EQ_ETH_CMD_IQID_V(iqid));
-	c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) |
-				  FW_EQ_ETH_CMD_FBMAX_V(3) |
-				  FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) |
-				  FW_EQ_ETH_CMD_EQSIZE_V(nentries));
+	c.fetchszm_to_iqid =
+		htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+		      FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
+		      FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
+	c.dcaen_to_eqsize =
+		htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+		      FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+		      FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+		      FW_EQ_ETH_CMD_EQSIZE_V(nentries));
 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
 
-	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
 	if (ret) {
 		kfree(txq->q.sdesc);
 		txq->q.sdesc = NULL;
@@ -2637,29 +2670,30 @@
 
 	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
 				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
-				 NULL, 0, NUMA_NO_NODE);
+				 NULL, 0, dev_to_node(adap->pdev_dev));
 	if (!txq->q.desc)
 		return -ENOMEM;
 
 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
-			    FW_EQ_CTRL_CMD_PFN_V(adap->fn) |
+			    FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
 			    FW_EQ_CTRL_CMD_VFN_V(0));
 	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
 				 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
 	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
 	c.physeqid_pkd = htonl(0);
-	c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) |
-				   FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
-				   FW_EQ_CTRL_CMD_FETCHRO_F |
-				   FW_EQ_CTRL_CMD_IQID_V(iqid));
-	c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) |
-				  FW_EQ_CTRL_CMD_FBMAX_V(3) |
-				  FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) |
-				  FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
+	c.fetchszm_to_iqid =
+		htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+		      FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
+		      FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
+	c.dcaen_to_eqsize =
+		htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+		      FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+		      FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+		      FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
 
-	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
 	if (ret) {
 		dma_free_coherent(adap->pdev_dev,
 				  nentries * sizeof(struct tx_desc),
@@ -2697,21 +2731,22 @@
 	memset(&c, 0, sizeof(c));
 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
-			    FW_EQ_OFLD_CMD_PFN_V(adap->fn) |
+			    FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
 			    FW_EQ_OFLD_CMD_VFN_V(0));
 	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
 				 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
-	c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) |
-				   FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
-				   FW_EQ_OFLD_CMD_FETCHRO_F |
-				   FW_EQ_OFLD_CMD_IQID_V(iqid));
-	c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) |
-				  FW_EQ_OFLD_CMD_FBMAX_V(3) |
-				  FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) |
-				  FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
+	c.fetchszm_to_iqid =
+		htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+		      FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
+		      FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
+	c.dcaen_to_eqsize =
+		htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+		      FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+		      FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+		      FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
 
-	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
 	if (ret) {
 		kfree(txq->q.sdesc);
 		txq->q.sdesc = NULL;
@@ -2750,7 +2785,7 @@
 	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
 
 	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
-	t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
+	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
 		   rq->cntxt_id, fl_id, 0xffff);
 	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
 			  rq->desc, rq->phys_addr);
@@ -2805,7 +2840,7 @@
 			free_rspq_fl(adap, &eq->rspq,
 				     eq->fl.size ? &eq->fl : NULL);
 		if (etq->q.desc) {
-			t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
+			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
 				       etq->q.cntxt_id);
 			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
 			kfree(etq->q.sdesc);
@@ -2824,7 +2859,7 @@
 
 		if (q->q.desc) {
 			tasklet_kill(&q->qresume_tsk);
-			t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
+			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
 					q->q.cntxt_id);
 			free_tx_desc(adap, &q->q, q->q.in_use, false);
 			kfree(q->q.sdesc);
@@ -2839,7 +2874,7 @@
 
 		if (cq->q.desc) {
 			tasklet_kill(&cq->qresume_tsk);
-			t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
+			t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
 					cq->q.cntxt_id);
 			__skb_queue_purge(&cq->sendq);
 			free_txq(adap, &cq->q);
@@ -3023,7 +3058,11 @@
 	 * Packing Boundary.  T5 introduced the ability to specify these
 	 * separately.  The actual Ingress Packet Data alignment boundary
 	 * within Packed Buffer Mode is the maximum of these two
-	 * specifications.
+	 * specifications.  (Note that it makes no real practical sense to
+	 * have the Padding Boundary be larger than the Packing Boundary but you
+	 * could set the chip up that way and, in fact, legacy T4 code would
+	 * end up doing this because it would initialize the Padding Boundary and
+	 * leave the Packing Boundary initialized to 0 (16 bytes).)
 	 */
 	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
 			       INGPADBOUNDARY_SHIFT_X);
@@ -3067,11 +3106,14 @@
 		egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
 	s->fl_starve_thres = 2*egress_threshold + 1;
 
+	t4_idma_monitor_init(adap, &s->idma_monitor);
+
+	/* Set up timers used for recurring callbacks to process RX and TX
+	 * administrative tasks.
+	 */
 	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
 	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
-	s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000;  /* 1 s */
-	s->idma_stalled[0] = 0;
-	s->idma_stalled[1] = 0;
+
 	spin_lock_init(&s->intrq_lock);
 
 	return 0;
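
Per the comment above, the effective ingress data alignment in Packed Buffer Mode is the larger of the Padding and Packing Boundaries, with the padding field encoded as a power of two above a chip-defined base. A minimal sketch, assuming a 32-byte base (i.e. INGPADBOUNDARY_SHIFT_X = 5):

/* Effective free-list buffer alignment: max of the decoded Padding
 * Boundary and the Packing Boundary (both in bytes).
 */
#define INGPAD_SHIFT_SKETCH 5	/* assumed: minimum boundary 2^5 = 32 */

static unsigned int fl_align_sketch(unsigned int padboundary_field,
				    unsigned int packboundary_bytes)
{
	unsigned int pad = 1u << (padboundary_field + INGPAD_SHIFT_SKETCH);

	return pad > packboundary_bytes ? pad : packboundary_bytes;
}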
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 61d8b3e..2b52aae 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -150,7 +150,12 @@
  */
 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
 {
-	u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg);
+	u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
+
+	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+		req |= ENABLE_F;
+	else
+		req |= T6_ENABLE_F;
 
 	if (is_t4(adap->params.chip))
 		req |= LOCALCFG_F;
@@ -214,8 +219,8 @@
 	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
 	dev_alert(adap->pdev_dev,
 		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
-		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
-		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
+		  asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
+		  be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
 }
 
 static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
@@ -233,13 +238,14 @@
 }
 
 /**
- *	t4_wr_mbox_meat - send a command to FW through the given mailbox
+ *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
  *	@adap: the adapter
  *	@mbox: index of the mailbox to use
  *	@cmd: the command to write
  *	@size: command length in bytes
  *	@rpl: where to optionally store the reply
  *	@sleep_ok: if true we may sleep while awaiting command completion
+ *	@timeout: time to wait for command to finish before timing out
  *
  *	Sends the given command to FW through the selected mailbox and waits
  *	for the FW to execute the command.  If @rpl is not %NULL it is used to
@@ -254,8 +260,8 @@
  *	command or FW executes it but signals an error.  In the latter case
  *	the return value is the error code indicated by FW (negated).
  */
-int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
-		    void *rpl, bool sleep_ok)
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+			    int size, void *rpl, bool sleep_ok, int timeout)
 {
 	static const int delay[] = {
 		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
@@ -294,7 +300,7 @@
 	delay_idx = 0;
 	ms = delay[0];
 
-	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
+	for (i = 0; i < timeout; i += ms) {
 		if (sleep_ok) {
 			ms = delay[delay_idx];  /* last element may repeat */
 			if (delay_idx < ARRAY_SIZE(delay) - 1)
@@ -332,114 +338,11 @@
 	return -ETIMEDOUT;
 }
 
-/**
- *	t4_mc_read - read from MC through backdoor accesses
- *	@adap: the adapter
- *	@addr: address of first byte requested
- *	@idx: which MC to access
- *	@data: 64 bytes of data containing the requested address
- *	@ecc: where to store the corresponding 64-bit ECC word
- *
- *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
- *	that covers the requested address @addr.  If @parity is not %NULL it
- *	is assigned the 64-bit ECC word for the read data.
- */
-int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+		    void *rpl, bool sleep_ok)
 {
-	int i;
-	u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
-	u32 mc_bist_status_rdata, mc_bist_data_pattern;
-
-	if (is_t4(adap->params.chip)) {
-		mc_bist_cmd = MC_BIST_CMD_A;
-		mc_bist_cmd_addr = MC_BIST_CMD_ADDR_A;
-		mc_bist_cmd_len = MC_BIST_CMD_LEN_A;
-		mc_bist_status_rdata = MC_BIST_STATUS_RDATA_A;
-		mc_bist_data_pattern = MC_BIST_DATA_PATTERN_A;
-	} else {
-		mc_bist_cmd = MC_REG(MC_P_BIST_CMD_A, idx);
-		mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
-		mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
-		mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
-		mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
-	}
-
-	if (t4_read_reg(adap, mc_bist_cmd) & START_BIST_F)
-		return -EBUSY;
-	t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
-	t4_write_reg(adap, mc_bist_cmd_len, 64);
-	t4_write_reg(adap, mc_bist_data_pattern, 0xc);
-	t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE_V(1) | START_BIST_F |
-		     BIST_CMD_GAP_V(1));
-	i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST_F, 0, 10, 1);
-	if (i)
-		return i;
-
-#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)
-
-	for (i = 15; i >= 0; i--)
-		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
-	if (ecc)
-		*ecc = t4_read_reg64(adap, MC_DATA(16));
-#undef MC_DATA
-	return 0;
-}
-
-/**
- *	t4_edc_read - read from EDC through backdoor accesses
- *	@adap: the adapter
- *	@idx: which EDC to access
- *	@addr: address of first byte requested
- *	@data: 64 bytes of data containing the requested address
- *	@ecc: where to store the corresponding 64-bit ECC word
- *
- *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
- *	that covers the requested address @addr.  If @parity is not %NULL it
- *	is assigned the 64-bit ECC word for the read data.
- */
-int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
-{
-	int i;
-	u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
-	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
-
-	if (is_t4(adap->params.chip)) {
-		edc_bist_cmd = EDC_REG(EDC_BIST_CMD_A, idx);
-		edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR_A, idx);
-		edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN_A, idx);
-		edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN_A,
-						    idx);
-		edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA_A,
-						idx);
-	} else {
-		edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
-		edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
-		edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
-		edc_bist_cmd_data_pattern =
-			EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
-		edc_bist_status_rdata =
-			 EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
-	}
-
-	if (t4_read_reg(adap, edc_bist_cmd) & START_BIST_F)
-		return -EBUSY;
-	t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
-	t4_write_reg(adap, edc_bist_cmd_len, 64);
-	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
-	t4_write_reg(adap, edc_bist_cmd,
-		     BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F);
-	i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST_F, 0, 10, 1);
-	if (i)
-		return i;
-
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))
-
-	for (i = 15; i >= 0; i--)
-		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
-	if (ecc)
-		*ecc = t4_read_reg64(adap, EDC_DATA(16));
-#undef EDC_DATA
-	return 0;
+	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
+				       FW_CMD_MAX_TIMEOUT);
 }
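
With the timeout factored out, t4_wr_mbox() keeps its old FW_CMD_MAX_TIMEOUT behaviour while slow paths can ask for more; the PHY firmware download later in this patch does exactly that through t4_set_params_timeout(..., 30000). A hypothetical convenience wrapper along the same lines:

/* Hypothetical wrapper for commands known to be slow (flash writes,
 * PHY operations): allow 30 seconds instead of FW_CMD_MAX_TIMEOUT.
 */
static int t4_wr_mbox_slow(struct adapter *adap, int mbox, const void *cmd,
			   int size, void *rpl)
{
	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
				       true, 30000);
}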
 
 /**
@@ -483,9 +386,8 @@
 	/* Offset into the region of memory which is being accessed
 	 * MEM_EDC0 = 0
 	 * MEM_EDC1 = 1
-	 * MEM_MC   = 2 -- T4
-	 * MEM_MC0  = 2 -- For T5
-	 * MEM_MC1  = 3 -- For T5
+	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
+	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
 	 */
 	edc_size  = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
 	if (mtype != MEM_MC1)
@@ -514,7 +416,7 @@
 	mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
 	if (is_t4(adap->params.chip))
 		mem_base -= adap->t4_bar0;
-	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn);
+	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
 
 	/* Calculate our initial PCI-E Memory Window Position and Offset into
 	 * that Window.
@@ -625,6 +527,102 @@
 	return 0;
 }
 
+/* Return the specified PCI-E Configuration Space register from our Physical
+ * Function.  We try first via a Firmware LDST Command since we prefer to let
+ * the firmware own all of these registers, but if that fails we go for it
+ * directly ourselves.
+ */
+u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
+{
+	u32 val, ldst_addrspace;
+
+	/* If fw_attach != 0, construct and send the Firmware LDST Command to
+	 * retrieve the specified PCI-E Configuration Space register.
+	 */
+	struct fw_ldst_cmd ldst_cmd;
+	int ret;
+
+	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
+	ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+					       FW_CMD_REQUEST_F |
+					       FW_CMD_READ_F |
+					       ldst_addrspace);
+	ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
+	ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
+	ldst_cmd.u.pcie.ctrl_to_fn =
+		(FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
+	ldst_cmd.u.pcie.r = reg;
+
+	/* If the LDST Command succeeds, return the result, otherwise
+	 * fall through to reading it directly ourselves ...
+	 */
+	ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
+			 &ldst_cmd);
+	if (ret == 0)
+		val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
+	else
+		/* Read the desired Configuration Space register via the PCI-E
+		 * Backdoor mechanism.
+		 */
+		t4_hw_pci_read_cfg4(adap, reg, &val);
+	return val;
+}
+
+/* Get the window based on the base passed to it.
+ * Window aperture is currently unhandled, but there is no use case for it
+ * right now.
+ */
+static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
+			 u32 memwin_base)
+{
+	u32 ret;
+
+	if (is_t4(adap->params.chip)) {
+		u32 bar0;
+
+		/* Truncation intentional: we only read the bottom 32-bits of
+		 * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
+		 * mechanism to read BAR0 instead of using
+		 * pci_resource_start() because we could be operating from
+		 * within a Virtual Machine which is trapping our accesses to
+		 * our Configuration Space and we need to set up the PCI-E
+		 * Memory Window decoders with the actual addresses which will
+		 * be coming across the PCI-E link.
+		 */
+		bar0 = t4_read_pcie_cfg4(adap, pci_base);
+		bar0 &= pci_mask;
+		adap->t4_bar0 = bar0;
+
+		ret = bar0 + memwin_base;
+	} else {
+		/* For T5, only relative offset inside the PCIe BAR is passed */
+		ret = memwin_base;
+	}
+	return ret;
+}
+
+/* Get the default utility window (win0) used by everyone */
+u32 t4_get_util_window(struct adapter *adap)
+{
+	return t4_get_window(adap, PCI_BASE_ADDRESS_0,
+			     PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
+}
+
+/* Set up memory window for accessing adapter memory ranges.  (Read
+ * back MA register to ensure that changes propagate before we attempt
+ * to use the new values.)
+ */
+void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
+{
+	t4_write_reg(adap,
+		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
+		     memwin_base | BIR_V(0) |
+		     WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
+	t4_read_reg(adap,
+		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
+}
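
The WINDOW field written above encodes the aperture as log2(bytes) minus a chip-defined shift. A worked sketch, assuming the 2 KB utility-window aperture (MEMWIN0_APERTURE) and WINDOW_SHIFT_X = 10:

/* ilog2(2048) - 10 = 11 - 10 = 1, so a 2 KB window programs
 * WINDOW_V(1).
 */
static unsigned int window_field_sketch(unsigned int aperture_bytes)
{
	unsigned int lg = 0;

	while (aperture_bytes >> (lg + 1))	/* open-coded ilog2() */
		lg++;
	return lg - 10;		/* WINDOW_SHIFT_X, assumed to be 10 */
}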
+
 /**
 *	t4_get_regs_len - return the size of the chip's register set
  *	@adapter: the adapter
@@ -640,6 +638,7 @@
 		return T4_REGMAP_SIZE;
 
 	case CHELSIO_T5:
+	case CHELSIO_T6:
 		return T5_REGMAP_SIZE;
 	}
 
@@ -666,7 +665,8 @@
 		0x11fc, 0x123c,
 		0x1300, 0x173c,
 		0x1800, 0x18fc,
-		0x3000, 0x30d8,
+		0x3000, 0x305c,
+		0x3068, 0x30d8,
 		0x30e0, 0x5924,
 		0x5960, 0x59d4,
 		0x5a00, 0x5af8,
@@ -729,7 +729,7 @@
 		0x19238, 0x1924c,
 		0x193f8, 0x19474,
 		0x19490, 0x194f8,
-		0x19800, 0x19f30,
+		0x19800, 0x19f4c,
 		0x1a000, 0x1a06c,
 		0x1a0b0, 0x1a120,
 		0x1a128, 0x1a138,
@@ -878,7 +878,7 @@
 		0x27780, 0x2778c,
 		0x27800, 0x27c38,
 		0x27c80, 0x27d7c,
-		0x27e00, 0x27e04
+		0x27e00, 0x27e04,
 	};
 
 	static const unsigned int t5_reg_ranges[] = {
@@ -888,7 +888,7 @@
 		0x1280, 0x173c,
 		0x1800, 0x18fc,
 		0x3000, 0x3028,
-		0x3060, 0x30d8,
+		0x3068, 0x30d8,
 		0x30e0, 0x30fc,
 		0x3140, 0x357c,
 		0x35a8, 0x35cc,
@@ -900,7 +900,7 @@
 		0x5940, 0x59dc,
 		0x59fc, 0x5a18,
 		0x5a60, 0x5a9c,
-		0x5b9c, 0x5bfc,
+		0x5b94, 0x5bfc,
 		0x6000, 0x6040,
 		0x6058, 0x614c,
 		0x7700, 0x7798,
@@ -1014,27 +1014,30 @@
 		0x30800, 0x30834,
 		0x308c0, 0x30908,
 		0x30910, 0x309ac,
-		0x30a00, 0x30a04,
-		0x30a0c, 0x30a2c,
+		0x30a00, 0x30a2c,
 		0x30a44, 0x30a50,
 		0x30a74, 0x30c24,
+		0x30d00, 0x30d00,
 		0x30d08, 0x30d14,
 		0x30d1c, 0x30d20,
 		0x30d3c, 0x30d50,
 		0x31200, 0x3120c,
 		0x31220, 0x31220,
 		0x31240, 0x31240,
-		0x31600, 0x31600,
-		0x31608, 0x3160c,
+		0x31600, 0x3160c,
 		0x31a00, 0x31a1c,
-		0x31e04, 0x31e20,
+		0x31e00, 0x31e20,
 		0x31e38, 0x31e3c,
 		0x31e80, 0x31e80,
 		0x31e88, 0x31ea8,
 		0x31eb0, 0x31eb4,
 		0x31ec8, 0x31ed4,
 		0x31fb8, 0x32004,
-		0x32208, 0x3223c,
+		0x32200, 0x32200,
+		0x32208, 0x32240,
+		0x32248, 0x32280,
+		0x32288, 0x322c0,
+		0x322c8, 0x322fc,
 		0x32600, 0x32630,
 		0x32a00, 0x32abc,
 		0x32b00, 0x32b70,
@@ -1074,27 +1077,30 @@
 		0x34800, 0x34834,
 		0x348c0, 0x34908,
 		0x34910, 0x349ac,
-		0x34a00, 0x34a04,
-		0x34a0c, 0x34a2c,
+		0x34a00, 0x34a2c,
 		0x34a44, 0x34a50,
 		0x34a74, 0x34c24,
+		0x34d00, 0x34d00,
 		0x34d08, 0x34d14,
 		0x34d1c, 0x34d20,
 		0x34d3c, 0x34d50,
 		0x35200, 0x3520c,
 		0x35220, 0x35220,
 		0x35240, 0x35240,
-		0x35600, 0x35600,
-		0x35608, 0x3560c,
+		0x35600, 0x3560c,
 		0x35a00, 0x35a1c,
-		0x35e04, 0x35e20,
+		0x35e00, 0x35e20,
 		0x35e38, 0x35e3c,
 		0x35e80, 0x35e80,
 		0x35e88, 0x35ea8,
 		0x35eb0, 0x35eb4,
 		0x35ec8, 0x35ed4,
 		0x35fb8, 0x36004,
-		0x36208, 0x3623c,
+		0x36200, 0x36200,
+		0x36208, 0x36240,
+		0x36248, 0x36280,
+		0x36288, 0x362c0,
+		0x362c8, 0x362fc,
 		0x36600, 0x36630,
 		0x36a00, 0x36abc,
 		0x36b00, 0x36b70,
@@ -1134,27 +1140,30 @@
 		0x38800, 0x38834,
 		0x388c0, 0x38908,
 		0x38910, 0x389ac,
-		0x38a00, 0x38a04,
-		0x38a0c, 0x38a2c,
+		0x38a00, 0x38a2c,
 		0x38a44, 0x38a50,
 		0x38a74, 0x38c24,
+		0x38d00, 0x38d00,
 		0x38d08, 0x38d14,
 		0x38d1c, 0x38d20,
 		0x38d3c, 0x38d50,
 		0x39200, 0x3920c,
 		0x39220, 0x39220,
 		0x39240, 0x39240,
-		0x39600, 0x39600,
-		0x39608, 0x3960c,
+		0x39600, 0x3960c,
 		0x39a00, 0x39a1c,
-		0x39e04, 0x39e20,
+		0x39e00, 0x39e20,
 		0x39e38, 0x39e3c,
 		0x39e80, 0x39e80,
 		0x39e88, 0x39ea8,
 		0x39eb0, 0x39eb4,
 		0x39ec8, 0x39ed4,
 		0x39fb8, 0x3a004,
-		0x3a208, 0x3a23c,
+		0x3a200, 0x3a200,
+		0x3a208, 0x3a240,
+		0x3a248, 0x3a280,
+		0x3a288, 0x3a2c0,
+		0x3a2c8, 0x3a2fc,
 		0x3a600, 0x3a630,
 		0x3aa00, 0x3aabc,
 		0x3ab00, 0x3ab70,
@@ -1194,27 +1203,30 @@
 		0x3c800, 0x3c834,
 		0x3c8c0, 0x3c908,
 		0x3c910, 0x3c9ac,
-		0x3ca00, 0x3ca04,
-		0x3ca0c, 0x3ca2c,
+		0x3ca00, 0x3ca2c,
 		0x3ca44, 0x3ca50,
 		0x3ca74, 0x3cc24,
+		0x3cd00, 0x3cd00,
 		0x3cd08, 0x3cd14,
 		0x3cd1c, 0x3cd20,
 		0x3cd3c, 0x3cd50,
 		0x3d200, 0x3d20c,
 		0x3d220, 0x3d220,
 		0x3d240, 0x3d240,
-		0x3d600, 0x3d600,
-		0x3d608, 0x3d60c,
+		0x3d600, 0x3d60c,
 		0x3da00, 0x3da1c,
-		0x3de04, 0x3de20,
+		0x3de00, 0x3de20,
 		0x3de38, 0x3de3c,
 		0x3de80, 0x3de80,
 		0x3de88, 0x3dea8,
 		0x3deb0, 0x3deb4,
 		0x3dec8, 0x3ded4,
 		0x3dfb8, 0x3e004,
-		0x3e208, 0x3e23c,
+		0x3e200, 0x3e200,
+		0x3e208, 0x3e240,
+		0x3e248, 0x3e280,
+		0x3e288, 0x3e2c0,
+		0x3e2c8, 0x3e2fc,
 		0x3e600, 0x3e630,
 		0x3ea00, 0x3eabc,
 		0x3eb00, 0x3eb70,
@@ -1247,7 +1259,7 @@
 		0x3fcf0, 0x3fcfc,
 		0x40000, 0x4000c,
 		0x40040, 0x40068,
-		0x40080, 0x40144,
+		0x4007c, 0x40144,
 		0x40180, 0x4018c,
 		0x40200, 0x40298,
 		0x402ac, 0x4033c,
@@ -1275,7 +1287,7 @@
 		0x47800, 0x47814,
 		0x48000, 0x4800c,
 		0x48040, 0x48068,
-		0x48080, 0x48144,
+		0x4807c, 0x48144,
 		0x48180, 0x4818c,
 		0x48200, 0x48298,
 		0x482ac, 0x4833c,
@@ -1309,6 +1321,344 @@
 		0x51300, 0x51308,
 	};
 
+	static const unsigned int t6_reg_ranges[] = {
+		0x1008, 0x114c,
+		0x1180, 0x11b4,
+		0x11fc, 0x1250,
+		0x1280, 0x133c,
+		0x1800, 0x18fc,
+		0x3000, 0x302c,
+		0x3060, 0x30d8,
+		0x30e0, 0x30fc,
+		0x3140, 0x357c,
+		0x35a8, 0x35cc,
+		0x35ec, 0x35ec,
+		0x3600, 0x5624,
+		0x56cc, 0x575c,
+		0x580c, 0x5814,
+		0x5890, 0x58bc,
+		0x5940, 0x595c,
+		0x5980, 0x598c,
+		0x59b0, 0x59dc,
+		0x59fc, 0x5a18,
+		0x5a60, 0x5a6c,
+		0x5a80, 0x5a9c,
+		0x5b94, 0x5bfc,
+		0x5c10, 0x5ec0,
+		0x5ec8, 0x5ec8,
+		0x6000, 0x6040,
+		0x6058, 0x6154,
+		0x7700, 0x7798,
+		0x77c0, 0x7880,
+		0x78cc, 0x78fc,
+		0x7b00, 0x7c54,
+		0x7d00, 0x7efc,
+		0x8dc0, 0x8de0,
+		0x8df8, 0x8e84,
+		0x8ea0, 0x8f88,
+		0x8fb8, 0x911c,
+		0x9400, 0x9470,
+		0x9600, 0x971c,
+		0x9800, 0x9808,
+		0x9820, 0x983c,
+		0x9850, 0x9864,
+		0x9c00, 0x9c6c,
+		0x9c80, 0x9cec,
+		0x9d00, 0x9d6c,
+		0x9d80, 0x9dec,
+		0x9e00, 0x9e6c,
+		0x9e80, 0x9eec,
+		0x9f00, 0x9f6c,
+		0x9f80, 0xa020,
+		0xd004, 0xd03c,
+		0xdfc0, 0xdfe0,
+		0xe000, 0xf008,
+		0x11000, 0x11014,
+		0x11048, 0x11110,
+		0x11118, 0x1117c,
+		0x11190, 0x11260,
+		0x11300, 0x1130c,
+		0x12000, 0x1205c,
+		0x19040, 0x1906c,
+		0x19078, 0x19080,
+		0x1908c, 0x19124,
+		0x19150, 0x191b0,
+		0x191d0, 0x191e8,
+		0x19238, 0x192b8,
+		0x193f8, 0x19474,
+		0x19490, 0x194cc,
+		0x194f0, 0x194f8,
+		0x19c00, 0x19c80,
+		0x19c94, 0x19cbc,
+		0x19ce4, 0x19d28,
+		0x19d50, 0x19d78,
+		0x19d94, 0x19dc8,
+		0x19df0, 0x19e10,
+		0x19e50, 0x19e6c,
+		0x19ea0, 0x19f34,
+		0x19f40, 0x19f50,
+		0x19f90, 0x19fac,
+		0x19fc4, 0x19fe4,
+		0x1a000, 0x1a06c,
+		0x1a0b0, 0x1a120,
+		0x1a128, 0x1a138,
+		0x1a190, 0x1a1c4,
+		0x1a1fc, 0x1a1fc,
+		0x1e008, 0x1e00c,
+		0x1e040, 0x1e04c,
+		0x1e284, 0x1e290,
+		0x1e2c0, 0x1e2c0,
+		0x1e2e0, 0x1e2e0,
+		0x1e300, 0x1e384,
+		0x1e3c0, 0x1e3c8,
+		0x1e408, 0x1e40c,
+		0x1e440, 0x1e44c,
+		0x1e684, 0x1e690,
+		0x1e6c0, 0x1e6c0,
+		0x1e6e0, 0x1e6e0,
+		0x1e700, 0x1e784,
+		0x1e7c0, 0x1e7c8,
+		0x1e808, 0x1e80c,
+		0x1e840, 0x1e84c,
+		0x1ea84, 0x1ea90,
+		0x1eac0, 0x1eac0,
+		0x1eae0, 0x1eae0,
+		0x1eb00, 0x1eb84,
+		0x1ebc0, 0x1ebc8,
+		0x1ec08, 0x1ec0c,
+		0x1ec40, 0x1ec4c,
+		0x1ee84, 0x1ee90,
+		0x1eec0, 0x1eec0,
+		0x1eee0, 0x1eee0,
+		0x1ef00, 0x1ef84,
+		0x1efc0, 0x1efc8,
+		0x1f008, 0x1f00c,
+		0x1f040, 0x1f04c,
+		0x1f284, 0x1f290,
+		0x1f2c0, 0x1f2c0,
+		0x1f2e0, 0x1f2e0,
+		0x1f300, 0x1f384,
+		0x1f3c0, 0x1f3c8,
+		0x1f408, 0x1f40c,
+		0x1f440, 0x1f44c,
+		0x1f684, 0x1f690,
+		0x1f6c0, 0x1f6c0,
+		0x1f6e0, 0x1f6e0,
+		0x1f700, 0x1f784,
+		0x1f7c0, 0x1f7c8,
+		0x1f808, 0x1f80c,
+		0x1f840, 0x1f84c,
+		0x1fa84, 0x1fa90,
+		0x1fac0, 0x1fac0,
+		0x1fae0, 0x1fae0,
+		0x1fb00, 0x1fb84,
+		0x1fbc0, 0x1fbc8,
+		0x1fc08, 0x1fc0c,
+		0x1fc40, 0x1fc4c,
+		0x1fe84, 0x1fe90,
+		0x1fec0, 0x1fec0,
+		0x1fee0, 0x1fee0,
+		0x1ff00, 0x1ff84,
+		0x1ffc0, 0x1ffc8,
+		0x30000, 0x30070,
+		0x30100, 0x3015c,
+		0x30190, 0x301d0,
+		0x30200, 0x30318,
+		0x30400, 0x3052c,
+		0x30540, 0x3061c,
+		0x30800, 0x3088c,
+		0x308c0, 0x30908,
+		0x30910, 0x309b8,
+		0x30a00, 0x30a04,
+		0x30a0c, 0x30a2c,
+		0x30a44, 0x30a50,
+		0x30a74, 0x30c24,
+		0x30d00, 0x30d3c,
+		0x30d44, 0x30d7c,
+		0x30de0, 0x30de0,
+		0x30e00, 0x30ed4,
+		0x30f00, 0x30fa4,
+		0x30fc0, 0x30fc4,
+		0x31000, 0x31004,
+		0x31080, 0x310fc,
+		0x31208, 0x31220,
+		0x3123c, 0x31254,
+		0x31300, 0x31300,
+		0x31308, 0x3131c,
+		0x31338, 0x3133c,
+		0x31380, 0x31380,
+		0x31388, 0x313a8,
+		0x313b4, 0x313b4,
+		0x31400, 0x31420,
+		0x31438, 0x3143c,
+		0x31480, 0x31480,
+		0x314a8, 0x314a8,
+		0x314b0, 0x314b4,
+		0x314c8, 0x314d4,
+		0x31a40, 0x31a4c,
+		0x31af0, 0x31b20,
+		0x31b38, 0x31b3c,
+		0x31b80, 0x31b80,
+		0x31ba8, 0x31ba8,
+		0x31bb0, 0x31bb4,
+		0x31bc8, 0x31bd4,
+		0x32140, 0x3218c,
+		0x321f0, 0x32200,
+		0x32218, 0x32218,
+		0x32400, 0x32400,
+		0x32408, 0x3241c,
+		0x32618, 0x32620,
+		0x32664, 0x32664,
+		0x326a8, 0x326a8,
+		0x326ec, 0x326ec,
+		0x32a00, 0x32abc,
+		0x32b00, 0x32b78,
+		0x32c00, 0x32c00,
+		0x32c08, 0x32c3c,
+		0x32e00, 0x32e2c,
+		0x32f00, 0x32f2c,
+		0x33000, 0x330ac,
+		0x330c0, 0x331ac,
+		0x331c0, 0x332c4,
+		0x332e4, 0x333c4,
+		0x333e4, 0x334ac,
+		0x334c0, 0x335ac,
+		0x335c0, 0x336c4,
+		0x336e4, 0x337c4,
+		0x337e4, 0x337fc,
+		0x33814, 0x33814,
+		0x33854, 0x33868,
+		0x33880, 0x3388c,
+		0x338c0, 0x338d0,
+		0x338e8, 0x338ec,
+		0x33900, 0x339ac,
+		0x339c0, 0x33ac4,
+		0x33ae4, 0x33b10,
+		0x33b24, 0x33b50,
+		0x33bf0, 0x33c10,
+		0x33c24, 0x33c50,
+		0x33cf0, 0x33cfc,
+		0x34000, 0x34070,
+		0x34100, 0x3415c,
+		0x34190, 0x341d0,
+		0x34200, 0x34318,
+		0x34400, 0x3452c,
+		0x34540, 0x3461c,
+		0x34800, 0x3488c,
+		0x348c0, 0x34908,
+		0x34910, 0x349b8,
+		0x34a00, 0x34a04,
+		0x34a0c, 0x34a2c,
+		0x34a44, 0x34a50,
+		0x34a74, 0x34c24,
+		0x34d00, 0x34d3c,
+		0x34d44, 0x34d7c,
+		0x34de0, 0x34de0,
+		0x34e00, 0x34ed4,
+		0x34f00, 0x34fa4,
+		0x34fc0, 0x34fc4,
+		0x35000, 0x35004,
+		0x35080, 0x350fc,
+		0x35208, 0x35220,
+		0x3523c, 0x35254,
+		0x35300, 0x35300,
+		0x35308, 0x3531c,
+		0x35338, 0x3533c,
+		0x35380, 0x35380,
+		0x35388, 0x353a8,
+		0x353b4, 0x353b4,
+		0x35400, 0x35420,
+		0x35438, 0x3543c,
+		0x35480, 0x35480,
+		0x354a8, 0x354a8,
+		0x354b0, 0x354b4,
+		0x354c8, 0x354d4,
+		0x35a40, 0x35a4c,
+		0x35af0, 0x35b20,
+		0x35b38, 0x35b3c,
+		0x35b80, 0x35b80,
+		0x35ba8, 0x35ba8,
+		0x35bb0, 0x35bb4,
+		0x35bc8, 0x35bd4,
+		0x36140, 0x3618c,
+		0x361f0, 0x36200,
+		0x36218, 0x36218,
+		0x36400, 0x36400,
+		0x36408, 0x3641c,
+		0x36618, 0x36620,
+		0x36664, 0x36664,
+		0x366a8, 0x366a8,
+		0x366ec, 0x366ec,
+		0x36a00, 0x36abc,
+		0x36b00, 0x36b78,
+		0x36c00, 0x36c00,
+		0x36c08, 0x36c3c,
+		0x36e00, 0x36e2c,
+		0x36f00, 0x36f2c,
+		0x37000, 0x370ac,
+		0x370c0, 0x371ac,
+		0x371c0, 0x372c4,
+		0x372e4, 0x373c4,
+		0x373e4, 0x374ac,
+		0x374c0, 0x375ac,
+		0x375c0, 0x376c4,
+		0x376e4, 0x377c4,
+		0x377e4, 0x377fc,
+		0x37814, 0x37814,
+		0x37854, 0x37868,
+		0x37880, 0x3788c,
+		0x378c0, 0x378d0,
+		0x378e8, 0x378ec,
+		0x37900, 0x379ac,
+		0x379c0, 0x37ac4,
+		0x37ae4, 0x37b10,
+		0x37b24, 0x37b50,
+		0x37bf0, 0x37c10,
+		0x37c24, 0x37c50,
+		0x37cf0, 0x37cfc,
+		0x40040, 0x40040,
+		0x40080, 0x40084,
+		0x40100, 0x40100,
+		0x40140, 0x401bc,
+		0x40200, 0x40214,
+		0x40228, 0x40228,
+		0x40240, 0x40258,
+		0x40280, 0x40280,
+		0x40304, 0x40304,
+		0x40330, 0x4033c,
+		0x41304, 0x413dc,
+		0x41400, 0x4141c,
+		0x41480, 0x414d0,
+		0x44000, 0x4407c,
+		0x440c0, 0x4427c,
+		0x442c0, 0x4447c,
+		0x444c0, 0x4467c,
+		0x446c0, 0x4487c,
+		0x448c0, 0x44a7c,
+		0x44ac0, 0x44c7c,
+		0x44cc0, 0x44e7c,
+		0x44ec0, 0x4507c,
+		0x450c0, 0x451fc,
+		0x45800, 0x45868,
+		0x45880, 0x45884,
+		0x458a0, 0x458b0,
+		0x45a00, 0x45a68,
+		0x45a80, 0x45a84,
+		0x45aa0, 0x45ab0,
+		0x460c0, 0x460e4,
+		0x47000, 0x4708c,
+		0x47200, 0x47250,
+		0x47400, 0x47420,
+		0x47600, 0x47618,
+		0x47800, 0x4782c,
+		0x50000, 0x500cc,
+		0x50400, 0x50400,
+		0x50800, 0x508cc,
+		0x50c00, 0x50c00,
+		0x51000, 0x510b0,
+		0x51300, 0x51324,
+	};
+
 	u32 *buf_end = (u32 *)((char *)buf + buf_size);
 	const unsigned int *reg_ranges;
 	int reg_ranges_size, range;
@@ -1328,6 +1678,11 @@
 		reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
 		break;
 
+	case CHELSIO_T6:
+		reg_ranges = t6_reg_ranges;
+		reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
+		break;
+
 	default:
 		dev_err(adap->pdev_dev,
 			"Unsupported chip version %d\n", chip_version);
@@ -1374,17 +1729,16 @@
 }
 
 /**
- *	get_vpd_params - read VPD parameters from VPD EEPROM
+ *	t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
  *	@adapter: adapter to read
  *	@p: where to store the parameters
  *
  *	Reads card parameters stored in VPD EEPROM.
  */
-int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
 {
-	u32 cclk_param, cclk_val;
-	int i, ret, addr;
-	int ec, sn, pn;
+	int i, ret = 0, addr;
+	int ec, sn, pn, na;
 	u8 *vpd, csum;
 	unsigned int vpdr_len, kw_offset, id_len;
 
@@ -1392,6 +1746,9 @@
 	if (!vpd)
 		return -ENOMEM;
 
+	/* Card information normally starts at VPD_BASE but early cards had
+	 * it at 0.
+	 */
 	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
 	if (ret < 0)
 		goto out;
@@ -1457,6 +1814,7 @@
 	FIND_VPD_KW(ec, "EC");
 	FIND_VPD_KW(sn, "SN");
 	FIND_VPD_KW(pn, "PN");
+	FIND_VPD_KW(na, "NA");
 #undef FIND_VPD_KW
 
 	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
@@ -1469,18 +1827,42 @@
 	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
 	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
 	strim(p->pn);
+	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
+	strim((char *)p->na);
 
-	/*
-	 * Ask firmware for the Core Clock since it knows how to translate the
+out:
+	vfree(vpd);
+	return ret;
+}
+
+/**
+ *	t4_get_vpd_params - read VPD parameters & retrieve Core Clock
+ *	@adapter: adapter to read
+ *	@p: where to store the parameters
+ *
+ *	Reads card parameters stored in VPD EEPROM and retrieves the Core
+ *	Clock.  This can only be called after a connection to the firmware
+ *	is established.
+ */
+int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+{
+	u32 cclk_param, cclk_val;
+	int ret;
+
+	/* Grab the raw VPD parameters.
+	 */
+	ret = t4_get_raw_vpd_params(adapter, p);
+	if (ret)
+		return ret;
+
+	/* Ask firmware for the Core Clock since it knows how to translate the
 	 * Reference Clock ('V2') VPD field into a Core Clock value ...
 	 */
 	cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
 		      FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
-	ret = t4_query_params(adapter, adapter->mbox, 0, 0,
+	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
 			      1, &cclk_param, &cclk_val);
 
-out:
-	vfree(vpd);
 	if (ret)
 		return ret;
 	p->cclk = cclk_val;
@@ -1618,7 +2000,7 @@
 		if (ret)
 			return ret;
 		if (byte_oriented)
-			*data = (__force __u32) (htonl(*data));
+			*data = (__force __u32)(cpu_to_be32(*data));
 	}
 	return 0;
 }
@@ -1941,7 +2323,8 @@
 	 * which will keep us "honest" in the future ...
 	 */
 	if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
-	    (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5))
+	    (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
+	    (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
 		return true;
 
 	dev_err(adap->pdev_dev,
@@ -1979,7 +2362,7 @@
 			"FW image size not multiple of 512 bytes\n");
 		return -EINVAL;
 	}
-	if (ntohs(hdr->len512) * 512 != size) {
+	if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
 		dev_err(adap->pdev_dev,
 			"FW image size differs from size in FW header\n");
 		return -EINVAL;
@@ -1993,7 +2376,7 @@
 		return -EINVAL;
 
 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
-		csum += ntohl(p[i]);
+		csum += be32_to_cpu(p[i]);
 
 	if (csum != 0xffffffff) {
 		dev_err(adap->pdev_dev,
@@ -2012,7 +2395,7 @@
 	 * first page with a bad version.
 	 */
 	memcpy(first_page, fw_data, SF_PAGE_SIZE);
-	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
+	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
 	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
 	if (ret)
 		goto out;
@@ -2039,6 +2422,147 @@
 }
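
The integrity checks above enforce the image format: the file must be a whole number of 512-byte sectors, and the sum of its big-endian 32-bit words must come out to 0xffffffff. A standalone validator sketch for the checksum half:

#include <stddef.h>
#include <stdint.h>

/* Sum the image as big-endian 32-bit words with wrap-around; a valid
 * firmware image is built so the total equals 0xffffffff.
 */
static int fw_csum_ok_sketch(const uint8_t *img, size_t size)
{
	uint32_t csum = 0;
	size_t i;

	for (i = 0; i + 4 <= size; i += 4)
		csum += (uint32_t)img[i] << 24 | (uint32_t)img[i + 1] << 16 |
			(uint32_t)img[i + 2] << 8 | img[i + 3];
	return csum == 0xffffffff;
}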
 
 /**
+ *	t4_phy_fw_ver - return current PHY firmware version
+ *	@adap: the adapter
+ *	@phy_fw_ver: return value buffer for PHY firmware version
+ *
+ *	Returns the current version of external PHY firmware on the
+ *	adapter.
+ */
+int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
+{
+	u32 param, val;
+	int ret;
+
+	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
+		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
+		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
+	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+			      &param, &val);
+	if (ret < 0)
+		return ret;
+	*phy_fw_ver = val;
+	return 0;
+}
+
+/**
+ *	t4_load_phy_fw - download port PHY firmware
+ *	@adap: the adapter
+ *	@win: the PCI-E Memory Window index to use for t4_memory_rw()
+ *	@win_lock: the lock to use to guard the memory copy
+ *	@phy_fw_version: function to check PHY firmware versions
+ *	@phy_fw_data: the PHY firmware image to write
+ *	@phy_fw_size: image size
+ *
+ *	Transfer the specified PHY firmware to the adapter.  If a non-NULL
+ *	@phy_fw_version is supplied, then it will be used to determine if
+ *	it's necessary to perform the transfer by comparing the version
+ *	of any existing adapter PHY firmware with that of the passed in
+ *	PHY firmware image.  If @win_lock is non-NULL then it will be used
+ *	around the call to t4_memory_rw() which transfers the PHY firmware
+ *	to the adapter.
+ *
+ *	A negative error number will be returned if an error occurs.  If
+ *	version number support is available and there's no need to upgrade
+ *	the firmware, 0 will be returned.  If firmware is successfully
+ *	transferred to the adapter, 1 will be returned.
+ *
+ *	NOTE: some adapters only have local RAM to store the PHY firmware.  As
+ *	a result, a RESET of the adapter would cause that RAM to lose its
+ *	contents.  Thus, loading PHY firmware on such adapters must happen
+ *	after any FW_RESET_CMDs ...
+ */
+int t4_load_phy_fw(struct adapter *adap,
+		   int win, spinlock_t *win_lock,
+		   int (*phy_fw_version)(const u8 *, size_t),
+		   const u8 *phy_fw_data, size_t phy_fw_size)
+{
+	unsigned long mtype = 0, maddr = 0;
+	u32 param, val;
+	int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
+	int ret;
+
+	/* If we have version number support, then check to see if the adapter
+	 * already has up-to-date PHY firmware loaded.
+	 */
+	if (phy_fw_version) {
+		new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
+		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
+		if (ret < 0)
+			return ret;
+
+		if (cur_phy_fw_ver >= new_phy_fw_vers) {
+			CH_WARN(adap, "PHY Firmware already up-to-date, "
+				"version %#x\n", cur_phy_fw_ver);
+			return 0;
+		}
+	}
+
+	/* Ask the firmware where it wants us to copy the PHY firmware image.
+	 * The size of the file requires a special version of the READ command,
+	 * which passes the file size via the values field of PARAMS_CMD and
+	 * retrieves the firmware's return value in the same buffer.
+	 */
+	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
+		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
+		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
+	val = phy_fw_size;
+	ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
+				 &param, &val, 1);
+	if (ret < 0)
+		return ret;
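+	/* The returned value encodes the memory type in the upper bits and
+	 * the memory address, in units of 64KB, in the low byte.
+	 */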
+	mtype = val >> 8;
+	maddr = (val & 0xff) << 16;
+
+	/* Copy the supplied PHY Firmware image to the adapter memory location
+	 * allocated by the adapter firmware.
+	 */
+	if (win_lock)
+		spin_lock_bh(win_lock);
+	ret = t4_memory_rw(adap, win, mtype, maddr,
+			   phy_fw_size, (__be32 *)phy_fw_data,
+			   T4_MEMORY_WRITE);
+	if (win_lock)
+		spin_unlock_bh(win_lock);
+	if (ret)
+		return ret;
+
+	/* Tell the firmware that the PHY firmware image has been written to
+	 * RAM and it can now start copying it over to the PHYs.  The chip
+	 * firmware will RESET the affected PHYs as part of this operation
+	 * leaving them running the new PHY firmware image.
+	 */
+	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
+		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
+		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
+	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
+				    &param, &val, 30000);
+
+	/* If we have version number support, then check to see that the new
+	 * firmware got loaded properly.
+	 */
+	if (phy_fw_version) {
+		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
+		if (ret < 0)
+			return ret;
+
+		if (cur_phy_fw_ver != new_phy_fw_vers) {
+			CH_WARN(adap, "PHY Firmware did not update: "
+				"version on adapter %#x, "
+				"version flashed %#x\n",
+				cur_phy_fw_ver, new_phy_fw_vers);
+			return -ENXIO;
+		}
+	}
+
+	return 1;
+}
+
+/**
  *	t4_fwcache - firmware cache operation
  *	@adap: the adapter
  *	@op  : the operation (flush or flush and invalidate)
@@ -2051,7 +2575,7 @@
 	c.op_to_vfn =
 		cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
 			    FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
-			    FW_PARAMS_CMD_PFN_V(adap->fn) |
+			    FW_PARAMS_CMD_PFN_V(adap->pf) |
 			    FW_PARAMS_CMD_VFN_V(0));
 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 	c.param[0].mnem =
@@ -2062,6 +2586,61 @@
 	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
 }
 
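+/**
+ *	t4_cim_read_pif_la - read CIM PIF logic analyzer
+ *	@adap: the adapter
+ *	@pif_req: where to store the request logic analyzer data
+ *	@pif_rsp: where to store the response logic analyzer data
+ *	@pif_req_wrptr: if non-NULL, returns the request write pointer
+ *	@pif_rsp_wrptr: if non-NULL, returns the response write pointer
+ *
+ *	Reads the contents of the CIM PIF request and response logic
+ *	analyzers.
+ */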
+void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
+			unsigned int *pif_req_wrptr,
+			unsigned int *pif_rsp_wrptr)
+{
+	int i, j;
+	u32 cfg, val, req, rsp;
+
+	cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
+	if (cfg & LADBGEN_F)
+		t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
+
+	val = t4_read_reg(adap, CIM_DEBUGSTS_A);
+	req = POLADBGWRPTR_G(val);
+	rsp = PILADBGWRPTR_G(val);
+	if (pif_req_wrptr)
+		*pif_req_wrptr = req;
+	if (pif_rsp_wrptr)
+		*pif_rsp_wrptr = rsp;
+
+	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
+		for (j = 0; j < 6; j++) {
+			t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
+				     PILADBGRDPTR_V(rsp));
+			*pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
+			*pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
+			req++;
+			rsp++;
+		}
+		req = (req + 2) & POLADBGRDPTR_M;
+		rsp = (rsp + 2) & PILADBGRDPTR_M;
+	}
+	t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
+}
+
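+/**
+ *	t4_cim_read_ma_la - read CIM MA logic analyzer
+ *	@adap: the adapter
+ *	@ma_req: where to store the MA request logic analyzer data
+ *	@ma_rsp: where to store the MA response logic analyzer data
+ *
+ *	Reads the contents of the CIM MA request and response logic
+ *	analyzers.
+ */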
+void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
+{
+	u32 cfg;
+	int i, j, idx;
+
+	cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
+	if (cfg & LADBGEN_F)
+		t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
+
+	for (i = 0; i < CIM_MALA_SIZE; i++) {
+		for (j = 0; j < 5; j++) {
+			idx = 8 * i + j;
+			t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
+				     PILADBGRDPTR_V(idx));
+			*ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
+			*ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
+		}
+	}
+	t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
+}
+
 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
 {
 	unsigned int i, j;
@@ -2082,7 +2661,7 @@
 		     FW_PORT_CAP_ANEG)
 
 /**
- *	t4_link_start - apply link configuration to MAC/PHY
+ *	t4_link_l1cfg - apply link configuration to MAC/PHY
  *	@phy: the PHY to setup
  *	@mac: the MAC to setup
  *	@lc: the requested link configuration
@@ -2094,7 +2673,7 @@
  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
  *	  otherwise do it later based on the outcome of auto-negotiation.
  */
-int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
 		  struct link_config *lc)
 {
 	struct fw_port_cmd c;
@@ -2107,19 +2686,22 @@
 		fc |= FW_PORT_CAP_FC_TX;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
-			       FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
-	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
-				  FW_LEN16(c));
+	c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+				     FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+				     FW_PORT_CMD_PORTID_V(port));
+	c.action_to_len16 =
+		cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+			    FW_LEN16(c));
 
 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
-		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
+		c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
+					     fc);
 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
 	} else if (lc->autoneg == AUTONEG_DISABLE) {
-		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
+		c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
 	} else
-		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
+		c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
 
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
@@ -2137,11 +2719,13 @@
 	struct fw_port_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
-			       FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
-	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
-				  FW_LEN16(c));
-	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
+	c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+				     FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+				     FW_PORT_CMD_PORTID_V(port));
+	c.action_to_len16 =
+		cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+			    FW_LEN16(c));
+	c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -2335,6 +2919,7 @@
 static void sge_intr_handler(struct adapter *adapter)
 {
 	u64 v;
+	u32 err;
 
 	static const struct intr_info sge_intr_info[] = {
 		{ ERR_CPL_EXCEED_IQE_SIZE_F,
@@ -2343,8 +2928,6 @@
 		  "SGE GTS CIDX increment too large", -1, 0 },
 		{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
 		{ DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
-		{ DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
-		{ ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
 		{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
 		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
 		{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
@@ -2357,13 +2940,19 @@
 		  0 },
 		{ ERR_ING_CTXT_PRIO_F,
 		  "SGE too many priority ingress contexts", -1, 0 },
-		{ ERR_EGR_CTXT_PRIO_F,
-		  "SGE too many priority egress contexts", -1, 0 },
 		{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
 		{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
 		{ 0 }
 	};
 
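+	/* SGE error interrupts which only exist on the T4 and T5 chips */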
+	static struct intr_info t4t5_sge_intr_info[] = {
+		{ ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
+		{ DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
+		{ ERR_EGR_CTXT_PRIO_F,
+		  "SGE too many priority egress contexts", -1, 0 },
+		{ 0 }
+	};
+
 	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
 		((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
 	if (v) {
@@ -2373,8 +2962,23 @@
 		t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
 	}
 
-	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
-	    v != 0)
+	v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
+	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+		v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
+					   t4t5_sge_intr_info);
+
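+	/* Report and then clear any per-queue error the SGE has logged */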
+	err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
+	if (err & ERROR_QID_VALID_F) {
+		dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
+			ERROR_QID_G(err));
+		if (err & UNCAPTURED_ERROR_F)
+			dev_err(adapter->pdev_dev,
+				"SGE UNCAPTURED_ERROR set (clearing)\n");
+		t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
+			     UNCAPTURED_ERROR_F);
+	}
+
+	if (v != 0)
 		t4_fatal_err(adapter);
 }
 
@@ -2547,6 +3151,7 @@
  */
 static void le_intr_handler(struct adapter *adap)
 {
+	enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
 	static const struct intr_info le_intr_info[] = {
 		{ LIPMISS_F, "LE LIP miss", -1, 0 },
 		{ LIP0_F, "LE 0 LIP error", -1, 0 },
@@ -2556,7 +3161,18 @@
 		{ 0 }
 	};
 
-	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info))
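+	/* T6 uses a different LE_DB_INT_CAUSE layout */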
+	static struct intr_info t6_le_intr_info[] = {
+		{ T6_LIPMISS_F, "LE LIP miss", -1, 0 },
+		{ T6_LIP0_F, "LE 0 LIP error", -1, 0 },
+		{ TCAMINTPERR_F, "LE parity error", -1, 1 },
+		{ T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+		{ SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
+				  (chip <= CHELSIO_T5) ?
+				  le_intr_info : t6_le_intr_info))
 		t4_fatal_err(adap);
 }
 
@@ -2825,7 +3441,7 @@
 		pcie_intr_handler(adapter);
 	if (cause & MC_F)
 		mem_intr_handler(adapter, MEM_MC);
-	if (!is_t4(adapter->params.chip) && (cause & MC1_S))
+	if (is_t5(adapter->params.chip) && (cause & MC1_F))
 		mem_intr_handler(adapter, MEM_MC1);
 	if (cause & EDC0_F)
 		mem_intr_handler(adapter, MEM_EDC0);
@@ -2871,17 +3487,18 @@
  */
 void t4_intr_enable(struct adapter *adapter)
 {
+	u32 val = 0;
 	u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
 
+	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+		val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
 	t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
 		     ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
-		     ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
+		     ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
 		     ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
 		     ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
 		     ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
-		     ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
-		     DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
-		     EGRESS_SIZE_ERR_F);
+		     DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
 	t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
 }
@@ -2945,18 +3562,18 @@
 	struct fw_rss_ind_tbl_cmd cmd;
 
 	memset(&cmd, 0, sizeof(cmd));
-	cmd.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
 			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
 			       FW_RSS_IND_TBL_CMD_VIID_V(viid));
-	cmd.retval_len16 = htonl(FW_LEN16(cmd));
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
 
 	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
 	while (n > 0) {
 		int nq = min(n, 32);
 		__be32 *qp = &cmd.iq0_to_iq2;
 
-		cmd.niqid = htons(nq);
-		cmd.startidx = htons(start);
+		cmd.niqid = cpu_to_be16(nq);
+		cmd.startidx = cpu_to_be16(start);
 
 		start += nq;
 		n -= nq;
@@ -2974,7 +3591,7 @@
 			if (++rsp >= rsp_end)
 				rsp = rspq;
 
-			*qp++ = htonl(v);
+			*qp++ = cpu_to_be32(v);
 			nq -= 3;
 		}
 
@@ -3000,20 +3617,46 @@
 	struct fw_rss_glb_config_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_write = htonl(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
-			      FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
-	c.retval_len16 = htonl(FW_LEN16(c));
+	c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
+				    FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
-		c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
+		c.u.manual.mode_pkd =
+			cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
 		c.u.basicvirtual.mode_pkd =
-			htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
-		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
+			cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
+		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
 	} else
 		return -EINVAL;
 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
 }
 
+/**
+ *	t4_config_vi_rss - configure per VI RSS settings
+ *	@adapter: the adapter
+ *	@mbox: mbox to use for the FW command
+ *	@viid: the VI id
+ *	@flags: RSS flags
+ *	@defq: id of the default RSS queue for the VI.
+ *
+ *	Configures VI-specific RSS properties.
+ */
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+		     unsigned int flags, unsigned int defq)
+{
+	struct fw_rss_vi_config_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+				   FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
+					FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
+	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+}
+
 /* Read an RSS table row */
 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
 {
@@ -3045,6 +3688,40 @@
 }
 
 /**
+ *	t4_fw_tp_pio_rw - Access TP PIO through LDST
+ *	@adap: the adapter
+ *	@vals: where the indirect register values are stored/written
+ *	@nregs: how many indirect registers to read/write
+ *	@start_index: index of first indirect register to read/write
+ *	@rw: Read (1) or Write (0)
+ *
+ *	Access TP PIO registers through LDST
+ */
+static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+			    unsigned int start_index, unsigned int rw)
+{
+	int ret, i;
+	int cmd = FW_LDST_ADDRSPC_TP_PIO;
+	struct fw_ldst_cmd c;
+
+	for (i = 0 ; i < nregs; i++) {
+		memset(&c, 0, sizeof(c));
+		c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+						FW_CMD_REQUEST_F |
+						(rw ? FW_CMD_READ_F :
+						      FW_CMD_WRITE_F) |
+						FW_LDST_CMD_ADDRSPACE_V(cmd));
+		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+
+		c.u.addrval.addr = cpu_to_be32(start_index + i);
+		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
+		ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+		if (!ret && rw)
+			vals[i] = be32_to_cpu(c.u.addrval.val);
+	}
+}
+
+/**
  *	t4_read_rss_key - read the global RSS key
  *	@adap: the adapter
  *	@key: 10-entry array holding the 320-bit RSS key
@@ -3053,8 +3730,11 @@
  */
 void t4_read_rss_key(struct adapter *adap, u32 *key)
 {
-	t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
-			 TP_RSS_SECRET_KEY0_A);
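+	/* Go through the firmware when it's up; otherwise read the TP
+	 * indirect registers directly.
+	 */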
+	if (adap->flags & FW_OK)
+		t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
+	else
+		t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
+				 TP_RSS_SECRET_KEY0_A);
 }
 
 /**
@@ -3069,11 +3749,32 @@
  */
 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
 {
-	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
-			  TP_RSS_SECRET_KEY0_A);
-	if (idx >= 0 && idx < 16)
-		t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
-			     KEYWRADDR_V(idx) | KEYWREN_F);
+	u8 rss_key_addr_cnt = 16;
+	u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
+
+	/* T6 and later: for KeyMode 3 (per-VF and per-VF scramble),
+	 * KeyWrAddrX supplies the upper two bits (index[5:4]) of the
+	 * key table index, allowing access to key addresses 16-63.
+	 */
+	if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
+	    (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
+		rss_key_addr_cnt = 32;
+
+	if (adap->flags & FW_OK)
+		t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
+	else
+		t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
+				  TP_RSS_SECRET_KEY0_A);
+
+	if (idx >= 0 && idx < rss_key_addr_cnt) {
+		if (rss_key_addr_cnt > 16)
+			t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
+				     KEYWRADDRX_V(idx >> 4) |
+				     T6_VFWRADDR_V(idx) | KEYWREN_F);
+		else
+			t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
+				     KEYWRADDR_V(idx) | KEYWREN_F);
+	}
 }
 
 /**
@@ -3088,8 +3789,12 @@
 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
 			   u32 *valp)
 {
-	t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
-			 valp, 1, TP_RSS_PF0_CONFIG_A + index);
+	if (adapter->flags & FW_OK)
+		t4_fw_tp_pio_rw(adapter, valp, 1,
+				TP_RSS_PF0_CONFIG_A + index, 1);
+	else
+		t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+				 valp, 1, TP_RSS_PF0_CONFIG_A + index);
 }
 
 /**
@@ -3107,8 +3812,13 @@
 {
 	u32 vrt, mask, data;
 
-	mask = VFWRADDR_V(VFWRADDR_M);
-	data = VFWRADDR_V(index);
+	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
+		mask = VFWRADDR_V(VFWRADDR_M);
+		data = VFWRADDR_V(index);
+	} else {
+		mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
+		data = T6_VFWRADDR_V(index);
+	}
 
 	/* Request that the index'th VF Table values be read into VFL/VFH.
 	 */
@@ -3119,10 +3829,15 @@
 
 	/* Grab the VFL/VFH values ...
 	 */
-	t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
-			 vfl, 1, TP_RSS_VFL_CONFIG_A);
-	t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
-			 vfh, 1, TP_RSS_VFH_CONFIG_A);
+	if (adapter->flags & FW_OK) {
+		t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
+		t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
+	} else {
+		t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+				 vfl, 1, TP_RSS_VFL_CONFIG_A);
+		t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+				 vfh, 1, TP_RSS_VFH_CONFIG_A);
+	}
 }
 
 /**
@@ -3135,8 +3850,11 @@
 {
 	u32 pfmap;
 
-	t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
-			 &pfmap, 1, TP_RSS_PF_MAP_A);
+	if (adapter->flags & FW_OK)
+		t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
+	else
+		t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+				 &pfmap, 1, TP_RSS_PF_MAP_A);
 	return pfmap;
 }
 
@@ -3150,8 +3868,11 @@
 {
 	u32 pfmask;
 
-	t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
-			 &pfmask, 1, TP_RSS_PF_MSK_A);
+	if (adapter->flags & FW_OK)
+		t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
+	else
+		t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+				 &pfmask, 1, TP_RSS_PF_MSK_A);
 	return pfmask;
 }
 
@@ -3176,18 +3897,18 @@
 	if (v4) {
 		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
 				 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
-		v4->tcpOutRsts = STAT(OUT_RST);
-		v4->tcpInSegs  = STAT64(IN_SEG);
-		v4->tcpOutSegs = STAT64(OUT_SEG);
-		v4->tcpRetransSegs = STAT64(RXT_SEG);
+		v4->tcp_out_rsts = STAT(OUT_RST);
+		v4->tcp_in_segs  = STAT64(IN_SEG);
+		v4->tcp_out_segs = STAT64(OUT_SEG);
+		v4->tcp_retrans_segs = STAT64(RXT_SEG);
 	}
 	if (v6) {
 		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
 				 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
-		v6->tcpOutRsts = STAT(OUT_RST);
-		v6->tcpInSegs  = STAT64(IN_SEG);
-		v6->tcpOutSegs = STAT64(OUT_SEG);
-		v6->tcpRetransSegs = STAT64(RXT_SEG);
+		v6->tcp_out_rsts = STAT(OUT_RST);
+		v6->tcp_in_segs  = STAT64(IN_SEG);
+		v6->tcp_out_segs = STAT64(OUT_SEG);
+		v6->tcp_retrans_segs = STAT64(RXT_SEG);
 	}
 #undef STAT64
 #undef STAT
@@ -3195,6 +3916,130 @@
 }
 
 /**
+ *	t4_tp_get_err_stats - read TP's error MIB counters
+ *	@adap: the adapter
+ *	@st: holds the counter values
+ *
+ *	Returns the values of TP's error counters.
+ */
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
+{
+	/* T4/T5 have 4 channels; T6 and later have 2 */
+	if (adap->params.arch.nchan == NCHAN) {
+		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+				 st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A);
+		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+				 st->tnl_cong_drops, 8,
+				 TP_MIB_TNL_CNG_DROP_0_A);
+		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+				 st->tnl_tx_drops, 4,
+				 TP_MIB_TNL_DROP_0_A);
+		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+				 st->ofld_vlan_drops, 4,
+				 TP_MIB_OFD_VLN_DROP_0_A);
+		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+				 st->tcp6_in_errs, 4,
+				 TP_MIB_TCP_V6IN_ERR_0_A);
+	} else {
+		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+				 st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A);
+		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+				 st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A);
+		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+				 st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A);
+		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+				 st->tnl_cong_drops, 2,
+				 TP_MIB_TNL_CNG_DROP_0_A);
+		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+				 st->ofld_chan_drops, 2,
+				 TP_MIB_OFD_CHN_DROP_0_A);
+		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+				 st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A);
+		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+				 st->ofld_vlan_drops, 2,
+				 TP_MIB_OFD_VLN_DROP_0_A);
+		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+				 st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A);
+	}
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
+}
+
+/**
+ *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
+ *	@adap: the adapter
+ *	@st: holds the counter values
+ *
+ *	Returns the values of TP's CPL counters.
+ */
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
+{
+	/* T4/T5 have 4 channels; T6 and later have 2 */
+	if (adap->params.arch.nchan == NCHAN) {
+		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+				 8, TP_MIB_CPL_IN_REQ_0_A);
+	} else {
+		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+				 2, TP_MIB_CPL_IN_REQ_0_A);
+		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
+				 2, TP_MIB_CPL_OUT_RSP_0_A);
+	}
+}
+
+/**
+ *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
+ *	@adap: the adapter
+ *	@st: holds the counter values
+ *
+ *	Returns the values of TP's RDMA counters.
+ */
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
+{
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->rqe_dfr_pkt,
+			 2, TP_MIB_RQE_DFR_PKT_A);
+}
+
+/**
+ *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
+ *	@adap: the adapter
+ *	@idx: the port index
+ *	@st: holds the counter values
+ *
+ *	Returns the values of TP's FCoE counters for the selected port.
+ */
+void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
+		       struct tp_fcoe_stats *st)
+{
+	u32 val[2];
+
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp,
+			 1, TP_MIB_FCOE_DDP_0_A + idx);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop,
+			 1, TP_MIB_FCOE_DROP_0_A + idx);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
+			 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx);
+	st->octets_ddp = ((u64)val[0] << 32) | val[1];
+}
+
+/**
+ *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
+ *	@adap: the adapter
+ *	@st: holds the counter values
+ *
+ *	Returns the values of TP's counters for non-TCP directly-placed packets.
+ */
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
+{
+	u32 val[4];
+
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4,
+			 TP_MIB_USM_PKTS_A);
+	st->frames = val[0];
+	st->drops = val[1];
+	st->octets = ((u64)val[2] << 32) | val[3];
+}
+
+/**
  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
  *	@adap: the adapter
  *	@mtus: where to store the MTU values
@@ -3346,6 +4191,52 @@
 	}
 }
 
+/* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
+ * clocks.  The formula is
+ *
+ * bytes/s = bytes256 * 256 * ClkFreq / 4096
+ *
+ * which is equivalent to
+ *
+ * bytes/s = 62.5 * bytes256 * ClkFreq_ms
+ */
+static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
+{
+	u64 v = bytes256 * adap->params.vpd.cclk;
+
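+	/* v * 62.5, computed with integer arithmetic only */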
+	return v * 62 + v / 2;
+}
+
+/**
+ *	t4_get_chan_txrate - get the current per channel Tx rates
+ *	@adap: the adapter
+ *	@nic_rate: rates for NIC traffic
+ *	@ofld_rate: rates for offloaded traffic
+ *
+ *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
+ *	for each channel.
+ */
+void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
+{
+	u32 v;
+
+	v = t4_read_reg(adap, TP_TX_TRATE_A);
+	nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
+	nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
+	if (adap->params.arch.nchan == NCHAN) {
+		nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
+		nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
+	}
+
+	v = t4_read_reg(adap, TP_TX_ORATE_A);
+	ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
+	ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
+	if (adap->params.arch.nchan == NCHAN) {
+		ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
+		ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
+	}
+}
+
 /**
  *	t4_pmtx_get_stats - returns the HW stats from PMTX
  *	@adap: the adapter
@@ -3401,7 +4292,7 @@
 }
 
 /**
- *	get_mps_bg_map - return the buffer groups associated with a port
+ *	t4_get_mps_bg_map - return the buffer groups associated with a port
  *	@adap: the adapter
  *	@idx: the port index
  *
@@ -3409,7 +4300,7 @@
  *	with the given port.  Bit i is set if buffer group i is used by the
  *	port.
  */
-static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
+unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
 {
 	u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
 
@@ -3451,6 +4342,28 @@
 }
 
 /**
+ *      t4_get_port_stats_offset - collect port stats relative to a previous
+ *                                 snapshot
+ *      @adap: The adapter
+ *      @idx: The port
+ *      @stats: Current stats to fill
+ *      @offset: Previous stats snapshot
+ */
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+			      struct port_stats *stats,
+			      struct port_stats *offset)
+{
+	u64 *s, *o;
+	int i;
+
+	t4_get_port_stats(adap, idx, stats);
+	for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
+			i < (sizeof(struct port_stats) / sizeof(u64));
+			i++, s++, o++)
+		*s -= *o;
+}
+
+/**
  *	t4_get_port_stats - collect port statistics
  *	@adap: the adapter
  *	@idx: the port index
@@ -3460,7 +4373,7 @@
  */
 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
 {
-	u32 bgmap = get_mps_bg_map(adap, idx);
+	u32 bgmap = t4_get_mps_bg_map(adap, idx);
 
 #define GET_STAT(name) \
 	t4_read_reg64(adap, \
@@ -3534,103 +4447,51 @@
 }
 
 /**
- *	t4_wol_magic_enable - enable/disable magic packet WoL
+ *	t4_get_lb_stats - collect loopback port statistics
  *	@adap: the adapter
- *	@port: the physical port index
- *	@addr: MAC address expected in magic packets, %NULL to disable
+ *	@idx: the loopback port index
+ *	@p: the stats structure to fill
  *
- *	Enables/disables magic packet wake-on-LAN for the selected port.
+ *	Return HW statistics for the given loopback port.
  */
-void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
-			 const u8 *addr)
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
 {
-	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
+	u32 bgmap = t4_get_mps_bg_map(adap, idx);
 
-	if (is_t4(adap->params.chip)) {
-		mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
-		mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
-		port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
-	} else {
-		mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
-		mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
-		port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
-	}
-
-	if (addr) {
-		t4_write_reg(adap, mag_id_reg_l,
-			     (addr[2] << 24) | (addr[3] << 16) |
-			     (addr[4] << 8) | addr[5]);
-		t4_write_reg(adap, mag_id_reg_h,
-			     (addr[0] << 8) | addr[1]);
-	}
-	t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F,
-			 addr ? MAGICEN_F : 0);
-}
-
-/**
- *	t4_wol_pat_enable - enable/disable pattern-based WoL
- *	@adap: the adapter
- *	@port: the physical port index
- *	@map: bitmap of which HW pattern filters to set
- *	@mask0: byte mask for bytes 0-63 of a packet
- *	@mask1: byte mask for bytes 64-127 of a packet
- *	@crc: Ethernet CRC for selected bytes
- *	@enable: enable/disable switch
- *
- *	Sets the pattern filters indicated in @map to mask out the bytes
- *	specified in @mask0/@mask1 in received packets and compare the CRC of
- *	the resulting packet against @crc.  If @enable is %true pattern-based
- *	WoL is enabled, otherwise disabled.
- */
-int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
-		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
-{
-	int i;
-	u32 port_cfg_reg;
-
-	if (is_t4(adap->params.chip))
-		port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
-	else
-		port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
-
-	if (!enable) {
-		t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0);
-		return 0;
-	}
-	if (map > 0xff)
-		return -EINVAL;
-
-#define EPIO_REG(name) \
+#define GET_STAT(name) \
+	t4_read_reg64(adap, \
 	(is_t4(adap->params.chip) ? \
-	 PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \
-	 T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A))
+	PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
+	T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
+#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
 
-	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
-	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
-	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
+	p->octets           = GET_STAT(BYTES);
+	p->frames           = GET_STAT(FRAMES);
+	p->bcast_frames     = GET_STAT(BCAST);
+	p->mcast_frames     = GET_STAT(MCAST);
+	p->ucast_frames     = GET_STAT(UCAST);
+	p->error_frames     = GET_STAT(ERROR);
 
-	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
-		if (!(map & 1))
-			continue;
+	p->frames_64        = GET_STAT(64B);
+	p->frames_65_127    = GET_STAT(65B_127B);
+	p->frames_128_255   = GET_STAT(128B_255B);
+	p->frames_256_511   = GET_STAT(256B_511B);
+	p->frames_512_1023  = GET_STAT(512B_1023B);
+	p->frames_1024_1518 = GET_STAT(1024B_1518B);
+	p->frames_1519_max  = GET_STAT(1519B_MAX);
+	p->drop             = GET_STAT(DROP_FRAMES);
 
-		/* write byte masks */
-		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
-		t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F);
-		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
-		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
-			return -ETIMEDOUT;
+	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
+	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
+	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
+	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
+	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
+	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
+	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
+	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
 
-		/* write CRC */
-		t4_write_reg(adap, EPIO_REG(DATA0), crc);
-		t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F);
-		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
-		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
-			return -ETIMEDOUT;
-	}
-#undef EPIO_REG
-
-	t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2_A), 0, PATEN_F);
-	return 0;
+#undef GET_STAT
+#undef GET_STAT_COM
 }
 
 /*     t4_mk_filtdelwr - create a delete filter WR
@@ -3644,33 +4505,38 @@
 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
 {
 	memset(wr, 0, sizeof(*wr));
-	wr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
-	wr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*wr) / 16));
-	wr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(ftid) |
-			FW_FILTER_WR_NOREPLY_V(qid < 0));
-	wr->del_filter_to_l2tix = htonl(FW_FILTER_WR_DEL_FILTER_F);
+	wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
+	wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
+	wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
+				    FW_FILTER_WR_NOREPLY_V(qid < 0));
+	wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
 	if (qid >= 0)
-		wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid));
+		wr->rx_chan_rx_rpl_iq =
+			cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
 }
 
 #define INIT_CMD(var, cmd, rd_wr) do { \
-	(var).op_to_write = htonl(FW_CMD_OP_V(FW_##cmd##_CMD) | \
-				  FW_CMD_REQUEST_F | FW_CMD_##rd_wr##_F); \
-	(var).retval_len16 = htonl(FW_LEN16(var)); \
+	(var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
+					FW_CMD_REQUEST_F | \
+					FW_CMD_##rd_wr##_F); \
+	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
 } while (0)
 
 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
 			  u32 addr, u32 val)
 {
+	u32 ldst_addrspace;
 	struct fw_ldst_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
-			    FW_CMD_WRITE_F |
-			    FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE));
-	c.cycles_to_len16 = htonl(FW_LEN16(c));
-	c.u.addrval.addr = htonl(addr);
-	c.u.addrval.val = htonl(val);
+	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
+	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+					FW_CMD_REQUEST_F |
+					FW_CMD_WRITE_F |
+					ldst_addrspace);
+	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+	c.u.addrval.addr = cpu_to_be32(addr);
+	c.u.addrval.val = cpu_to_be32(val);
 
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
@@ -3690,19 +4556,22 @@
 	       unsigned int mmd, unsigned int reg, u16 *valp)
 {
 	int ret;
+	u32 ldst_addrspace;
 	struct fw_ldst_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
-		FW_CMD_READ_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
-	c.cycles_to_len16 = htonl(FW_LEN16(c));
-	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
-				   FW_LDST_CMD_MMD_V(mmd));
-	c.u.mdio.raddr = htons(reg);
+	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
+	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+					FW_CMD_REQUEST_F | FW_CMD_READ_F |
+					ldst_addrspace);
+	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+	c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
+					 FW_LDST_CMD_MMD_V(mmd));
+	c.u.mdio.raddr = cpu_to_be16(reg);
 
 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 	if (ret == 0)
-		*valp = ntohs(c.u.mdio.rval);
+		*valp = be16_to_cpu(c.u.mdio.rval);
 	return ret;
 }
 
@@ -3720,16 +4589,19 @@
 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 	       unsigned int mmd, unsigned int reg, u16 val)
 {
+	u32 ldst_addrspace;
 	struct fw_ldst_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
-		FW_CMD_WRITE_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
-	c.cycles_to_len16 = htonl(FW_LEN16(c));
-	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
-				   FW_LDST_CMD_MMD_V(mmd));
-	c.u.mdio.raddr = htons(reg);
-	c.u.mdio.rval = htons(val);
+	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
+	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+					FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+					ldst_addrspace);
+	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+	c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
+					 FW_LDST_CMD_MMD_V(mmd));
+	c.u.mdio.raddr = cpu_to_be16(reg);
+	c.u.mdio.rval = cpu_to_be16(val);
 
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
@@ -3841,6 +4713,32 @@
 }
 
 /**
+ *      t4_sge_ctxt_flush - flush the SGE context cache
+ *      @adap: the adapter
+ *      @mbox: mailbox to use for the FW command
+ *
+ *      Issues a FW command through the given mailbox to flush the
+ *      SGE context cache.
+ */
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
+{
+	int ret;
+	u32 ldst_addrspace;
+	struct fw_ldst_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
+	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+					FW_CMD_REQUEST_F | FW_CMD_READ_F |
+					ldst_addrspace);
+	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+	c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	return ret;
+}
+
+/**
  *      t4_fw_hello - establish communication with FW
  *      @adap: the adapter
  *      @mbox: mailbox to use for the FW command
@@ -3863,11 +4761,11 @@
 retry:
 	memset(&c, 0, sizeof(c));
 	INIT_CMD(c, HELLO, WRITE);
-	c.err_to_clearinit = htonl(
+	c.err_to_clearinit = cpu_to_be32(
 		FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
 		FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
-		FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ? mbox :
-				      FW_HELLO_CMD_MBMASTER_M) |
+		FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
+					mbox : FW_HELLO_CMD_MBMASTER_M) |
 		FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
 		FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
 		FW_HELLO_CMD_CLEARINIT_F);
@@ -3888,7 +4786,7 @@
 		return ret;
 	}
 
-	v = ntohl(c.err_to_clearinit);
+	v = be32_to_cpu(c.err_to_clearinit);
 	master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
 	if (state) {
 		if (v & FW_HELLO_CMD_ERR_F)
@@ -4017,7 +4915,7 @@
 
 	memset(&c, 0, sizeof(c));
 	INIT_CMD(c, RESET, WRITE);
-	c.val = htonl(reset);
+	c.val = cpu_to_be32(reset);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -4050,8 +4948,8 @@
 
 		memset(&c, 0, sizeof(c));
 		INIT_CMD(c, RESET, WRITE);
-		c.val = htonl(PIORST_F | PIORSTMODE_F);
-		c.halt_pkd = htonl(FW_RESET_CMD_HALT_F);
+		c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
+		c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 	}
 
@@ -4190,7 +5088,7 @@
 	 * the newly loaded firmware will handle this right by checking
 	 * its header flags to see if it advertises the capability.
 	 */
-	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
+	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
 	return t4_fw_restart(adap, mbox, reset);
 }
 
@@ -4321,7 +5219,7 @@
 }
 
 /**
- *	t4_query_params - query FW or device parameters
+ *	t4_query_params_rw - query FW or device parameters
  *	@adap: the adapter
  *	@mbox: mailbox to use for the FW command
  *	@pf: the PF
@@ -4329,13 +5227,14 @@
  *	@nparams: the number of parameters
  *	@params: the parameter names
  *	@val: the parameter values
+ *	@rw: Write and read flag: when set, the values in @val are also
+ *	written to the firmware before the results are read back
  *
  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
  *	queried at once.
  */
-int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
-		    unsigned int vf, unsigned int nparams, const u32 *params,
-		    u32 *val)
+int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		       unsigned int vf, unsigned int nparams, const u32 *params,
+		       u32 *val, int rw)
 {
 	int i, ret;
 	struct fw_params_cmd c;
@@ -4345,22 +5244,35 @@
 		return -EINVAL;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
-			    FW_CMD_READ_F | FW_PARAMS_CMD_PFN_V(pf) |
-			    FW_PARAMS_CMD_VFN_V(vf));
-	c.retval_len16 = htonl(FW_LEN16(c));
-	for (i = 0; i < nparams; i++, p += 2)
-		*p = htonl(*params++);
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
+				  FW_CMD_REQUEST_F | FW_CMD_READ_F |
+				  FW_PARAMS_CMD_PFN_V(pf) |
+				  FW_PARAMS_CMD_VFN_V(vf));
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+
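+	/* The read/write variant additionally passes the caller's current
+	 * values to the firmware along with the parameter names.
+	 */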
+	for (i = 0; i < nparams; i++) {
+		*p++ = cpu_to_be32(*params++);
+		if (rw)
+			*p = cpu_to_be32(*(val + i));
+		p++;
+	}
 
 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 	if (ret == 0)
 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
-			*val++ = ntohl(*p);
+			*val++ = be32_to_cpu(*p);
 	return ret;
 }
 
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int nparams, const u32 *params,
+		    u32 *val)
+{
+	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
+}
+
 /**
- *      t4_set_params_nosleep - sets FW or device parameters
+ *      t4_set_params_timeout - sets FW or device parameters
  *      @adap: the adapter
  *      @mbox: mailbox to use for the FW command
  *      @pf: the PF
@@ -4368,15 +5280,15 @@
  *      @nparams: the number of parameters
  *      @params: the parameter names
  *      @val: the parameter values
+ *      @timeout: the timeout for the mailbox command, in milliseconds
  *
- *	 Does not ever sleep
  *      Sets the value of FW or device parameters.  Up to 7 parameters can be
  *      specified at once.
  */
-int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
 			  unsigned int pf, unsigned int vf,
 			  unsigned int nparams, const u32 *params,
-			  const u32 *val)
+			  const u32 *val, int timeout)
 {
 	struct fw_params_cmd c;
 	__be32 *p = &c.param[0].mnem;
@@ -4386,9 +5298,9 @@
 
 	memset(&c, 0, sizeof(c));
 	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
-				FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
-				FW_PARAMS_CMD_PFN_V(pf) |
-				FW_PARAMS_CMD_VFN_V(vf));
+				  FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+				  FW_PARAMS_CMD_PFN_V(pf) |
+				  FW_PARAMS_CMD_VFN_V(vf));
 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 
 	while (nparams--) {
@@ -4396,7 +5308,7 @@
 		*p++ = cpu_to_be32(*val++);
 	}
 
-	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
 }
 
 /**
@@ -4416,23 +5328,8 @@
 		  unsigned int vf, unsigned int nparams, const u32 *params,
 		  const u32 *val)
 {
-	struct fw_params_cmd c;
-	__be32 *p = &c.param[0].mnem;
-
-	if (nparams > 7)
-		return -EINVAL;
-
-	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
-			    FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN_V(pf) |
-			    FW_PARAMS_CMD_VFN_V(vf));
-	c.retval_len16 = htonl(FW_LEN16(c));
-	while (nparams--) {
-		*p++ = htonl(*params++);
-		*p++ = htonl(*val++);
-	}
-
-	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
+				     FW_CMD_MAX_TIMEOUT);
 }
 
 /**
@@ -4465,20 +5362,21 @@
 	struct fw_pfvf_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
-			    FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
-			    FW_PFVF_CMD_VFN_V(vf));
-	c.retval_len16 = htonl(FW_LEN16(c));
-	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
-			       FW_PFVF_CMD_NIQ_V(rxq));
-	c.type_to_neq = htonl(FW_PFVF_CMD_CMASK_V(cmask) |
-			       FW_PFVF_CMD_PMASK_V(pmask) |
-			       FW_PFVF_CMD_NEQ_V(txq));
-	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC_V(tc) | FW_PFVF_CMD_NVI_V(vi) |
-				FW_PFVF_CMD_NEXACTF_V(nexact));
-	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS_V(rcaps) |
-				     FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
-				     FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
+				  FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
+				  FW_PFVF_CMD_VFN_V(vf));
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+	c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
+				     FW_PFVF_CMD_NIQ_V(rxq));
+	c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
+				    FW_PFVF_CMD_PMASK_V(pmask) |
+				    FW_PFVF_CMD_NEQ_V(txq));
+	c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
+				      FW_PFVF_CMD_NVI_V(vi) |
+				      FW_PFVF_CMD_NEXACTF_V(nexact));
+	c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
+					FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
+					FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -4507,10 +5405,10 @@
 	struct fw_vi_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
-			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
-			    FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
-	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
+				  FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+				  FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
+	c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
 	c.portid_pkd = FW_VI_CMD_PORTID_V(port);
 	c.nmac = nmac - 1;
 
@@ -4532,8 +5430,35 @@
 		}
 	}
 	if (rss_size)
-		*rss_size = FW_VI_CMD_RSSSIZE_G(ntohs(c.rsssize_pkd));
-	return FW_VI_CMD_VIID_G(ntohs(c.type_viid));
+		*rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
+	return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
+}
+
+/**
+ *	t4_free_vi - free a virtual interface
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the VI
+ *	@vf: the VF owning the VI
+ *	@viid: virtual interface identifier
+ *
+ *	Free a previously allocated virtual interface.
+ */
+int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int viid)
+{
+	struct fw_vi_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
+				  FW_CMD_REQUEST_F |
+				  FW_CMD_EXEC_F |
+				  FW_VI_CMD_PFN_V(pf) |
+				  FW_VI_CMD_VFN_V(vf));
+	c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
+	c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
+
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 }
 
 /**
@@ -4569,14 +5494,16 @@
 		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST_F |
-			     FW_CMD_WRITE_F | FW_VI_RXMODE_CMD_VIID_V(viid));
-	c.retval_len16 = htonl(FW_LEN16(c));
-	c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU_V(mtu) |
-				  FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
-				  FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
-				  FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
-				  FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
+	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
+				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+				   FW_VI_RXMODE_CMD_VIID_V(viid));
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+	c.mtu_to_vlanexen =
+		cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
+			    FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
+			    FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
+			    FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
+			    FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
 }
 
@@ -4606,43 +5533,71 @@
 		      unsigned int viid, bool free, unsigned int naddr,
 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
 {
-	int i, ret;
+	int offset, ret = 0;
 	struct fw_vi_mac_cmd c;
-	struct fw_vi_mac_exact *p;
-	unsigned int max_naddr = is_t4(adap->params.chip) ?
-				       NUM_MPS_CLS_SRAM_L_INSTANCES :
-				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+	unsigned int nfilters = 0;
+	unsigned int max_naddr = adap->params.arch.mps_tcam_size;
+	unsigned int rem = naddr;
 
-	if (naddr > 7)
+	if (naddr > max_naddr)
 		return -EINVAL;
 
-	memset(&c, 0, sizeof(c));
-	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
-			     FW_CMD_WRITE_F | (free ? FW_CMD_EXEC_F : 0) |
-			     FW_VI_MAC_CMD_VIID_V(viid));
-	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS_V(free) |
-				    FW_CMD_LEN16_V((naddr + 2) / 2));
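+	/* Submit the addresses in chunks that fit into a single command */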
+	for (offset = 0; offset < naddr; /**/) {
+		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
+					 rem : ARRAY_SIZE(c.u.exact));
+		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+						     u.exact[fw_naddr]), 16);
+		struct fw_vi_mac_exact *p;
+		int i;
 
-	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
-		p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
-				      FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
-		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
+		memset(&c, 0, sizeof(c));
+		c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+					   FW_CMD_REQUEST_F |
+					   FW_CMD_WRITE_F |
+					   FW_CMD_EXEC_V(free) |
+					   FW_VI_MAC_CMD_VIID_V(viid));
+		c.freemacs_to_len16 =
+			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
+				    FW_CMD_LEN16_V(len16));
+
+		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+			p->valid_to_idx =
+				cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+					    FW_VI_MAC_CMD_IDX_V(
+						    FW_VI_MAC_ADD_MAC));
+			memcpy(p->macaddr, addr[offset + i],
+			       sizeof(p->macaddr));
+		}
+
+		/* It's okay if we run out of space in our MAC address arena.
+		 * Some of the addresses we submit may get stored so we need
+		 * to run through the reply to see what the results were ...
+		 */
+		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
+		if (ret && ret != -FW_ENOMEM)
+			break;
+
+		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+			u16 index = FW_VI_MAC_CMD_IDX_G(
+					be16_to_cpu(p->valid_to_idx));
+
+			if (idx)
+				idx[offset + i] = (index >= max_naddr ?
+						   0xffff : index);
+			if (index < max_naddr)
+				nfilters++;
+			else if (hash)
+				*hash |= (1ULL <<
+					  hash_mac_addr(addr[offset + i]));
+		}
+
+		free = false;
+		offset += fw_naddr;
+		rem -= fw_naddr;
 	}
 
-	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
-	if (ret)
-		return ret;
-
-	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
-		u16 index = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
-
-		if (idx)
-			idx[i] = index >= max_naddr ? 0xffff : index;
-		if (index < max_naddr)
-			ret++;
-		else if (hash)
-			*hash |= (1ULL << hash_mac_addr(addr[i]));
-	}
+	if (ret == 0 || ret == -FW_ENOMEM)
+		ret = nfilters;
 	return ret;
 }
 
@@ -4671,26 +5626,25 @@
 	int ret, mode;
 	struct fw_vi_mac_cmd c;
 	struct fw_vi_mac_exact *p = c.u.exact;
-	unsigned int max_mac_addr = is_t4(adap->params.chip) ?
-				    NUM_MPS_CLS_SRAM_L_INSTANCES :
-				    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+	unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
 
 	if (idx < 0)                             /* new allocation */
 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
-			     FW_CMD_WRITE_F | FW_VI_MAC_CMD_VIID_V(viid));
-	c.freemacs_to_len16 = htonl(FW_CMD_LEN16_V(1));
-	p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
-				FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
-				FW_VI_MAC_CMD_IDX_V(idx));
+	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+				   FW_VI_MAC_CMD_VIID_V(viid));
+	c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
+	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+				      FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
+				      FW_VI_MAC_CMD_IDX_V(idx));
 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
 
 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 	if (ret == 0) {
-		ret = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
+		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
 		if (ret >= max_mac_addr)
 			ret = -ENOMEM;
 	}
@@ -4714,11 +5668,12 @@
 	struct fw_vi_mac_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
-			     FW_CMD_WRITE_F | FW_VI_ENABLE_CMD_VIID_V(viid));
-	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN_F |
-				    FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
-				    FW_CMD_LEN16_V(1));
+	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+				   FW_VI_ENABLE_CMD_VIID_V(viid));
+	c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
+					  FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
+					  FW_CMD_LEN16_V(1));
 	c.u.hash.hashvec = cpu_to_be64(vec);
 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
 }
@@ -4741,12 +5696,13 @@
 	struct fw_vi_enable_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
-			     FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
-
-	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
-			       FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(c) |
-			       FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en));
+	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
+				   FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+				   FW_VI_ENABLE_CMD_VIID_V(viid));
+	c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
+				     FW_VI_ENABLE_CMD_EEN_V(tx_en) |
+				     FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
+				     FW_LEN16(c));
 	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -4781,10 +5737,11 @@
 	struct fw_vi_enable_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
-			     FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
-	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
-	c.blinkdur = htons(nblinks);
+	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
+				   FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+				   FW_VI_ENABLE_CMD_VIID_V(viid));
+	c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
+	c.blinkdur = cpu_to_be16(nblinks);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -4808,14 +5765,14 @@
 	struct fw_iq_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
-			    FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
-			    FW_IQ_CMD_VFN_V(vf));
-	c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | FW_LEN16(c));
-	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iqtype));
-	c.iqid = htons(iqid);
-	c.fl0id = htons(fl0id);
-	c.fl1id = htons(fl1id);
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+				  FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+				  FW_IQ_CMD_VFN_V(vf));
+	c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
+	c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
+	c.iqid = cpu_to_be16(iqid);
+	c.fl0id = cpu_to_be16(fl0id);
+	c.fl1id = cpu_to_be16(fl1id);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -4835,11 +5792,12 @@
 	struct fw_eq_eth_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
-			    FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN_V(pf) |
-			    FW_EQ_ETH_CMD_VFN_V(vf));
-	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
-	c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID_V(eqid));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
+				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+				  FW_EQ_ETH_CMD_PFN_V(pf) |
+				  FW_EQ_ETH_CMD_VFN_V(vf));
+	c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
+	c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -4859,11 +5817,12 @@
 	struct fw_eq_ctrl_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
-			    FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN_V(pf) |
-			    FW_EQ_CTRL_CMD_VFN_V(vf));
-	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
-	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID_V(eqid));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
+				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+				  FW_EQ_CTRL_CMD_PFN_V(pf) |
+				  FW_EQ_CTRL_CMD_VFN_V(vf));
+	c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
+	c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -4883,11 +5842,12 @@
 	struct fw_eq_ofld_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
-			    FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(pf) |
-			    FW_EQ_OFLD_CMD_VFN_V(vf));
-	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
-	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eqid));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
+				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+				  FW_EQ_OFLD_CMD_PFN_V(pf) |
+				  FW_EQ_OFLD_CMD_VFN_V(vf));
+	c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
+	c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -4905,11 +5865,11 @@
 	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
 		int speed = 0, fc = 0;
 		const struct fw_port_cmd *p = (void *)rpl;
-		int chan = FW_PORT_CMD_PORTID_G(ntohl(p->op_to_portid));
+		int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
 		int port = adap->chan_map[chan];
 		struct port_info *pi = adap2pinfo(adap, port);
 		struct link_config *lc = &pi->link_cfg;
-		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
+		u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
 		int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
 		u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
 
@@ -5043,6 +6003,22 @@
 	return 0;
 }
 
+static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
+{
+	u16 val;
+	u32 pcie_cap;
+
+	pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+	if (pcie_cap) {
+		pci_read_config_word(adapter->pdev,
+				     pcie_cap + PCI_EXP_DEVCTL2, &val);
+		val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
+		val |= range;
+		pci_write_config_word(adapter->pdev,
+				      pcie_cap + PCI_EXP_DEVCTL2, val);
+	}
+}
+
 /**
  *	t4_prep_adapter - prepare SW and HW for operation
  *	@adapter: the adapter
@@ -5075,9 +6051,30 @@
 	switch (ver) {
 	case CHELSIO_T4:
 		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
+		adapter->params.arch.sge_fl_db = DBPRIO_F;
+		adapter->params.arch.mps_tcam_size =
+				 NUM_MPS_CLS_SRAM_L_INSTANCES;
+		adapter->params.arch.mps_rplc_size = 128;
+		adapter->params.arch.nchan = NCHAN;
+		adapter->params.arch.vfcount = 128;
 		break;
 	case CHELSIO_T5:
 		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+		adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
+		adapter->params.arch.mps_tcam_size =
+				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+		adapter->params.arch.mps_rplc_size = 128;
+		adapter->params.arch.nchan = NCHAN;
+		adapter->params.arch.vfcount = 128;
+		break;
+	case CHELSIO_T6:
+		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+		adapter->params.arch.sge_fl_db = 0;
+		adapter->params.arch.mps_tcam_size =
+				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+		adapter->params.arch.mps_rplc_size = 256;
+		adapter->params.arch.nchan = 2;
+		adapter->params.arch.vfcount = 256;
 		break;
 	default:
 		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
@@ -5094,11 +6091,14 @@
 	adapter->params.nports = 1;
 	adapter->params.portvec = 1;
 	adapter->params.vpd.cclk = 50000;
+
+	/* Set pci completion timeout value to 4 seconds. */
+	set_pcie_completion_timeout(adapter, 0xd);
 	return 0;
 }
 
 /**
- *	cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
  *	@adapter: the adapter
  *	@qid: the Queue ID
  *	@qtype: the Ingress or Egress type for @qid
@@ -5123,7 +6123,7 @@
  *	Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
 *	then these "Inferred Queue ID" registers may not be used.
  */
-int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
+int t4_bar2_sge_qregs(struct adapter *adapter,
 		      unsigned int qid,
 		      enum t4_bar2_qtype qtype,
 		      int user,
@@ -5155,7 +6155,7 @@
 	 *  o The BAR2 Queue ID.
 	 *  o The BAR2 Queue ID Offset into the BAR2 page.
 	 */
-	bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
 	bar2_qid = qid & qpp_mask;
 	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
 
@@ -5224,18 +6224,19 @@
 	/* Otherwise, ask the firmware for its Device Log Parameters.
 	 */
 	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
-	devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
-				       FW_CMD_REQUEST_F | FW_CMD_READ_F);
-	devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
+	devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
+					     FW_CMD_REQUEST_F | FW_CMD_READ_F);
+	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
 	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
 			 &devlog_cmd);
 	if (ret)
 		return ret;
 
-	devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
+	devlog_meminfo =
+		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
 	dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
 	dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
-	dparams->size = ntohl(devlog_cmd.memsize_devlog);
+	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
 
 	return 0;
 }
@@ -5256,13 +6257,13 @@
 	 */
 	hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
 	s_hps = (HOSTPAGESIZEPF0_S +
-		 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
+		 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
 	sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
 
 	/* Extract the SGE Egress and Ingress Queues Per Page for our PF.
 	 */
 	s_qpp = (QUEUESPERPAGEPF0_S +
-		(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
+		(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
 	qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
 	sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
 	qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
@@ -5293,12 +6294,19 @@
 	/* Cache the adapter's Compressed Filter Mode and global Ingress
 	 * Configuration.
 	 */
-	t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
-			 &adap->params.tp.vlan_pri_map, 1,
-			 TP_VLAN_PRI_MAP_A);
-	t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
-			 &adap->params.tp.ingress_config, 1,
-			 TP_INGRESS_CONFIG_A);
+	if (adap->flags & FW_OK) {
+		t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
+				TP_VLAN_PRI_MAP_A, 1);
+		t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
+				TP_INGRESS_CONFIG_A, 1);
+	} else {
+		t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+				 &adap->params.tp.vlan_pri_map, 1,
+				 TP_VLAN_PRI_MAP_A);
+		t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+				 &adap->params.tp.ingress_config, 1,
+				 TP_INGRESS_CONFIG_A);
+	}
 
 	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
 	 * shift positions of several elements of the Compressed Filter Tuple
@@ -5374,6 +6382,29 @@
 	return field_shift;
 }
 
+int t4_init_rss_mode(struct adapter *adap, int mbox)
+{
+	int i, ret;
+	struct fw_rss_vi_config_cmd rvc;
+
+	memset(&rvc, 0, sizeof(rvc));
+
+	for_each_port(adap, i) {
+		struct port_info *p = adap2pinfo(adap, i);
+
+		rvc.op_to_viid =
+			cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+				    FW_CMD_REQUEST_F | FW_CMD_READ_F |
+				    FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
+		rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
+		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
+		if (ret)
+			return ret;
+		p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
+	}
+	return 0;
+}
+
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
 {
 	u8 addr[6];
@@ -5391,10 +6422,10 @@
 		while ((adap->params.portvec & (1 << j)) == 0)
 			j++;
 
-		c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
-				       FW_CMD_REQUEST_F | FW_CMD_READ_F |
-				       FW_PORT_CMD_PORTID_V(j));
-		c.action_to_len16 = htonl(
+		c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+					     FW_CMD_REQUEST_F | FW_CMD_READ_F |
+					     FW_PORT_CMD_PORTID_V(j));
+		c.action_to_len16 = cpu_to_be32(
 			FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
 			FW_LEN16(c));
 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
@@ -5412,22 +6443,23 @@
 		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
 		adap->port[i]->dev_port = j;
 
-		ret = ntohl(c.u.info.lstatus_to_modtype);
+		ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
 		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
 			FW_PORT_CMD_MDIOADDR_G(ret) : -1;
 		p->port_type = FW_PORT_CMD_PTYPE_G(ret);
 		p->mod_type = FW_PORT_MOD_TYPE_NA;
 
-		rvc.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
-				       FW_CMD_REQUEST_F | FW_CMD_READ_F |
-				       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
-		rvc.retval_len16 = htonl(FW_LEN16(rvc));
+		rvc.op_to_viid =
+			cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+				    FW_CMD_REQUEST_F | FW_CMD_READ_F |
+				    FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
+		rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
 		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
 		if (ret)
 			return ret;
-		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
+		p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
 
-		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
+		init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
 		j++;
 	}
 	return 0;
@@ -5718,3 +6750,130 @@
 		t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
 			     cfg | adap->params.tp.la_mask);
 }
+
+/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
+ * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
+ * state for more than the Warning Threshold then we'll issue a warning about
+ * a potential hang.  We'll repeat the warning every Warning Repeat seconds
+ * for as long as the SGE Ingress DMA Channel appears to be hung.
+ * If the situation clears, we'll note that as well.
+ */
+#define SGE_IDMA_WARN_THRESH 1
+#define SGE_IDMA_WARN_REPEAT 300
+
+/**
+ *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
+ *	@adapter: the adapter
+ *	@idma: the adapter IDMA Monitor state
+ *
+ *	Initialize the state of an SGE Ingress DMA Monitor.
+ */
+void t4_idma_monitor_init(struct adapter *adapter,
+			  struct sge_idma_monitor_state *idma)
+{
+	/* Initialize the state variables for detecting an SGE Ingress DMA
+	 * hang.  The SGE has internal counters which count up on each clock
+	 * tick whenever the SGE finds its Ingress DMA State Engines in the
+	 * same state they were on the previous clock tick.  The clock used is
+	 * the Core Clock so we have a limit on the maximum "time" they can
+	 * record; typically a very small number of seconds.  For instance,
+	 * with a 600MHz Core Clock, we can only count up to a bit more than
+	 * 7s.  So we'll synthesize a larger counter in order to not run the
+	 * risk of having the "timers" overflow and give us the flexibility to
+	 * maintain a Hung SGE State Machine of our own which operates across
+	 * a longer time frame.
+	 */
+	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
+	idma->idma_stalled[0] = 0;
+	idma->idma_stalled[1] = 0;
+}
+
+/**
+ *	t4_idma_monitor - monitor SGE Ingress DMA state
+ *	@adapter: the adapter
+ *	@idma: the adapter IDMA Monitor state
+ *	@hz: number of ticks/second
+ *	@ticks: number of ticks since the last IDMA Monitor call
+ */
+void t4_idma_monitor(struct adapter *adapter,
+		     struct sge_idma_monitor_state *idma,
+		     int hz, int ticks)
+{
+	int i, idma_same_state_cnt[2];
+
+	 /* Read the SGE Debug Ingress DMA Same State Count registers.  These
+	  * are counters inside the SGE which count up on each clock when the
+	  * SGE finds its Ingress DMA State Engines in the same states they
+	  * were in the previous clock.  The counters will peg out at
+	  * 0xffffffff without wrapping around, so once they pass the 1s
+	  * threshold they'll stay above it until the IDMA state changes.
+	  */
+	t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
+	idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
+	idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
+
+	for (i = 0; i < 2; i++) {
+		u32 debug0, debug11;
+
+		/* If the Ingress DMA Same State Counter ("timer") is less
+		 * than 1s, then we can reset our synthesized Stall Timer and
+		 * continue.  If we have previously emitted warnings about a
+		 * potential stalled Ingress Queue, issue a note indicating
+		 * that the Ingress Queue has resumed forward progress.
+		 */
+		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
+			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
+				dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
+					 "resumed after %d seconds\n",
+					 i, idma->idma_qid[i],
+					 idma->idma_stalled[i] / hz);
+			idma->idma_stalled[i] = 0;
+			continue;
+		}
+
+		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
+		 * domain.  The first time we get here it'll be because we
+		 * passed the 1s Threshold; each additional time it'll be
+		 * because the RX Timer Callback is being fired on its regular
+		 * schedule.
+		 *
+		 * If the stall is below our Potential Hung Ingress Queue
+		 * Warning Threshold, continue.
+		 */
+		if (idma->idma_stalled[i] == 0) {
+			idma->idma_stalled[i] = hz;
+			idma->idma_warn[i] = 0;
+		} else {
+			idma->idma_stalled[i] += ticks;
+			idma->idma_warn[i] -= ticks;
+		}
+
+		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
+			continue;
+
+		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
+		 */
+		if (idma->idma_warn[i] > 0)
+			continue;
+		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
+
+		/* Read and save the SGE IDMA State and Queue ID information.
+		 * We do this every time in case it changes across time ...
+		 * can't be too careful ...
+		 */
+		t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
+		debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
+		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
+
+		t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
+		debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
+		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
+
+		dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
+			 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
+			 i, idma->idma_qid[i], idma->idma_state[i],
+			 idma->idma_stalled[i] / hz,
+			 debug0, debug11);
+		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
+	}
+}
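
A caller is expected to drive t4_idma_monitor() from a periodic RX timer; a
minimal sketch, assuming a hypothetical driver with an embedded
sge_idma_monitor_state and a timer that fires every RX_QCHECK_PERIOD jiffies
(the callback name and the idma_monitor field are illustrative):

	static void my_rx_timer_cb(struct adapter *adap)
	{
		/* one-time setup done elsewhere:
		 * t4_idma_monitor_init(adap, &adap->sge.idma_monitor);
		 */
		t4_idma_monitor(adap, &adap->sge.idma_monitor,
				HZ, RX_QCHECK_PERIOD);
	}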
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 380b15c..c8488f4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -52,8 +52,6 @@
 	MBOX_LEN       = 64,    /* mailbox size in bytes */
 	TRACE_LEN      = 112,   /* length of trace data and mask */
 	FILTER_OPT_LEN = 36,    /* filter tuple width for optional components */
-	NWOL_PAT       = 8,     /* # of WoL patterns */
-	WOL_PAT_LEN    = 128,   /* length of WoL patterns */
 };
 
 enum {
@@ -61,6 +59,8 @@
 	CIM_NUM_OBQ    = 6,     /* # of CIM OBQs */
 	CIM_NUM_OBQ_T5 = 8,     /* # of CIM OBQs for T5 adapter */
 	CIMLA_SIZE     = 2048,  /* # of 32-bit words in CIM LA */
+	CIM_PIFLA_SIZE = 64,    /* # of 192-bit words in CIM PIF LA */
+	CIM_MALA_SIZE  = 64,    /* # of 160-bit words in CIM MA LA */
 	CIM_IBQ_SIZE   = 128,   /* # of 128-bit words in a CIM IBQ */
 	CIM_OBQ_SIZE   = 128,   /* # of 128-bit words in a CIM OBQ */
 	TPLA_SIZE      = 128,   /* # of 64-bit words in TP LA */
@@ -152,17 +152,33 @@
 	};
 };
 
-#define RSPD_NEWBUF 0x80000000U
-#define RSPD_LEN(x) (((x) >> 0) & 0x7fffffffU)
-#define RSPD_QID(x) RSPD_LEN(x)
+#define RSPD_NEWBUF_S    31
+#define RSPD_NEWBUF_V(x) ((x) << RSPD_NEWBUF_S)
+#define RSPD_NEWBUF_F    RSPD_NEWBUF_V(1U)
 
-#define RSPD_GEN(x)  ((x) >> 7)
-#define RSPD_TYPE(x) (((x) >> 4) & 3)
+#define RSPD_LEN_S    0
+#define RSPD_LEN_M    0x7fffffff
+#define RSPD_LEN_G(x) (((x) >> RSPD_LEN_S) & RSPD_LEN_M)
 
-#define V_QINTR_CNT_EN	   0x0
-#define QINTR_CNT_EN       0x1
-#define QINTR_TIMER_IDX(x) ((x) << 1)
-#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
+#define RSPD_QID_S    RSPD_LEN_S
+#define RSPD_QID_M    RSPD_LEN_M
+#define RSPD_QID_G(x) RSPD_LEN_G(x)
+
+#define RSPD_GEN_S    7
+
+#define RSPD_TYPE_S    4
+#define RSPD_TYPE_M    0x3
+#define RSPD_TYPE_G(x) (((x) >> RSPD_TYPE_S) & RSPD_TYPE_M)
+
+/* Rx queue interrupt deferral fields: counter enable and timer index */
+#define QINTR_CNT_EN_S    0
+#define QINTR_CNT_EN_V(x) ((x) << QINTR_CNT_EN_S)
+#define QINTR_CNT_EN_F    QINTR_CNT_EN_V(1U)
+
+#define QINTR_TIMER_IDX_S    1
+#define QINTR_TIMER_IDX_M    0x7
+#define QINTR_TIMER_IDX_V(x) ((x) << QINTR_TIMER_IDX_S)
+#define QINTR_TIMER_IDX_G(x) (((x) >> QINTR_TIMER_IDX_S) & QINTR_TIMER_IDX_M)
 
 /*
  * Flash layout.
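
The _S/_M/_V/_G/_F suffixes above follow the driver's register-field
convention: shift, mask, value-builder, getter, and single-bit flag.  A
minimal sketch of packing and unpacking an Rx queue's interrupt-deferral
parameters with the new names:

	u8 intr_params;
	unsigned int timer_idx;
	bool cnt_en;

	/* pack: timer index 3, packet-count threshold enabled */
	intr_params = QINTR_TIMER_IDX_V(3) | QINTR_CNT_EN_F;

	/* unpack: recover the timer index and the enable flag */
	timer_idx = QINTR_TIMER_IDX_G(intr_params);
	cnt_en = (intr_params & QINTR_CNT_EN_F) != 0;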
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 30a2f56..132cb8f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -634,26 +634,9 @@
 
 struct cpl_tx_pkt_core {
 	__be32 ctrl0;
-#define TXPKT_VF(x)        ((x) << 0)
-#define TXPKT_PF(x)        ((x) << 8)
-#define TXPKT_VF_VLD       (1 << 11)
-#define TXPKT_OVLAN_IDX(x) ((x) << 12)
-#define TXPKT_INTF(x)      ((x) << 16)
-#define TXPKT_INS_OVLAN    (1 << 21)
-#define TXPKT_OPCODE(x)    ((x) << 24)
 	__be16 pack;
 	__be16 len;
 	__be64 ctrl1;
-#define TXPKT_CSUM_END(x)   ((x) << 12)
-#define TXPKT_CSUM_START(x) ((x) << 20)
-#define TXPKT_IPHDR_LEN(x)  ((u64)(x) << 20)
-#define TXPKT_CSUM_LOC(x)   ((u64)(x) << 30)
-#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34)
-#define TXPKT_CSUM_TYPE(x)  ((u64)(x) << 40)
-#define TXPKT_VLAN(x)       ((u64)(x) << 44)
-#define TXPKT_VLAN_VLD      (1ULL << 60)
-#define TXPKT_IPCSUM_DIS    (1ULL << 62)
-#define TXPKT_L4CSUM_DIS    (1ULL << 63)
 };
 
 struct cpl_tx_pkt {
@@ -663,16 +646,69 @@
 
 #define cpl_tx_pkt_xt cpl_tx_pkt
 
+/* cpl_tx_pkt_core.ctrl0 fields */
+#define TXPKT_VF_S    0
+#define TXPKT_VF_V(x) ((x) << TXPKT_VF_S)
+
+#define TXPKT_PF_S    8
+#define TXPKT_PF_V(x) ((x) << TXPKT_PF_S)
+
+#define TXPKT_VF_VLD_S    11
+#define TXPKT_VF_VLD_V(x) ((x) << TXPKT_VF_VLD_S)
+#define TXPKT_VF_VLD_F    TXPKT_VF_VLD_V(1U)
+
+#define TXPKT_OVLAN_IDX_S    12
+#define TXPKT_OVLAN_IDX_V(x) ((x) << TXPKT_OVLAN_IDX_S)
+
+#define TXPKT_INTF_S    16
+#define TXPKT_INTF_V(x) ((x) << TXPKT_INTF_S)
+
+#define TXPKT_INS_OVLAN_S    21
+#define TXPKT_INS_OVLAN_V(x) ((x) << TXPKT_INS_OVLAN_S)
+#define TXPKT_INS_OVLAN_F    TXPKT_INS_OVLAN_V(1U)
+
+#define TXPKT_OPCODE_S    24
+#define TXPKT_OPCODE_V(x) ((x) << TXPKT_OPCODE_S)
+
+/* cpl_tx_pkt_core.ctrl1 fields */
+#define TXPKT_CSUM_END_S    12
+#define TXPKT_CSUM_END_V(x) ((x) << TXPKT_CSUM_END_S)
+
+#define TXPKT_CSUM_START_S    20
+#define TXPKT_CSUM_START_V(x) ((x) << TXPKT_CSUM_START_S)
+
+#define TXPKT_IPHDR_LEN_S    20
+#define TXPKT_IPHDR_LEN_V(x) ((__u64)(x) << TXPKT_IPHDR_LEN_S)
+
+#define TXPKT_CSUM_LOC_S    30
+#define TXPKT_CSUM_LOC_V(x) ((__u64)(x) << TXPKT_CSUM_LOC_S)
+
+#define TXPKT_ETHHDR_LEN_S    34
+#define TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << TXPKT_ETHHDR_LEN_S)
+
+#define T6_TXPKT_ETHHDR_LEN_S    32
+#define T6_TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << T6_TXPKT_ETHHDR_LEN_S)
+
+#define TXPKT_CSUM_TYPE_S    40
+#define TXPKT_CSUM_TYPE_V(x) ((__u64)(x) << TXPKT_CSUM_TYPE_S)
+
+#define TXPKT_VLAN_S    44
+#define TXPKT_VLAN_V(x) ((__u64)(x) << TXPKT_VLAN_S)
+
+#define TXPKT_VLAN_VLD_S    60
+#define TXPKT_VLAN_VLD_V(x) ((__u64)(x) << TXPKT_VLAN_VLD_S)
+#define TXPKT_VLAN_VLD_F    TXPKT_VLAN_VLD_V(1ULL)
+
+#define TXPKT_IPCSUM_DIS_S    62
+#define TXPKT_IPCSUM_DIS_V(x) ((__u64)(x) << TXPKT_IPCSUM_DIS_S)
+#define TXPKT_IPCSUM_DIS_F    TXPKT_IPCSUM_DIS_V(1ULL)
+
+#define TXPKT_L4CSUM_DIS_S    63
+#define TXPKT_L4CSUM_DIS_V(x) ((__u64)(x) << TXPKT_L4CSUM_DIS_S)
+#define TXPKT_L4CSUM_DIS_F    TXPKT_L4CSUM_DIS_V(1ULL)
+
 struct cpl_tx_pkt_lso_core {
 	__be32 lso_ctrl;
-#define LSO_TCPHDR_LEN(x) ((x) << 0)
-#define LSO_IPHDR_LEN(x)  ((x) << 4)
-#define LSO_ETHHDR_LEN(x) ((x) << 16)
-#define LSO_IPV6(x)       ((x) << 20)
-#define LSO_LAST_SLICE    (1 << 22)
-#define LSO_FIRST_SLICE   (1 << 23)
-#define LSO_OPCODE(x)     ((x) << 24)
-#define LSO_T5_XFER_SIZE(x) ((x) << 0)
 	__be16 ipid_ofst;
 	__be16 mss;
 	__be32 seqno_offset;
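
The sge.c changes further below compose these TXPKT_*_V()/_F names into the
CPL control words; a minimal sketch for a checksum-offloaded IPv4 TCP frame,
where l3hdr_len and eth_xtra_len stand in for values computed from the skb
and T6 parts would use T6_TXPKT_ETHHDR_LEN_V() instead:

	u64 cntrl;

	cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP) |
		TXPKT_IPHDR_LEN_V(l3hdr_len) |
		TXPKT_ETHHDR_LEN_V(eth_xtra_len);
	cpl->ctrl1 = cpu_to_be64(cntrl);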
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 1a9a6f3..d7ca106 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -153,6 +153,8 @@
 	CH_PCI_ID_TABLE_FENTRY(0x5088),	/* Custom T570-CR */
 	CH_PCI_ID_TABLE_FENTRY(0x5089),	/* Custom T520-CR */
 	CH_PCI_ID_TABLE_FENTRY(0x5090),	/* Custom T540-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x5091),	/* Custom T522-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x5092),	/* Custom T520-CR */
 CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
 
 #endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 326674b..375a825 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -418,6 +418,20 @@
 #define SGE_INGRESS_QUEUES_PER_PAGE_PF_A 0x10f4
 #define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
 
+#define SGE_ERROR_STATS_A 0x1100
+
+#define UNCAPTURED_ERROR_S    18
+#define UNCAPTURED_ERROR_V(x) ((x) << UNCAPTURED_ERROR_S)
+#define UNCAPTURED_ERROR_F    UNCAPTURED_ERROR_V(1U)
+
+#define ERROR_QID_VALID_S    17
+#define ERROR_QID_VALID_V(x) ((x) << ERROR_QID_VALID_S)
+#define ERROR_QID_VALID_F    ERROR_QID_VALID_V(1U)
+
+#define ERROR_QID_S    0
+#define ERROR_QID_M    0x1ffffU
+#define ERROR_QID_G(x) (((x) >> ERROR_QID_S) & ERROR_QID_M)
+
 #define HP_INT_THRESH_S    28
 #define HP_INT_THRESH_M    0xfU
 #define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
@@ -448,8 +462,13 @@
 #define SGE_STAT_MATCH_A	0x10e8
 #define SGE_STAT_CFG_A		0x10ec
 
+#define STATMODE_S    2
+#define STATMODE_V(x) ((x) << STATMODE_S)
+
 #define STATSOURCE_T5_S    9
+#define STATSOURCE_T5_M    0xfU
 #define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S)
+#define STATSOURCE_T5_G(x) (((x) >> STATSOURCE_T5_S) & STATSOURCE_T5_M)
 
 #define SGE_DBFIFO_STATUS2_A 0x1118
 
@@ -705,6 +724,10 @@
 #define REGISTER_S    0
 #define REGISTER_V(x) ((x) << REGISTER_S)
 
+#define T6_ENABLE_S    31
+#define T6_ENABLE_V(x) ((x) << T6_ENABLE_S)
+#define T6_ENABLE_F    T6_ENABLE_V(1U)
+
 #define PFNUM_S    0
 #define PFNUM_V(x) ((x) << PFNUM_S)
 
@@ -1338,6 +1361,42 @@
 #define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S)
 #define FLMTXFLSTEMPTY_F    FLMTXFLSTEMPTY_V(1U)
 
+#define TP_TX_ORATE_A 0x7ebc
+
+#define OFDRATE3_S    24
+#define OFDRATE3_M    0xffU
+#define OFDRATE3_G(x) (((x) >> OFDRATE3_S) & OFDRATE3_M)
+
+#define OFDRATE2_S    16
+#define OFDRATE2_M    0xffU
+#define OFDRATE2_G(x) (((x) >> OFDRATE2_S) & OFDRATE2_M)
+
+#define OFDRATE1_S    8
+#define OFDRATE1_M    0xffU
+#define OFDRATE1_G(x) (((x) >> OFDRATE1_S) & OFDRATE1_M)
+
+#define OFDRATE0_S    0
+#define OFDRATE0_M    0xffU
+#define OFDRATE0_G(x) (((x) >> OFDRATE0_S) & OFDRATE0_M)
+
+#define TP_TX_TRATE_A 0x7ed0
+
+#define TNLRATE3_S    24
+#define TNLRATE3_M    0xffU
+#define TNLRATE3_G(x) (((x) >> TNLRATE3_S) & TNLRATE3_M)
+
+#define TNLRATE2_S    16
+#define TNLRATE2_M    0xffU
+#define TNLRATE2_G(x) (((x) >> TNLRATE2_S) & TNLRATE2_M)
+
+#define TNLRATE1_S    8
+#define TNLRATE1_M    0xffU
+#define TNLRATE1_G(x) (((x) >> TNLRATE1_S) & TNLRATE1_M)
+
+#define TNLRATE0_S    0
+#define TNLRATE0_M    0xffU
+#define TNLRATE0_G(x) (((x) >> TNLRATE0_S) & TNLRATE0_M)
+
 #define TP_VLAN_PRI_MAP_A 0x140
 
 #define FRAGMENTATION_S    9
@@ -1399,6 +1458,8 @@
 #define CSUM_HAS_PSEUDO_HDR_F    CSUM_HAS_PSEUDO_HDR_V(1U)
 
 #define TP_MIB_MAC_IN_ERR_0_A	0x0
+#define TP_MIB_HDR_IN_ERR_0_A	0x4
+#define TP_MIB_TCP_IN_ERR_0_A	0x8
 #define TP_MIB_TCP_OUT_RST_A	0xc
 #define TP_MIB_TCP_IN_SEG_HI_A	0x10
 #define TP_MIB_TCP_IN_SEG_LO_A	0x11
@@ -1407,11 +1468,19 @@
 #define TP_MIB_TCP_RXT_SEG_HI_A	0x14
 #define TP_MIB_TCP_RXT_SEG_LO_A	0x15
 #define TP_MIB_TNL_CNG_DROP_0_A 0x18
+#define TP_MIB_OFD_CHN_DROP_0_A 0x1c
 #define TP_MIB_TCP_V6IN_ERR_0_A 0x28
 #define TP_MIB_TCP_V6OUT_RST_A	0x2c
 #define TP_MIB_OFD_ARP_DROP_A	0x36
+#define TP_MIB_CPL_IN_REQ_0_A	0x38
+#define TP_MIB_CPL_OUT_RSP_0_A	0x3c
 #define TP_MIB_TNL_DROP_0_A	0x44
+#define TP_MIB_FCOE_DDP_0_A	0x48
+#define TP_MIB_FCOE_DROP_0_A	0x4c
+#define TP_MIB_FCOE_BYTE_0_HI_A	0x50
 #define TP_MIB_OFD_VLN_DROP_0_A	0x58
+#define TP_MIB_USM_PKTS_A	0x5c
+#define TP_MIB_RQE_DFR_PKT_A	0x64
 
 #define ULP_TX_INT_CAUSE_A	0x8dcc
 
@@ -1572,6 +1641,7 @@
 #define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
 #define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
 #define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
+#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES_L 0x528
 #define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
 #define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
 #define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
@@ -2054,6 +2124,11 @@
 #define VFLKPIDX_M    0xffU
 #define VFLKPIDX_G(x) (((x) >> VFLKPIDX_S) & VFLKPIDX_M)
 
+#define T6_VFWRADDR_S    8
+#define T6_VFWRADDR_M    0xffU
+#define T6_VFWRADDR_V(x) ((x) << T6_VFWRADDR_S)
+#define T6_VFWRADDR_G(x) (((x) >> T6_VFWRADDR_S) & T6_VFWRADDR_M)
+
 #define TP_RSS_CONFIG_CNG_A 0x7e04
 #define TP_RSS_SECRET_KEY0_A 0x40
 #define TP_RSS_PF0_CONFIG_A 0x30
@@ -2175,7 +2250,28 @@
 #define MPS_RX_PERR_INT_CAUSE_A 0x11074
 
 #define MPS_CLS_TCAM_Y_L_A 0xf000
+#define MPS_CLS_TCAM_DATA0_A 0xf000
+#define MPS_CLS_TCAM_DATA1_A 0xf004
+
+#define DMACH_S    0
+#define DMACH_M    0xffffU
+#define DMACH_G(x) (((x) >> DMACH_S) & DMACH_M)
+
 #define MPS_CLS_TCAM_X_L_A 0xf008
+#define MPS_CLS_TCAM_DATA2_CTL_A 0xf008
+
+#define CTLCMDTYPE_S    31
+#define CTLCMDTYPE_V(x) ((x) << CTLCMDTYPE_S)
+#define CTLCMDTYPE_F    CTLCMDTYPE_V(1U)
+
+#define CTLTCAMSEL_S    25
+#define CTLTCAMSEL_V(x) ((x) << CTLTCAMSEL_S)
+
+#define CTLTCAMINDEX_S    17
+#define CTLTCAMINDEX_V(x) ((x) << CTLTCAMINDEX_S)
+
+#define CTLXYBITSEL_S    16
+#define CTLXYBITSEL_V(x) ((x) << CTLXYBITSEL_S)
 
 #define MPS_CLS_TCAM_Y_L(idx) (MPS_CLS_TCAM_Y_L_A + (idx) * 16)
 #define NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512
@@ -2184,6 +2280,45 @@
 #define NUM_MPS_CLS_TCAM_X_L_INSTANCES 512
 
 #define MPS_CLS_SRAM_L_A 0xe000
+
+#define T6_MULTILISTEN0_S    26
+
+#define T6_SRAM_PRIO3_S    23
+#define T6_SRAM_PRIO3_M    0x7U
+#define T6_SRAM_PRIO3_G(x) (((x) >> T6_SRAM_PRIO3_S) & T6_SRAM_PRIO3_M)
+
+#define T6_SRAM_PRIO2_S    20
+#define T6_SRAM_PRIO2_M    0x7U
+#define T6_SRAM_PRIO2_G(x) (((x) >> T6_SRAM_PRIO2_S) & T6_SRAM_PRIO2_M)
+
+#define T6_SRAM_PRIO1_S    17
+#define T6_SRAM_PRIO1_M    0x7U
+#define T6_SRAM_PRIO1_G(x) (((x) >> T6_SRAM_PRIO1_S) & T6_SRAM_PRIO1_M)
+
+#define T6_SRAM_PRIO0_S    14
+#define T6_SRAM_PRIO0_M    0x7U
+#define T6_SRAM_PRIO0_G(x) (((x) >> T6_SRAM_PRIO0_S) & T6_SRAM_PRIO0_M)
+
+#define T6_SRAM_VLD_S    13
+#define T6_SRAM_VLD_V(x) ((x) << T6_SRAM_VLD_S)
+#define T6_SRAM_VLD_F    T6_SRAM_VLD_V(1U)
+
+#define T6_REPLICATE_S    12
+#define T6_REPLICATE_V(x) ((x) << T6_REPLICATE_S)
+#define T6_REPLICATE_F    T6_REPLICATE_V(1U)
+
+#define T6_PF_S    9
+#define T6_PF_M    0x7U
+#define T6_PF_G(x) (((x) >> T6_PF_S) & T6_PF_M)
+
+#define T6_VF_VALID_S    8
+#define T6_VF_VALID_V(x) ((x) << T6_VF_VALID_S)
+#define T6_VF_VALID_F    T6_VF_VALID_V(1U)
+
+#define T6_VF_S    0
+#define T6_VF_M    0xffU
+#define T6_VF_G(x) (((x) >> T6_VF_S) & T6_VF_M)
+
 #define MPS_CLS_SRAM_H_A 0xe004
 
 #define MPS_CLS_SRAM_L(idx) (MPS_CLS_SRAM_L_A + (idx) * 8)
@@ -2433,6 +2568,8 @@
 #define CIM_F    CIM_V(1U)
 
 #define MC1_S    31
+#define MC1_V(x) ((x) << MC1_S)
+#define MC1_F    MC1_V(1U)
 
 #define PL_INT_ENABLE_A 0x19410
 #define PL_INT_MAP0_A 0x19414
@@ -2463,6 +2600,18 @@
 #define REV_V(x) ((x) << REV_S)
 #define REV_G(x) (((x) >> REV_S) & REV_M)
 
+#define T6_UNKNOWNCMD_S    3
+#define T6_UNKNOWNCMD_V(x) ((x) << T6_UNKNOWNCMD_S)
+#define T6_UNKNOWNCMD_F    T6_UNKNOWNCMD_V(1U)
+
+#define T6_LIP0_S    2
+#define T6_LIP0_V(x) ((x) << T6_LIP0_S)
+#define T6_LIP0_F    T6_LIP0_V(1U)
+
+#define T6_LIPMISS_S    1
+#define T6_LIPMISS_V(x) ((x) << T6_LIPMISS_S)
+#define T6_LIPMISS_F    T6_LIPMISS_V(1U)
+
 #define LE_DB_INT_CAUSE_A 0x19c3c
 
 #define REQQPARERR_S    16
@@ -2485,6 +2634,14 @@
 #define LIP0_V(x) ((x) << LIP0_S)
 #define LIP0_F    LIP0_V(1U)
 
+#define TCAMINTPERR_S    13
+#define TCAMINTPERR_V(x) ((x) << TCAMINTPERR_S)
+#define TCAMINTPERR_F    TCAMINTPERR_V(1U)
+
+#define SSRAMINTPERR_S    10
+#define SSRAMINTPERR_V(x) ((x) << SSRAMINTPERR_S)
+#define SSRAMINTPERR_F    SSRAMINTPERR_V(1U)
+
 #define NCSI_INT_CAUSE_A 0x1a0d8
 
 #define CIM_DM_PRTY_ERR_S    8
@@ -2638,6 +2795,33 @@
 
 #define CIM_IBQ_DBG_DATA_A 0x7b68
 #define CIM_OBQ_DBG_DATA_A 0x7b6c
+#define CIM_DEBUGCFG_A 0x7b70
+#define CIM_DEBUGSTS_A 0x7b74
+
+#define POLADBGRDPTR_S		23
+#define POLADBGRDPTR_M		0x1ffU
+#define POLADBGRDPTR_V(x)	((x) << POLADBGRDPTR_S)
+
+#define POLADBGWRPTR_S		16
+#define POLADBGWRPTR_M		0x1ffU
+#define POLADBGWRPTR_G(x)	(((x) >> POLADBGWRPTR_S) & POLADBGWRPTR_M)
+
+#define PILADBGRDPTR_S		14
+#define PILADBGRDPTR_M		0x1ffU
+#define PILADBGRDPTR_V(x)	((x) << PILADBGRDPTR_S)
+
+#define PILADBGWRPTR_S		0
+#define PILADBGWRPTR_M		0x1ffU
+#define PILADBGWRPTR_G(x)	(((x) >> PILADBGWRPTR_S) & PILADBGWRPTR_M)
+
+#define LADBGEN_S	12
+#define LADBGEN_V(x)	((x) << LADBGEN_S)
+#define LADBGEN_F	LADBGEN_V(1U)
+
+#define CIM_PO_LA_DEBUGDATA_A 0x7b78
+#define CIM_PI_LA_DEBUGDATA_A 0x7b7c
+#define CIM_PO_LA_MADEBUGDATA_A	0x7b80
+#define CIM_PI_LA_MADEBUGDATA_A	0x7b84
 
 #define UPDBGLARDEN_S		1
 #define UPDBGLARDEN_V(x)	((x) << UPDBGLARDEN_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
index 19b2dcf..7bdee3b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -61,6 +61,30 @@
 #define SGE_TIMERREGS			6
 #define TIMERREG_COUNTER0_X		0
 
+#define FETCHBURSTMIN_64B_X		2
+
+#define FETCHBURSTMAX_256B_X		2
+#define FETCHBURSTMAX_512B_X		3
+
+#define HOSTFCMODE_STATUS_PAGE_X	2
+
+#define CIDXFLUSHTHRESH_32_X		5
+
+#define UPDATEDELIVERY_INTERRUPT_X	1
+
+#define RSPD_TYPE_FLBUF_X		0
+#define RSPD_TYPE_CPL_X			1
+#define RSPD_TYPE_INTR_X		2
+
+/* Congestion Manager Definitions.
+ */
+#define CONMCTXT_CNGTPMODE_S		19
+#define CONMCTXT_CNGTPMODE_V(x)		((x) << CONMCTXT_CNGTPMODE_S)
+#define CONMCTXT_CNGCHMAP_S		0
+#define CONMCTXT_CNGCHMAP_V(x)		((x) << CONMCTXT_CNGCHMAP_S)
+#define CONMCTXT_CNGTPMODE_CHANNEL_X	2
+#define CONMCTXT_CNGTPMODE_QUEUE_X	1
+
 /* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
  * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
  * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 03fbfd1..ab46746 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -772,7 +772,7 @@
 		} addrval;
 		struct fw_ldst_idctxt {
 			__be32 physid;
-			__be32 msg_pkd;
+			__be32 msg_ctxtflush;
 			__be32 ctxt_data7;
 			__be32 ctxt_data6;
 			__be32 ctxt_data5;
@@ -788,15 +788,27 @@
 			__be16 vctl;
 			__be16 rval;
 		} mdio;
-		struct fw_ldst_mps {
-			__be16 fid_ctl;
-			__be16 rplcpf_pkd;
-			__be32 rplc127_96;
-			__be32 rplc95_64;
-			__be32 rplc63_32;
-			__be32 rplc31_0;
-			__be32 atrb;
-			__be16 vlan[16];
+		union fw_ldst_mps {
+			struct fw_ldst_mps_rplc {
+				__be16 fid_idx;
+				__be16 rplcpf_pkd;
+				__be32 rplc255_224;
+				__be32 rplc223_192;
+				__be32 rplc191_160;
+				__be32 rplc159_128;
+				__be32 rplc127_96;
+				__be32 rplc95_64;
+				__be32 rplc63_32;
+				__be32 rplc31_0;
+			} rplc;
+			struct fw_ldst_mps_atrb {
+				__be16 fid_mpsid;
+				__be16 r2[3];
+				__be32 r3[2];
+				__be32 r4;
+				__be32 atrb;
+				__be16 vlan[16];
+			} atrb;
 		} mps;
 		struct fw_ldst_func {
 			u8 access_ctl;
@@ -822,6 +834,10 @@
 #define FW_LDST_CMD_MSG_S       31
 #define FW_LDST_CMD_MSG_V(x)	((x) << FW_LDST_CMD_MSG_S)
 
+#define FW_LDST_CMD_CTXTFLUSH_S		30
+#define FW_LDST_CMD_CTXTFLUSH_V(x)	((x) << FW_LDST_CMD_CTXTFLUSH_S)
+#define FW_LDST_CMD_CTXTFLUSH_F		FW_LDST_CMD_CTXTFLUSH_V(1U)
+
 #define FW_LDST_CMD_PADDR_S     8
 #define FW_LDST_CMD_PADDR_V(x)	((x) << FW_LDST_CMD_PADDR_S)
 
@@ -831,8 +847,8 @@
 #define FW_LDST_CMD_FID_S       15
 #define FW_LDST_CMD_FID_V(x)	((x) << FW_LDST_CMD_FID_S)
 
-#define FW_LDST_CMD_CTL_S       0
-#define FW_LDST_CMD_CTL_V(x)	((x) << FW_LDST_CMD_CTL_S)
+#define FW_LDST_CMD_IDX_S	0
+#define FW_LDST_CMD_IDX_V(x)	((x) << FW_LDST_CMD_IDX_S)
 
 #define FW_LDST_CMD_RPLCPF_S    0
 #define FW_LDST_CMD_RPLCPF_V(x)	((x) << FW_LDST_CMD_RPLCPF_S)
@@ -1061,6 +1077,7 @@
 	FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
 	FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
 	FW_PARAMS_PARAM_DEV_CF = 0x0D,
+	FW_PARAMS_PARAM_DEV_PHYFW = 0x0F,
 	FW_PARAMS_PARAM_DEV_DIAG = 0x11,
 	FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */
 	FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */
@@ -1123,6 +1140,12 @@
 	FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
 	FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
 	FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13,
+	FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20,
+};
+
+enum fw_params_param_dev_phyfw {
+	FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD = 0x00,
+	FW_PARAMS_PARAM_DEV_PHYFW_VERSION = 0x01,
 };
 
 enum fw_params_param_dev_diag {
@@ -1377,6 +1400,7 @@
 
 #define FW_IQ_CMD_IQFLINTCONGEN_S	27
 #define FW_IQ_CMD_IQFLINTCONGEN_V(x)	((x) << FW_IQ_CMD_IQFLINTCONGEN_S)
+#define FW_IQ_CMD_IQFLINTCONGEN_F	FW_IQ_CMD_IQFLINTCONGEN_V(1U)
 
 #define FW_IQ_CMD_IQFLINTISCSIC_S	26
 #define FW_IQ_CMD_IQFLINTISCSIC_V(x)	((x) << FW_IQ_CMD_IQFLINTISCSIC_S)
@@ -1399,6 +1423,7 @@
 
 #define FW_IQ_CMD_FL0CONGCIF_S		11
 #define FW_IQ_CMD_FL0CONGCIF_V(x)	((x) << FW_IQ_CMD_FL0CONGCIF_S)
+#define FW_IQ_CMD_FL0CONGCIF_F		FW_IQ_CMD_FL0CONGCIF_V(1U)
 
 #define FW_IQ_CMD_FL0ONCHIP_S		10
 #define FW_IQ_CMD_FL0ONCHIP_V(x)	((x) << FW_IQ_CMD_FL0ONCHIP_S)
@@ -1589,6 +1614,7 @@
 
 #define FW_EQ_ETH_CMD_FETCHRO_S		22
 #define FW_EQ_ETH_CMD_FETCHRO_V(x)	((x) << FW_EQ_ETH_CMD_FETCHRO_S)
+#define FW_EQ_ETH_CMD_FETCHRO_F		FW_EQ_ETH_CMD_FETCHRO_V(1U)
 
 #define FW_EQ_ETH_CMD_HOSTFCMODE_S	20
 #define FW_EQ_ETH_CMD_HOSTFCMODE_V(x)	((x) << FW_EQ_ETH_CMD_HOSTFCMODE_S)
@@ -2526,13 +2552,8 @@
 	FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC,
 };
 
-/* port stats */
-#define FW_NUM_PORT_STATS 50
-#define FW_NUM_PORT_TX_STATS 23
-#define FW_NUM_PORT_RX_STATS 27
-
 enum fw_port_stats_tx_index {
-	FW_STAT_TX_PORT_BYTES_IX,
+	FW_STAT_TX_PORT_BYTES_IX = 0,
 	FW_STAT_TX_PORT_FRAMES_IX,
 	FW_STAT_TX_PORT_BCAST_IX,
 	FW_STAT_TX_PORT_MCAST_IX,
@@ -2554,11 +2575,12 @@
 	FW_STAT_TX_PORT_PPP4_IX,
 	FW_STAT_TX_PORT_PPP5_IX,
 	FW_STAT_TX_PORT_PPP6_IX,
-	FW_STAT_TX_PORT_PPP7_IX
+	FW_STAT_TX_PORT_PPP7_IX,
+	FW_NUM_PORT_TX_STATS
 };
 
 enum fw_port_stat_rx_index {
-	FW_STAT_RX_PORT_BYTES_IX,
+	FW_STAT_RX_PORT_BYTES_IX = 0,
 	FW_STAT_RX_PORT_FRAMES_IX,
 	FW_STAT_RX_PORT_BCAST_IX,
 	FW_STAT_RX_PORT_MCAST_IX,
@@ -2584,9 +2606,14 @@
 	FW_STAT_RX_PORT_PPP5_IX,
 	FW_STAT_RX_PORT_PPP6_IX,
 	FW_STAT_RX_PORT_PPP7_IX,
-	FW_STAT_RX_PORT_LESS_64B_IX
+	FW_STAT_RX_PORT_LESS_64B_IX,
+	FW_STAT_RX_PORT_MAC_ERROR_IX,
+	FW_NUM_PORT_RX_STATS
 };
 
+/* port stats */
+#define FW_NUM_PORT_STATS (FW_NUM_PORT_TX_STATS + FW_NUM_PORT_RX_STATS)
+
 struct fw_port_stats_cmd {
 	__be32 op_to_portid;
 	__be32 retval_len16;
@@ -3015,7 +3042,8 @@
 
 enum fw_hdr_chip {
 	FW_HDR_CHIP_T4,
-	FW_HDR_CHIP_T5
+	FW_HDR_CHIP_T5,
+	FW_HDR_CHIP_T6
 };
 
 #define FW_HDR_FW_VER_MAJOR_S	24
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index b9d1cba..32b2135 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -45,4 +45,9 @@
 #define T5FW_VERSION_MICRO 0x20
 #define T5FW_VERSION_BUILD 0x00
 
+#define T6FW_VERSION_MAJOR 0x01
+#define T6FW_VERSION_MINOR 0x0D
+#define T6FW_VERSION_MICRO 0x2D
+#define T6FW_VERSION_BUILD 0x00
+
 #endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 1d893b0..b2b5e5b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1021,7 +1021,7 @@
 static unsigned int qtimer_val(const struct adapter *adapter,
 			       const struct sge_rspq *rspq)
 {
-	unsigned int timer_idx = QINTR_TIMER_IDX_GET(rspq->intr_params);
+	unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);
 
 	return timer_idx < SGE_NTIMERS
 		? adapter->sge.timer_val[timer_idx]
@@ -1086,8 +1086,8 @@
 	 * Update the response queue's interrupt coalescing parameters and
 	 * return success.
 	 */
-	rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
-			     (cnt > 0 ? QINTR_CNT_EN : 0));
+	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
+			     QINTR_CNT_EN_V(cnt > 0));
 	return 0;
 }
 
@@ -1439,7 +1439,7 @@
 
 	coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
 	coalesce->rx_max_coalesced_frames =
-		((rspq->intr_params & QINTR_CNT_EN)
+		((rspq->intr_params & QINTR_CNT_EN_F)
 		 ? adapter->sge.counter_val[rspq->pktcnt_idx]
 		 : 0);
 	return 0;
@@ -2393,8 +2393,9 @@
 			     u8 pkt_cnt_idx, unsigned int size,
 			     unsigned int iqe_size)
 {
-	rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
-			     (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0));
+	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
+			     (pkt_cnt_idx < SGE_NCOUNTERS ?
+			      QINTR_CNT_EN_F : 0));
 	rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
 			    ? pkt_cnt_idx
 			    : 0);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 482f6de..ad53e5a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -524,7 +524,7 @@
  */
 static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
 {
-	u32 val;
+	u32 val = adapter->params.arch.sge_fl_db;
 
 	/* The SGE keeps track of its Producer and Consumer Indices in terms
 	 * of Egress Queue Units so we can only tell it about integral numbers
@@ -532,11 +532,9 @@
 	 */
 	if (fl->pend_cred >= FL_PER_EQ_UNIT) {
 		if (is_t4(adapter->params.chip))
-			val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
+			val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
 		else
-			val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) |
-			      DBTYPE_F;
-		val |= DBPRIO_F;
+			val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT);
 
 		/* Make sure all memory writes to the Free List queue are
 		 * committed before we tell the hardware about them.
@@ -1084,7 +1082,7 @@
  * Figure out what HW csum a packet wants and return the appropriate control
  * bits.
  */
-static u64 hwcsum(const struct sk_buff *skb)
+static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
 {
 	int csum_type;
 	const struct iphdr *iph = ip_hdr(skb);
@@ -1100,7 +1098,7 @@
 			 * unknown protocol, disable HW csum
 			 * and hope a bad packet is detected
 			 */
-			return TXPKT_L4CSUM_DIS;
+			return TXPKT_L4CSUM_DIS_F;
 		}
 	} else {
 		/*
@@ -1116,16 +1114,21 @@
 			goto nocsum;
 	}
 
-	if (likely(csum_type >= TX_CSUM_TCPIP))
-		return TXPKT_CSUM_TYPE(csum_type) |
-			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
-			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
-	else {
+	if (likely(csum_type >= TX_CSUM_TCPIP)) {
+		u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
+		int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
+
+		if (chip <= CHELSIO_T5)
+			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+		else
+			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
+	} else {
 		int start = skb_transport_offset(skb);
 
-		return TXPKT_CSUM_TYPE(csum_type) |
-			TXPKT_CSUM_START(start) |
-			TXPKT_CSUM_LOC(start + skb->csum_offset);
+		return TXPKT_CSUM_TYPE_V(csum_type) |
+			TXPKT_CSUM_START_V(start) |
+			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
 	}
 }
 
@@ -1160,7 +1163,7 @@
 {
 	u32 wr_mid;
 	u64 cntrl, *end;
-	int qidx, credits;
+	int qidx, credits, max_pkt_len;
 	unsigned int flits, ndesc;
 	struct adapter *adapter;
 	struct sge_eth_txq *txq;
@@ -1183,6 +1186,13 @@
 	if (unlikely(skb->len < fw_hdr_copy_len))
 		goto out_free;
 
+	/* Discard the packet if its length is greater than the MTU */
+	max_pkt_len = ETH_HLEN + dev->mtu;
+	if (skb_vlan_tag_present(skb))
+		max_pkt_len += VLAN_HLEN;
+	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+		goto out_free;
+
 	/*
 	 * Figure out which TX Queue we're going to use.
 	 */
@@ -1281,29 +1291,35 @@
 		 * Fill in the LSO CPL message.
 		 */
 		lso->lso_ctrl =
-			cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) |
-				    LSO_FIRST_SLICE |
-				    LSO_LAST_SLICE |
-				    LSO_IPV6(v6) |
-				    LSO_ETHHDR_LEN(eth_xtra_len/4) |
-				    LSO_IPHDR_LEN(l3hdr_len/4) |
-				    LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+			cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+				    LSO_FIRST_SLICE_F |
+				    LSO_LAST_SLICE_F |
+				    LSO_IPV6_V(v6) |
+				    LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+				    LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+				    LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
 		lso->ipid_ofst = cpu_to_be16(0);
 		lso->mss = cpu_to_be16(ssi->gso_size);
 		lso->seqno_offset = cpu_to_be32(0);
 		if (is_t4(adapter->params.chip))
 			lso->len = cpu_to_be32(skb->len);
 		else
-			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE(skb->len));
+			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
 
 		/*
 		 * Set up TX Packet CPL pointer, control word and perform
 		 * accounting.
 		 */
 		cpl = (void *)(lso + 1);
-		cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
-			 TXPKT_IPHDR_LEN(l3hdr_len) |
-			 TXPKT_ETHHDR_LEN(eth_xtra_len));
+
+		if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+			cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+		else
+			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+			 TXPKT_IPHDR_LEN_V(l3hdr_len);
 		txq->tso++;
 		txq->tx_cso += ssi->gso_segs;
 	} else {
@@ -1320,10 +1336,11 @@
 		 */
 		cpl = (void *)(wr + 1);
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+			cntrl = hwcsum(adapter->params.chip, skb) |
+				TXPKT_IPCSUM_DIS_F;
 			txq->tx_cso++;
 		} else
-			cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+			cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
 	}
 
 	/*
@@ -1332,15 +1349,15 @@
 	 */
 	if (skb_vlan_tag_present(skb)) {
 		txq->vlan_ins++;
-		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
+		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
 	}
 
 	/*
 	 * Fill in the TX Packet CPL message header.
 	 */
-	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) |
-				 TXPKT_INTF(pi->port_id) |
-				 TXPKT_PF(0));
+	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+				 TXPKT_INTF_V(pi->port_id) |
+				 TXPKT_PF_V(0));
 	cpl->pack = cpu_to_be16(0);
 	cpl->len = cpu_to_be16(skb->len);
 	cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1663,7 +1680,7 @@
 static inline bool is_new_response(const struct rsp_ctrl *rc,
 				   const struct sge_rspq *rspq)
 {
-	return RSPD_GEN(rc->type_gen) == rspq->gen;
+	return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen;
 }
 
 /**
@@ -1752,8 +1769,8 @@
 		 * SGE.
 		 */
 		dma_rmb();
-		rsp_type = RSPD_TYPE(rc->type_gen);
-		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+		rsp_type = RSPD_TYPE_G(rc->type_gen);
+		if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
 			struct page_frag *fp;
 			struct pkt_gl gl;
 			const struct rx_sw_desc *sdesc;
@@ -1764,7 +1781,7 @@
 			 * If we get a "new buffer" message from the SGE we
 			 * need to move on to the next Free List buffer.
 			 */
-			if (len & RSPD_NEWBUF) {
+			if (len & RSPD_NEWBUF_F) {
 				/*
 				 * We get one "new buffer" message when we
 				 * first start up a queue so we need to ignore
@@ -1775,7 +1792,7 @@
 						     1);
 					rspq->offset = 0;
 				}
-				len = RSPD_LEN(len);
+				len = RSPD_LEN_G(len);
 			}
 			gl.tot_len = len;
 
@@ -1818,10 +1835,10 @@
 				rspq->offset += ALIGN(fp->size, s->fl_align);
 			else
 				restore_rx_bufs(&gl, &rxq->fl, frag);
-		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
+		} else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
 			ret = rspq->handler(rspq, rspq->cur_desc, NULL);
 		} else {
-			WARN_ON(rsp_type > RSP_TYPE_CPL);
+			WARN_ON(rsp_type > RSPD_TYPE_CPL_X);
 			ret = 0;
 		}
 
@@ -1833,7 +1850,7 @@
 			 */
 			const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
 			rspq->next_intr_params =
-				QINTR_TIMER_IDX(NOMEM_TIMER_IDX);
+				QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX);
 			break;
 		}
 
@@ -1875,7 +1892,7 @@
 		intr_params = rspq->next_intr_params;
 		rspq->next_intr_params = rspq->intr_params;
 	} else
-		intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX);
+		intr_params = QINTR_TIMER_IDX_V(SGE_TIMER_UPD_CIDX);
 
 	if (unlikely(work_done == 0))
 		rspq->unhandled_irqs++;
@@ -1936,10 +1953,10 @@
 		 * never happen ...
 		 */
 		dma_rmb();
-		if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
+		if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) {
 			dev_err(adapter->pdev_dev,
 				"Unexpected INTRQ response type %d\n",
-				RSPD_TYPE(rc->type_gen));
+				RSPD_TYPE_G(rc->type_gen));
 			continue;
 		}
 
@@ -1951,7 +1968,7 @@
 		 * want to either make them fatal and/or conditionalized under
 		 * DEBUG.
 		 */
-		qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid));
+		qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid));
 		iq_idx = IQ_IDX(s, qid);
 		if (unlikely(iq_idx >= MAX_INGQ)) {
 			dev_err(adapter->pdev_dev,
@@ -2154,8 +2171,8 @@
 	u64 bar2_qoffset;
 	int ret;
 
-	ret = t4_bar2_sge_qregs(adapter, qid, qtype,
-				&bar2_qoffset, pbar2_qid);
+	ret = t4vf_bar2_sge_qregs(adapter, qid, qtype,
+				  &bar2_qoffset, pbar2_qid);
 	if (ret)
 		return NULL;
 
@@ -2239,12 +2256,18 @@
 	cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
 
 	if (fl) {
+		enum chip_type chip =
+			CHELSIO_CHIP_VERSION(adapter->params.chip);
 		/*
 		 * Allocate the ring for the hardware free list (with space
 		 * for its status page) along with the associated software
 		 * descriptor ring.  The free list size needs to be a multiple
-		 * of the Egress Queue Unit.
+		 * of the Egress Queue Unit and at least 2 Egress Units larger
+		 * than the SGE's Egress Congestion Threshold
+		 * (fl_starve_thres - 1).
 		 */
+		if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT)
+			fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;
 		fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
 		fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
 				      sizeof(__be64), sizeof(struct rx_sw_desc),
@@ -2274,7 +2297,9 @@
 		cmd.fl0dcaen_to_fl0cidxfthresh =
 			cpu_to_be16(
 				FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) |
-				FW_IQ_CMD_FL0FBMAX_V(SGE_FETCHBURSTMAX_512B));
+				FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
+						     FETCHBURSTMAX_512B_X :
+						     FETCHBURSTMAX_256B_X));
 		cmd.fl0size = cpu_to_be16(flsz);
 		cmd.fl0addr = cpu_to_be64(fl->addr);
 	}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index b9debb4..88b8981b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -51,6 +51,7 @@
  */
 #define CHELSIO_T4		0x4
 #define CHELSIO_T5		0x5
+#define CHELSIO_T6		0x6
 
 enum chip_type {
 	T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
@@ -156,6 +157,12 @@
 	u32 cclk;			/* Core Clock (KHz) */
 };
 
+/* Stores chip specific parameters */
+struct arch_specific_params {
+	u32 sge_fl_db;
+	u16 mps_tcam_size;
+};
+
 /*
  * Global Receive Side Scaling (RSS) parameters in host-native format.
  */
@@ -215,6 +222,7 @@
 	struct vpd_params vpd;		/* Vital Product Data */
 	struct rss_params rss;		/* Receive Side Scaling */
 	struct vf_resources vfres;	/* Virtual Function Resource limits */
+	struct arch_specific_params arch; /* chip specific params */
 	enum chip_type chip;		/* chip code */
 	u8 nports;			/* # of Ethernet "ports" */
 };
@@ -284,11 +292,11 @@
 int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
 
 enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
-int t4_bar2_sge_qregs(struct adapter *adapter,
-		      unsigned int qid,
-		      enum t4_bar2_qtype qtype,
-		      u64 *pbar2_qoffset,
-		      unsigned int *pbar2_qid);
+int t4vf_bar2_sge_qregs(struct adapter *adapter,
+			unsigned int qid,
+			enum t4_bar2_qtype qtype,
+			u64 *pbar2_qoffset,
+			unsigned int *pbar2_qid);
 
 int t4vf_get_sge_params(struct adapter *);
 int t4vf_get_vpd_params(struct adapter *);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 966ee90..0db6dc9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -428,7 +428,7 @@
 }
 
 /**
- *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ *	t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
  *	@adapter: the adapter
  *	@qid: the Queue ID
  *	@qtype: the Ingress or Egress type for @qid
@@ -452,11 +452,11 @@
  *	Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
 *	then these "Inferred Queue ID" registers may not be used.
  */
-int t4_bar2_sge_qregs(struct adapter *adapter,
-		      unsigned int qid,
-		      enum t4_bar2_qtype qtype,
-		      u64 *pbar2_qoffset,
-		      unsigned int *pbar2_qid)
+int t4vf_bar2_sge_qregs(struct adapter *adapter,
+			unsigned int qid,
+			enum t4_bar2_qtype qtype,
+			u64 *pbar2_qoffset,
+			unsigned int *pbar2_qid)
 {
 	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
 	u64 bar2_page_offset, bar2_qoffset;
@@ -1191,9 +1191,7 @@
 	unsigned nfilters = 0;
 	unsigned int rem = naddr;
 	struct fw_vi_mac_cmd cmd, rpl;
-	unsigned int max_naddr = is_t4(adapter->params.chip) ?
-				 NUM_MPS_CLS_SRAM_L_INSTANCES :
-				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+	unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
 
 	if (naddr > max_naddr)
 		return -EINVAL;
@@ -1285,9 +1283,7 @@
 	struct fw_vi_mac_exact *p = &cmd.u.exact[0];
 	size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
 					     u.exact[1]), 16);
-	unsigned int max_naddr = is_t4(adapter->params.chip) ?
-				 NUM_MPS_CLS_SRAM_L_INSTANCES :
-				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+	unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size;
 
 	/*
 	 * If this is a new allocation, determine whether it should be
@@ -1310,7 +1306,7 @@
 	if (ret == 0) {
 		p = &rpl.u.exact[0];
 		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
-		if (ret >= max_naddr)
+		if (ret >= max_mac_addr)
 			ret = -ENOMEM;
 	}
 	return ret;
@@ -1590,11 +1586,25 @@
 	switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
 	case CHELSIO_T4:
 		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
+		adapter->params.arch.sge_fl_db = DBPRIO_F;
+		adapter->params.arch.mps_tcam_size =
+				NUM_MPS_CLS_SRAM_L_INSTANCES;
 		break;
 
 	case CHELSIO_T5:
 		chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
 		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
+		adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
+		adapter->params.arch.mps_tcam_size =
+				NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+		break;
+
+	case CHELSIO_T6:
+		chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
+		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid);
+		adapter->params.arch.sge_fl_db = 0;
+		adapter->params.arch.mps_tcam_size =
+				NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
 		break;
 	}
 
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 905ac5f..5ab9129 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on ISA || EISA || ARM || MAC
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
@@ -23,9 +21,7 @@
 	depends on ISA || EISA || ARM
 	---help---
 	  Support for CS89x0 chipset based Ethernet cards. If you have a
-	  network (Ethernet) card of this type, say Y and read the
-	  Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto> as well as
+	  network (Ethernet) card of this type, say Y and read the file
 	  <file:Documentation/networking/cs89x0.txt>.
 
 	  To compile this driver as a module, choose M here. The module
@@ -55,9 +51,7 @@
 	depends on MAC
 	---help---
 	  Support for CS89x0 chipset based Ethernet cards.  If you have a
-	  Nubus or LC-PDS network (Ethernet) card of this type, say Y and
-	  read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  Nubus or LC-PDS network (Ethernet) card of this type, say Y here.
 
 	  To compile this driver as a module, choose M here. This module will
 	  be called mac89x0.
diff --git a/drivers/net/ethernet/cisco/Kconfig b/drivers/net/ethernet/cisco/Kconfig
index 1c7b884..15b713a 100644
--- a/drivers/net/ethernet/cisco/Kconfig
+++ b/drivers/net/ethernet/cisco/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 0be6850..d106186 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -5,7 +5,7 @@
 #include <linux/in.h>
 #include <linux/types.h>
 #include <linux/skbuff.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
 #include "enic_res.h"
 #include "enic_clsf.h"
 
@@ -15,14 +15,14 @@
  *	@rq: rq number to steer to
  *
  * This function returns filter_id(hardware_id) of the filter
- * added. In case of error it returns an negative number.
+ * added. In case of error it returns a negative number.
  */
 int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
 {
 	int res;
 	struct filter data;
 
-	switch (keys->ip_proto) {
+	switch (keys->basic.ip_proto) {
 	case IPPROTO_TCP:
 		data.u.ipv4.protocol = PROTO_TCP;
 		break;
@@ -33,10 +33,10 @@
 		return -EPROTONOSUPPORT;
 	};
 	data.type = FILTER_IPV4_5TUPLE;
-	data.u.ipv4.src_addr = ntohl(keys->src);
-	data.u.ipv4.dst_addr = ntohl(keys->dst);
-	data.u.ipv4.src_port = ntohs(keys->port16[0]);
-	data.u.ipv4.dst_port = ntohs(keys->port16[1]);
+	data.u.ipv4.src_addr = ntohl(keys->addrs.v4addrs.src);
+	data.u.ipv4.dst_addr = ntohl(keys->addrs.v4addrs.dst);
+	data.u.ipv4.src_port = ntohs(keys->ports.src);
+	data.u.ipv4.dst_port = ntohs(keys->ports.dst);
 	data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
 
 	spin_lock_bh(&enic->devcmd_lock);
@@ -158,11 +158,11 @@
 	struct enic_rfs_fltr_node *tpos;
 
 	hlist_for_each_entry(tpos, h, node)
-		if (tpos->keys.src == k->src &&
-		    tpos->keys.dst == k->dst &&
-		    tpos->keys.ports == k->ports &&
-		    tpos->keys.ip_proto == k->ip_proto &&
-		    tpos->keys.n_proto == k->n_proto)
+		if (tpos->keys.addrs.v4addrs.src == k->addrs.v4addrs.src &&
+		    tpos->keys.addrs.v4addrs.dst == k->addrs.v4addrs.dst &&
+		    tpos->keys.ports.ports == k->ports.ports &&
+		    tpos->keys.basic.ip_proto == k->basic.ip_proto &&
+		    tpos->keys.basic.n_proto == k->basic.n_proto)
 			return tpos;
 	return NULL;
 }
@@ -177,9 +177,10 @@
 	int res, i;
 
 	enic = netdev_priv(dev);
-	res = skb_flow_dissect(skb, &keys);
-	if (!res || keys.n_proto != htons(ETH_P_IP) ||
-	    (keys.ip_proto != IPPROTO_TCP && keys.ip_proto != IPPROTO_UDP))
+	res = skb_flow_dissect_flow_keys(skb, &keys);
+	if (!res || keys.basic.n_proto != htons(ETH_P_IP) ||
+	    (keys.basic.ip_proto != IPPROTO_TCP &&
+	     keys.basic.ip_proto != IPPROTO_UDP))
 		return -EPROTONOSUPPORT;
 
 	tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
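
The enic conversion above tracks the flow-dissector rework in this merge: the old flat struct flow_keys (src, dst, port16[], ip_proto, n_proto) became a nested layout, and the flow_keys-specific entry point is now skb_flow_dissect_flow_keys(). A minimal, kernel-context sketch of the new access pattern (not standalone; illustration only):

    #include <linux/errno.h>
    #include <linux/if_ether.h>
    #include <linux/printk.h>
    #include <linux/skbuff.h>
    #include <net/flow_dissector.h>

    static int demo_classify(struct sk_buff *skb)
    {
            struct flow_keys keys;

            if (!skb_flow_dissect_flow_keys(skb, &keys))
                    return -EPROTONOSUPPORT;

            if (keys.basic.n_proto != htons(ETH_P_IP))
                    return -EPROTONOSUPPORT;

            /* addresses and ports stay in network byte order */
            pr_debug("proto %u, %pI4:%u -> %pI4:%u\n",
                     keys.basic.ip_proto,
                     &keys.addrs.v4addrs.src, ntohs(keys.ports.src),
                     &keys.addrs.v4addrs.dst, ntohs(keys.ports.dst));
            return 0;
    }
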
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 68d47b1..f3f1601 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -348,7 +348,7 @@
 	n = htbl_fltr_search(enic, (u16)fsp->location);
 	if (!n)
 		return -EINVAL;
-	switch (n->keys.ip_proto) {
+	switch (n->keys.basic.ip_proto) {
 	case IPPROTO_TCP:
 		fsp->flow_type = TCP_V4_FLOW;
 		break;
@@ -360,16 +360,16 @@
 		break;
 	}
 
-	fsp->h_u.tcp_ip4_spec.ip4src = n->keys.src;
+	fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
 	fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;
 
-	fsp->h_u.tcp_ip4_spec.ip4dst = n->keys.dst;
+	fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
 	fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;
 
-	fsp->h_u.tcp_ip4_spec.psrc = n->keys.port16[0];
+	fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
 	fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;
 
-	fsp->h_u.tcp_ip4_spec.pdst = n->keys.port16[1];
+	fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
 	fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;
 
 	fsp->ring_cookie = n->rq_id;
diff --git a/drivers/net/ethernet/dec/Kconfig b/drivers/net/ethernet/dec/Kconfig
index 68262aa..740bbad 100644
--- a/drivers/net/ethernet/dec/Kconfig
+++ b/drivers/net/ethernet/dec/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI || EISA || CARDBUS
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
index eb9ba6e..1003201b 100644
--- a/drivers/net/ethernet/dec/tulip/Kconfig
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -21,8 +21,7 @@
 	  of this type.  (If your card is NOT SMC EtherPower 10/100 PCI
 	  (smc9332dst), you can also try the driver for "Generic DECchip"
 	  cards, below.  However, most people with a network card of this type
-	  will say Y here.) Do read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  will say Y here.)
 
 	  To compile this driver as a module, choose M here. The module will
 	  be called de2104x.
@@ -50,8 +49,7 @@
 	  of this type.  (If your card is NOT SMC EtherPower 10/100 PCI
 	  (smc9332dst), you can also try the driver for "Generic DECchip"
 	  cards, above.  However, most people with a network card of this type
-	  will say Y here.) Do read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  will say Y here.)
 
 	  To compile this driver as a module, choose M here. The module will
 	  be called tulip.
@@ -113,9 +111,7 @@
 	---help---
 	  This is support for the DIGITAL series of PCI/EISA Ethernet cards.
 	  These include the DE425, DE434, DE435, DE450 and DE500 models.  If
-	  you have a network card of this type, say Y and read the
-	  Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>. More specific
+	  you have a network card of this type, say Y.  More specific
 	  information is contained in
 	  <file:Documentation/networking/de4x5.txt>.
 
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index badff18..8966f31 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -5189,16 +5189,16 @@
 	if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
 
 	if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
-	    if (strstr(p, "TP")) {
-		lp->params.autosense = TP;
-	    } else if (strstr(p, "TP_NW")) {
+	    if (strstr(p, "TP_NW")) {
 		lp->params.autosense = TP_NW;
+	    } else if (strstr(p, "TP")) {
+		lp->params.autosense = TP;
+	    } else if (strstr(p, "BNC_AUI")) {
+		lp->params.autosense = BNC;
 	    } else if (strstr(p, "BNC")) {
 		lp->params.autosense = BNC;
 	    } else if (strstr(p, "AUI")) {
 		lp->params.autosense = AUI;
-	    } else if (strstr(p, "BNC_AUI")) {
-		lp->params.autosense = BNC;
 	    } else if (strstr(p, "10Mb")) {
 		lp->params.autosense = _10Mb;
 	    } else if (strstr(p, "100Mb")) {
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 2c30c0c..447d092 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -1115,7 +1115,7 @@
 				netif_carrier_off(dev);
 			}
 		}
-		db->init=0;
+	db->init = 0;
 
 	/* Timer active again */
 	db->timer.expires = ULI526X_TIMER_WUT;
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig
index c543ac1..f6e858d 100644
--- a/drivers/net/ethernet/dlink/Kconfig
+++ b/drivers/net/ethernet/dlink/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 1274b6f..cf0a5fc 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -463,10 +463,8 @@
 		dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
 	}
 
-	init_timer (&np->timer);
+	setup_timer(&np->timer, rio_timer, (unsigned long)dev);
 	np->timer.expires = jiffies + 1*HZ;
-	np->timer.data = (unsigned long) dev;
-	np->timer.function = rio_timer;
 	add_timer (&np->timer);
 
 	/* Start Tx/Rx */
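
The dl2k change above is the stock init_timer() to setup_timer() conversion: the helper folds the .function/.data assignments into a single call. A minimal, kernel-context sketch of the pattern with a hypothetical callback (not standalone):

    #include <linux/jiffies.h>
    #include <linux/netdevice.h>
    #include <linux/timer.h>

    static void demo_timeout(unsigned long data)
    {
            struct net_device *dev = (struct net_device *)data;

            pr_debug("%s: timer fired\n", dev->name);
    }

    static void demo_arm(struct timer_list *t, struct net_device *dev)
    {
            /* equivalent to init_timer() + .function/.data assignment */
            setup_timer(t, demo_timeout, (unsigned long)dev);
            t->expires = jiffies + 1*HZ;    /* fire in one second, as above */
            add_timer(t);
    }
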
diff --git a/drivers/net/ethernet/emulex/Kconfig b/drivers/net/ethernet/emulex/Kconfig
index 1b8d638..fdbb27c 100644
--- a/drivers/net/ethernet/emulex/Kconfig
+++ b/drivers/net/ethernet/emulex/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index ea94a8e..7108563 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -5,6 +5,15 @@
 	  This driver implements the NIC functionality for ServerEngines'
 	  10Gbps network adapter - BladeEngine.
 
+config BE2NET_HWMON
+	bool "HWMON support for be2net driver"
+	depends on BE2NET && HWMON
+	depends on !(BE2NET=y && HWMON=m)
+	default y
+	---help---
+	  Say Y here if you want to expose thermal sensor data on the
+	  be2net network adapter.
+
 config BE2NET_VXLAN
         bool "VXLAN offload support on be2net driver"
         default y
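
The !(BE2NET=y && HWMON=m) dependency on the new BE2NET_HWMON entry above rules out exactly one combination: be2net built into the kernel while hwmon is a module. A built-in driver cannot reference symbols that live in a module, so the hwmon hooks may only be compiled in when HWMON is at least as available as be2net itself.
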
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 1bf1cdc..8d12b41 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -31,11 +31,13 @@
 #include <linux/slab.h>
 #include <linux/u64_stats_sync.h>
 #include <linux/cpumask.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
 
 #include "be_hw.h"
 #include "be_roce.h"
 
-#define DRV_VER			"10.6.0.1"
+#define DRV_VER			"10.6.0.2"
 #define DRV_NAME		"be2net"
 #define BE_NAME			"Emulex BladeEngine2"
 #define BE3_NAME		"Emulex BladeEngine3"
@@ -314,7 +316,6 @@
 } ____cacheline_aligned_in_smp;
 
 struct be_drv_stats {
-	u32 be_on_die_temperature;
 	u32 eth_red_drops;
 	u32 dma_map_errors;
 	u32 rx_drops_no_pbuf;
@@ -366,6 +367,7 @@
 	u32 tx_rate;
 	u32 plink_tracking;
 	u32 privileges;
+	bool spoofchk;
 };
 
 enum vf_state {
@@ -382,6 +384,7 @@
 #define BE_FLAGS_SETUP_DONE			BIT(9)
 #define BE_FLAGS_EVT_INCOMPATIBLE_SFP		BIT(10)
 #define BE_FLAGS_ERR_DETECTION_SCHEDULED	BIT(11)
+#define BE_FLAGS_OS2BMC				BIT(12)
 
 #define BE_UC_PMAC_COUNT			30
 #define BE_VF_UC_PMAC_COUNT			2
@@ -426,6 +429,8 @@
 	u32 vf_if_cap_flags;	/* VF if capability flags */
 };
 
+#define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC)
+
 struct rss_info {
 	u64 rss_flags;
 	u8 rsstable[RSS_INDIR_TABLE_LEN];
@@ -433,6 +438,12 @@
 	u8 rss_hkey[RSS_HASH_KEY_LEN];
 };
 
+#define BE_INVALID_DIE_TEMP	0xFF
+struct be_hwmon {
+	struct device *hwmon_dev;
+	u8 be_on_die_temp;  /* Unit: degree Celsius (sysfs reports millidegrees) */
+};
+
 /* Macros to read/write the 'features' word of be_wrb_params structure.
  */
 #define	BE_WRB_F_BIT(name)			BE_WRB_F_##name##_BIT
@@ -453,7 +464,8 @@
 	BE_WRB_F_LSO_BIT,		/* LSO */
 	BE_WRB_F_LSO6_BIT,		/* LSO6 */
 	BE_WRB_F_VLAN_BIT,		/* VLAN */
-	BE_WRB_F_VLAN_SKIP_HW_BIT	/* Skip VLAN tag (workaround) */
+	BE_WRB_F_VLAN_SKIP_HW_BIT,	/* Skip VLAN tag (workaround) */
+	BE_WRB_F_OS2BMC_BIT		/* Send packet to the management ring */
 };
 
 /* The structure below provides a HW-agnostic abstraction of WRB params
@@ -514,6 +526,7 @@
 	u16 work_counter;
 
 	struct delayed_work be_err_detection_work;
+	u8 err_flags;
 	u32 flags;
 	u32 cmd_privileges;
 	/* Ethtool knobs and info */
@@ -572,8 +585,11 @@
 	u16 qnq_vid;
 	u32 msg_enable;
 	int be_get_temp_freq;
+	struct be_hwmon hwmon_info;
 	u8 pf_number;
 	struct rss_info rss_info;
+	/* Filters for packets that need to be sent to BMC */
+	u32 bmc_filt_mask;
 };
 
 #define be_physfn(adapter)		(!adapter->virtfn)
@@ -772,28 +788,38 @@
 	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
 }
 
+#define BE_ERROR_EEH		1
+#define BE_ERROR_UE		BIT(1)
+#define BE_ERROR_FW		BIT(2)
+#define BE_ERROR_HW		(BE_ERROR_EEH | BE_ERROR_UE)
+#define BE_ERROR_ANY		(BE_ERROR_EEH | BE_ERROR_UE | BE_ERROR_FW)
+#define BE_CLEAR_ALL		0xFF
+
+static inline u8 be_check_error(struct be_adapter *adapter, u32 err_type)
+{
+	return (adapter->err_flags & err_type);
+}
+
+static inline void be_set_error(struct be_adapter *adapter, int err_type)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	adapter->err_flags |= err_type;
+	netif_carrier_off(netdev);
+
+	dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
+}
+
+static inline void  be_clear_error(struct be_adapter *adapter, int err_type)
+{
+	adapter->err_flags &= ~err_type;
+}
+
 static inline bool be_multi_rxq(const struct be_adapter *adapter)
 {
 	return adapter->num_rx_qs > 1;
 }
 
-static inline bool be_error(struct be_adapter *adapter)
-{
-	return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
-}
-
-static inline bool be_hw_error(struct be_adapter *adapter)
-{
-	return adapter->eeh_error || adapter->hw_error;
-}
-
-static inline void  be_clear_all_error(struct be_adapter *adapter)
-{
-	adapter->eeh_error = false;
-	adapter->hw_error = false;
-	adapter->fw_timeout = false;
-}
-
 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
 		  u16 num_popped);
 void be_link_status_update(struct be_adapter *adapter, u8 link_status);
@@ -804,6 +830,7 @@
 u32 be_get_fw_log_level(struct be_adapter *adapter);
 int be_update_queues(struct be_adapter *adapter);
 int be_poll(struct napi_struct *napi, int budget);
+void be_eqd_update(struct be_adapter *adapter, bool force_update);
 
 /*
  * internal function to initialize-cleanup roce device.
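
The err_flags rework above collapses three booleans (eeh_error, hw_error, fw_timeout) into one bitmask; the grouped masks let call sites test a whole class of errors in one check. A kernel-context sketch of how the new helpers compose (not standalone; illustration only):

    static void demo_error_paths(struct be_adapter *adapter)
    {
            /* FW stopped responding: record it and take the link down */
            be_set_error(adapter, BE_ERROR_FW);

            /* "any error" test replaces the old be_error() helper */
            if (be_check_error(adapter, BE_ERROR_ANY))
                    return;

            /* recovered: clear every error bit in one go */
            be_clear_error(adapter, BE_CLEAR_ALL);
    }
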
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index c5e1d0a..9eac322 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -93,7 +93,7 @@
 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
 	u32 val = 0;
 
-	if (be_error(adapter))
+	if (be_check_error(adapter, BE_ERROR_ANY))
 		return;
 
 	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
@@ -140,6 +140,7 @@
 	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
 	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
 	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
+	    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
 	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
 	    (base_status == MCC_STATUS_ILLEGAL_FIELD ||
 	     addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
@@ -191,10 +192,12 @@
 		if (base_status == MCC_STATUS_SUCCESS) {
 			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
 							(void *)resp_hdr;
-			adapter->drv_stats.be_on_die_temperature =
+			adapter->hwmon_info.be_on_die_temp =
 						resp->on_die_temperature;
 		} else {
 			adapter->be_get_temp_freq = 0;
+			adapter->hwmon_info.be_on_die_temp =
+						BE_INVALID_DIE_TEMP;
 		}
 		return;
 	}
@@ -330,6 +333,21 @@
 	}
 }
 
+#define MGMT_ENABLE_MASK	0x4
+static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
+					     struct be_mcc_compl *compl)
+{
+	struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
+	u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);
+
+	if (evt_dw1 & MGMT_ENABLE_MASK) {
+		adapter->flags |= BE_FLAGS_OS2BMC;
+		adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
+	} else {
+		adapter->flags &= ~BE_FLAGS_OS2BMC;
+	}
+}
+
 static void be_async_grp5_evt_process(struct be_adapter *adapter,
 				      struct be_mcc_compl *compl)
 {
@@ -346,6 +364,10 @@
 	case ASYNC_EVENT_PVID_STATE:
 		be_async_grp5_pvid_state_process(adapter, compl);
 		break;
+	/* Async event to disable/enable os2bmc and/or mac-learning */
+	case ASYNC_EVENT_FW_CONTROL:
+		be_async_grp5_fw_control_process(adapter, compl);
+		break;
 	default:
 		break;
 	}
@@ -486,7 +508,7 @@
 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
 	for (i = 0; i < mcc_timeout; i++) {
-		if (be_error(adapter))
+		if (be_check_error(adapter, BE_ERROR_ANY))
 			return -EIO;
 
 		local_bh_disable();
@@ -499,7 +521,7 @@
 	}
 	if (i == mcc_timeout) {
 		dev_err(&adapter->pdev->dev, "FW not responding\n");
-		adapter->fw_timeout = true;
+		be_set_error(adapter, BE_ERROR_FW);
 		return -EIO;
 	}
 	return status;
@@ -538,7 +560,7 @@
 	u32 ready;
 
 	do {
-		if (be_error(adapter))
+		if (be_check_error(adapter, BE_ERROR_ANY))
 			return -EIO;
 
 		ready = ioread32(db);
@@ -551,7 +573,7 @@
 
 		if (msecs > 4000) {
 			dev_err(&adapter->pdev->dev, "FW not responding\n");
-			adapter->fw_timeout = true;
+			be_set_error(adapter, BE_ERROR_FW);
 			be_detect_error(adapter);
 			return -1;
 		}
@@ -1457,7 +1479,7 @@
 		*if_handle = le32_to_cpu(resp->interface_id);
 
 		/* Hack to retrieve VF's pmac-id on BE3 */
-		if (BE3_chip(adapter) && !be_physfn(adapter))
+		if (BE3_chip(adapter) && be_virtfn(adapter))
 			adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
 	}
 	return status;
@@ -3156,7 +3178,7 @@
 }
 
 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
-			  u32 domain, u16 intf_id, u16 hsw_mode)
+			  u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_set_hsw_config *req;
@@ -3192,6 +3214,14 @@
 			      ctxt, hsw_mode);
 	}
 
+	/* Enable/disable both mac and vlan spoof checking */
+	if (!BEx_chip(adapter) && spoofchk) {
+		AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
+			      ctxt, spoofchk);
+		AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
+			      ctxt, spoofchk);
+	}
+
 	be_dws_cpu_to_le(req->context, sizeof(req->context));
 	status = be_mcc_notify_wait(adapter);
 
@@ -3202,7 +3232,7 @@
 
 /* Get Hyper switch config */
 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
-			  u32 domain, u16 intf_id, u8 *mode)
+			  u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_get_hsw_config *req;
@@ -3250,6 +3280,10 @@
 		if (mode)
 			*mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
 					      port_fwd_type, &resp->context);
+		if (spoofchk)
+			*spoofchk =
+				AMAP_GET_BITS(struct amap_get_hsw_resp_context,
+					      spoofchk, &resp->context);
 	}
 
 err:
@@ -3261,7 +3295,7 @@
 {
 	struct pci_dev *pdev = adapter->pdev;
 
-	if (!be_physfn(adapter))
+	if (be_virtfn(adapter))
 		return true;
 
 	switch (pdev->subsystem_device) {
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 1ec2230..2716e6f 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -65,7 +65,8 @@
 enum mcc_addl_status {
 	MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES = 0x16,
 	MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH = 0x4d,
-	MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a
+	MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a,
+	MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab
 };
 
 #define CQE_BASE_STATUS_MASK		0xFFFF
@@ -104,6 +105,7 @@
 #define ASYNC_DEBUG_EVENT_TYPE_QNQ	1
 #define ASYNC_EVENT_CODE_SLIPORT	0x11
 #define ASYNC_EVENT_PORT_MISCONFIG	0x9
+#define ASYNC_EVENT_FW_CONTROL		0x5
 
 enum {
 	LINK_DOWN	= 0x0,
@@ -180,6 +182,22 @@
 	u32 flags;
 } __packed;
 
+#define BMC_FILT_BROADCAST_ARP				BIT(0)
+#define BMC_FILT_BROADCAST_DHCP_CLIENT			BIT(1)
+#define BMC_FILT_BROADCAST_DHCP_SERVER			BIT(2)
+#define BMC_FILT_BROADCAST_NET_BIOS			BIT(3)
+#define BMC_FILT_BROADCAST				BIT(7)
+#define BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER		BIT(8)
+#define BMC_FILT_MULTICAST_IPV6_RA			BIT(9)
+#define BMC_FILT_MULTICAST_IPV6_RAS			BIT(10)
+#define BMC_FILT_MULTICAST				BIT(15)
+struct be_async_fw_control {
+	u32 event_data_word1;
+	u32 event_data_word2;
+	u32 evt_tag;
+	u32 event_data_word4;
+} __packed;
+
 struct be_mcc_mailbox {
 	struct be_mcc_wrb wrb;
 	struct be_mcc_compl compl;
@@ -1109,10 +1127,6 @@
 	u32 rsvd[31];
 };
 
-/* ASIC revisions */
-#define ASIC_REV_B0		0x10
-#define ASIC_REV_P2		0x11
-
 struct be_cmd_resp_query_fw_cfg {
 	struct be_cmd_resp_hdr hdr;
 	u32 be_config_number;
@@ -1745,18 +1759,24 @@
 #define PORT_FWD_TYPE_VEPA		0x3
 #define PORT_FWD_TYPE_VEB		0x2
 
+#define ENABLE_MAC_SPOOFCHK		0x2
+#define DISABLE_MAC_SPOOFCHK		0x3
+
 struct amap_set_hsw_context {
 	u8 interface_id[16];
-	u8 rsvd0[14];
+	u8 rsvd0[8];
+	u8 mac_spoofchk[2];
+	u8 rsvd1[4];
 	u8 pvid_valid;
 	u8 pport;
-	u8 rsvd1[6];
+	u8 rsvd2[6];
 	u8 port_fwd_type[3];
-	u8 rsvd2[7];
+	u8 rsvd3[5];
+	u8 vlan_spoofchk[2];
 	u8 pvid[16];
-	u8 rsvd3[32];
 	u8 rsvd4[32];
 	u8 rsvd5[32];
+	u8 rsvd6[32];
 } __packed;
 
 struct be_cmd_req_set_hsw_config {
@@ -1774,11 +1794,13 @@
 struct amap_get_hsw_resp_context {
 	u8 rsvd0[6];
 	u8 port_fwd_type[3];
-	u8 rsvd1[7];
+	u8 rsvd1[5];
+	u8 spoofchk;
+	u8 rsvd2;
 	u8 pvid[16];
-	u8 rsvd2[32];
 	u8 rsvd3[32];
 	u8 rsvd4[32];
+	u8 rsvd5[32];
 } __packed;
 
 struct be_cmd_req_get_hsw_config {
@@ -2334,9 +2356,9 @@
 			u32 domain);
 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom);
 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain,
-			  u16 intf_id, u16 hsw_mode);
+			  u16 intf_id, u16 hsw_mode, u8 spoofchk);
 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain,
-			  u16 intf_id, u8 *mode);
+			  u16 intf_id, u8 *mode, bool *spoofchk);
 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
 int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level);
 int be_cmd_get_fw_log_level(struct be_adapter *adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 2835dee..b2476db 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -123,7 +123,6 @@
 	{DRVSTAT_INFO(dma_map_errors)},
 	/* Number of packets dropped due to random early drop function */
 	{DRVSTAT_INFO(eth_red_drops)},
-	{DRVSTAT_INFO(be_on_die_temperature)},
 	{DRVSTAT_INFO(rx_roce_bytes_lsd)},
 	{DRVSTAT_INFO(rx_roce_bytes_msd)},
 	{DRVSTAT_INFO(rx_roce_frames)},
@@ -368,6 +367,14 @@
 		aic++;
 	}
 
+	/* For Skyhawk, the EQD setting happens via EQ_DB when AIC is enabled.
+	 * When AIC is disabled, persistently force-set the EQD value via the
+	 * FW cmd, so that we don't have to calculate the delay multiplier
+	 * encoding value each time EQ_DB is rung.
+	 */
+	if (!et->use_adaptive_rx_coalesce && skyhawk_chip(adapter))
+		be_eqd_update(adapter, true);
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 4884088..c684bb3 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -132,6 +132,18 @@
 #define DB_EQ_NUM_POPPED_SHIFT		(16)	/* bits 16 - 28 */
 /* Rearm bit */
 #define DB_EQ_REARM_SHIFT		(29)	/* bit 29 */
+/* Rearm to interrupt delay encoding */
+#define DB_EQ_R2I_DLY_SHIFT		(30)    /* bits 30 - 31 */
+
+/* Rearm to interrupt (R2I) delay multiplier encoding represents 3 different
+ * values configured in the CEV_REARM2IRPT_DLY_MULT_CSR register. This value
+ * is programmed by the host driver while ringing an EQ doorbell (EQ_DB) if a
+ * delay between rearming the EQ and the next interrupt on this EQ is desired.
+ */
+#define	R2I_DLY_ENC_0			0	/* No delay */
+#define	R2I_DLY_ENC_1			1	/* maps to 160us EQ delay */
+#define	R2I_DLY_ENC_2			2	/* maps to 96us EQ delay */
+#define	R2I_DLY_ENC_3			3	/* maps to 48us EQ delay */
 
 /********* Compl Q door bell *************/
 #define DB_CQ_OFFSET 			0x120
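
The two R2I bits in EQ_DB select one of four fixed delays; later in this patch be_get_eq_delay_mult_enc() maps the computed EQ delay onto them. A sketch of that mapping, mirroring the thresholds used there:

    static u32 demo_r2i_enc(int eqd)
    {
            if (eqd > 100)
                    return R2I_DLY_ENC_1;   /* 160us */
            if (eqd > 60)
                    return R2I_DLY_ENC_2;   /* 96us */
            if (eqd > 20)
                    return R2I_DLY_ENC_3;   /* 48us */
            return R2I_DLY_ENC_0;           /* no delay */
    }
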
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index e43cc8a..6f64242 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -179,7 +179,7 @@
 	if (lancer_chip(adapter))
 		return;
 
-	if (adapter->eeh_error)
+	if (be_check_error(adapter, BE_ERROR_EEH))
 		return;
 
 	status = be_cmd_intr_set(adapter, enable);
@@ -191,6 +191,9 @@
 {
 	u32 val = 0;
 
+	if (be_check_error(adapter, BE_ERROR_HW))
+		return;
+
 	val |= qid & DB_RQ_RING_ID_MASK;
 	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
 
@@ -203,6 +206,9 @@
 {
 	u32 val = 0;
 
+	if (be_check_error(adapter, BE_ERROR_HW))
+		return;
+
 	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
 	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
 
@@ -211,14 +217,15 @@
 }
 
 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
-			 bool arm, bool clear_int, u16 num_popped)
+			 bool arm, bool clear_int, u16 num_popped,
+			 u32 eq_delay_mult_enc)
 {
 	u32 val = 0;
 
 	val |= qid & DB_EQ_RING_ID_MASK;
 	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
 
-	if (adapter->eeh_error)
+	if (be_check_error(adapter, BE_ERROR_HW))
 		return;
 
 	if (arm)
@@ -227,6 +234,7 @@
 		val |= 1 << DB_EQ_CLR_SHIFT;
 	val |= 1 << DB_EQ_EVNT_SHIFT;
 	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
+	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
 	iowrite32(val, adapter->db + DB_EQ_OFFSET);
 }
 
@@ -238,7 +246,7 @@
 	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
 			DB_CQ_RING_ID_EXT_MASK_SHIFT);
 
-	if (adapter->eeh_error)
+	if (be_check_error(adapter, BE_ERROR_HW))
 		return;
 
 	if (arm)
@@ -662,6 +670,8 @@
 		netif_carrier_on(netdev);
 	else
 		netif_carrier_off(netdev);
+
+	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
 }
 
 static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
@@ -810,6 +820,8 @@
 
 	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
 	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
+	SET_TX_WRB_HDR_BITS(mgmt, hdr,
+			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
 }
 
 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
@@ -1146,6 +1158,130 @@
 	txo->pend_wrb_cnt = 0;
 }
 
+/* OS2BMC related */
+
+#define DHCP_CLIENT_PORT	68
+#define DHCP_SERVER_PORT	67
+#define NET_BIOS_PORT1		137
+#define NET_BIOS_PORT2		138
+#define DHCPV6_RAS_PORT		547
+
+#define is_mc_allowed_on_bmc(adapter, eh)	\
+	(!is_multicast_filt_enabled(adapter) &&	\
+	 is_multicast_ether_addr(eh->h_dest) &&	\
+	 !is_broadcast_ether_addr(eh->h_dest))
+
+#define is_bc_allowed_on_bmc(adapter, eh)	\
+	(!is_broadcast_filt_enabled(adapter) &&	\
+	 is_broadcast_ether_addr(eh->h_dest))
+
+#define is_arp_allowed_on_bmc(adapter, skb)	\
+	(is_arp(skb) && is_arp_filt_enabled(adapter))
+
+#define is_broadcast_packet(eh, adapter)	\
+		(is_multicast_ether_addr(eh->h_dest) && \
+		!compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))
+
+#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))
+
+#define is_arp_filt_enabled(adapter)	\
+		(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
+
+#define is_dhcp_client_filt_enabled(adapter)	\
+		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
+
+#define is_dhcp_srvr_filt_enabled(adapter)	\
+		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
+
+#define is_nbios_filt_enabled(adapter)	\
+		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
+
+#define is_ipv6_na_filt_enabled(adapter)	\
+		(adapter->bmc_filt_mask &	\
+			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
+
+#define is_ipv6_ra_filt_enabled(adapter)	\
+		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
+
+#define is_ipv6_ras_filt_enabled(adapter)	\
+		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
+
+#define is_broadcast_filt_enabled(adapter)	\
+		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)
+
+#define is_multicast_filt_enabled(adapter)	\
+		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
+
+static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
+			       struct sk_buff **skb)
+{
+	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
+	bool os2bmc = false;
+
+	if (!be_is_os2bmc_enabled(adapter))
+		goto done;
+
+	if (!is_multicast_ether_addr(eh->h_dest))
+		goto done;
+
+	if (is_mc_allowed_on_bmc(adapter, eh) ||
+	    is_bc_allowed_on_bmc(adapter, eh) ||
+	    is_arp_allowed_on_bmc(adapter, (*skb))) {
+		os2bmc = true;
+		goto done;
+	}
+
+	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
+		struct ipv6hdr *hdr = ipv6_hdr((*skb));
+		u8 nexthdr = hdr->nexthdr;
+
+		if (nexthdr == IPPROTO_ICMPV6) {
+			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
+
+			switch (icmp6->icmp6_type) {
+			case NDISC_ROUTER_ADVERTISEMENT:
+				os2bmc = is_ipv6_ra_filt_enabled(adapter);
+				goto done;
+			case NDISC_NEIGHBOUR_ADVERTISEMENT:
+				os2bmc = is_ipv6_na_filt_enabled(adapter);
+				goto done;
+			default:
+				break;
+			}
+		}
+	}
+
+	if (is_udp_pkt((*skb))) {
+		struct udphdr *udp = udp_hdr((*skb));
+
+		switch (udp->dest) {
+		case DHCP_CLIENT_PORT:
+			os2bmc = is_dhcp_client_filt_enabled(adapter);
+			goto done;
+		case DHCP_SERVER_PORT:
+			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
+			goto done;
+		case NET_BIOS_PORT1:
+		case NET_BIOS_PORT2:
+			os2bmc = is_nbios_filt_enabled(adapter);
+			goto done;
+		case DHCPV6_RAS_PORT:
+			os2bmc = is_ipv6_ras_filt_enabled(adapter);
+			goto done;
+		default:
+			break;
+		}
+	}
+done:
+	/* For packets over a VLAN that are destined to the BMC, the ASIC
+	 * expects the VLAN tag to be inline in the packet.
+	 */
+	if (os2bmc)
+		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
+
+	return os2bmc;
+}
+
 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
@@ -1167,6 +1303,18 @@
 		goto drop;
 	}
 
+	/* If OS2BMC is enabled and the packet is destined to the BMC,
+	 * enqueue the packet a second time with the mgmt bit set.
+	 */
+	if (be_send_pkt_to_bmc(adapter, &skb)) {
+		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
+		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
+		if (unlikely(!wrb_cnt))
+			goto drop;
+		else
+			skb_get(skb);
+	}
+
 	if (be_is_txq_full(txo)) {
 		netif_stop_subqueue(netdev, q_idx);
 		tx_stats(txo)->tx_stops++;
@@ -1265,7 +1413,8 @@
 	if (status) {
 		dev_err(dev, "Setting HW VLAN filtering failed\n");
 		/* Set to VLAN promisc mode as setting VLAN filter failed */
-		if (addl_status(status) ==
+		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
+		    addl_status(status) ==
 				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
 			return be_set_vlan_promisc(adapter);
 	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
@@ -1466,6 +1615,7 @@
 	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
 	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
 	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
+	vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
 
 	return 0;
 }
@@ -1478,7 +1628,7 @@
 	int status;
 
 	/* Enable Transparent VLAN Tagging */
-	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
+	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
 	if (status)
 		return status;
 
@@ -1507,7 +1657,7 @@
 
 	/* Reset Transparent VLAN Tagging. */
 	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
-				       vf_cfg->if_handle, 0);
+				       vf_cfg->if_handle, 0, 0);
 	if (status)
 		return status;
 
@@ -1642,6 +1792,39 @@
 	return 0;
 }
 
+static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+	u8 spoofchk;
+	int status;
+
+	if (!sriov_enabled(adapter))
+		return -EPERM;
+
+	if (vf >= adapter->num_vfs)
+		return -EINVAL;
+
+	if (BEx_chip(adapter))
+		return -EOPNOTSUPP;
+
+	if (enable == vf_cfg->spoofchk)
+		return 0;
+
+	spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
+
+	status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
+				       0, spoofchk);
+	if (status) {
+		dev_err(&adapter->pdev->dev,
+			"Spoofchk change on VF %d failed: %#x\n", vf, status);
+		return be_cmd_status(status);
+	}
+
+	vf_cfg->spoofchk = enable;
+	return 0;
+}
+
 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
 			  ulong now)
 {
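
With the new ndo_set_vf_spoofchk above (wired into be_netdev_ops later in this patch), the per-VF check is reachable from userspace via iproute2, e.g. "ip link set <pf-dev> vf 0 spoofchk on"; "ip link show <pf-dev>" reports the current per-VF setting.
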
@@ -1650,61 +1833,110 @@
 	aic->jiffies = now;
 }
 
-static void be_eqd_update(struct be_adapter *adapter)
+static int be_get_new_eqd(struct be_eq_obj *eqo)
 {
-	struct be_set_eqd set_eqd[MAX_EVT_QS];
-	int eqd, i, num = 0, start;
+	struct be_adapter *adapter = eqo->adapter;
+	int eqd, start;
 	struct be_aic_obj *aic;
-	struct be_eq_obj *eqo;
 	struct be_rx_obj *rxo;
 	struct be_tx_obj *txo;
-	u64 rx_pkts, tx_pkts;
+	u64 rx_pkts = 0, tx_pkts = 0;
 	ulong now;
 	u32 pps, delta;
+	int i;
+
+	aic = &adapter->aic_obj[eqo->idx];
+	if (!aic->enable) {
+		if (aic->jiffies)
+			aic->jiffies = 0;
+		eqd = aic->et_eqd;
+		return eqd;
+	}
+
+	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
+		do {
+			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
+			rx_pkts += rxo->stats.rx_pkts;
+		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
+	}
+
+	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
+		do {
+			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
+			tx_pkts += txo->stats.tx_reqs;
+		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
+	}
+
+	/* Skip, if wrapped around or first calculation */
+	now = jiffies;
+	if (!aic->jiffies || time_before(now, aic->jiffies) ||
+	    rx_pkts < aic->rx_pkts_prev ||
+	    tx_pkts < aic->tx_reqs_prev) {
+		be_aic_update(aic, rx_pkts, tx_pkts, now);
+		return aic->prev_eqd;
+	}
+
+	delta = jiffies_to_msecs(now - aic->jiffies);
+	if (delta == 0)
+		return aic->prev_eqd;
+
+	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
+		(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
+	eqd = (pps / 15000) << 2;
+
+	if (eqd < 8)
+		eqd = 0;
+	eqd = min_t(u32, eqd, aic->max_eqd);
+	eqd = max_t(u32, eqd, aic->min_eqd);
+
+	be_aic_update(aic, rx_pkts, tx_pkts, now);
+
+	return eqd;
+}
+
+/* For Skyhawk-R only */
+static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
+{
+	struct be_adapter *adapter = eqo->adapter;
+	struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
+	ulong now = jiffies;
+	int eqd;
+	u32 mult_enc;
+
+	if (!aic->enable)
+		return 0;
+
+	if (time_before_eq(now, aic->jiffies) ||
+	    jiffies_to_msecs(now - aic->jiffies) < 1)
+		eqd = aic->prev_eqd;
+	else
+		eqd = be_get_new_eqd(eqo);
+
+	if (eqd > 100)
+		mult_enc = R2I_DLY_ENC_1;
+	else if (eqd > 60)
+		mult_enc = R2I_DLY_ENC_2;
+	else if (eqd > 20)
+		mult_enc = R2I_DLY_ENC_3;
+	else
+		mult_enc = R2I_DLY_ENC_0;
+
+	aic->prev_eqd = eqd;
+
+	return mult_enc;
+}
+
+void be_eqd_update(struct be_adapter *adapter, bool force_update)
+{
+	struct be_set_eqd set_eqd[MAX_EVT_QS];
+	struct be_aic_obj *aic;
+	struct be_eq_obj *eqo;
+	int i, num = 0, eqd;
 
 	for_all_evt_queues(adapter, eqo, i) {
 		aic = &adapter->aic_obj[eqo->idx];
-		if (!aic->enable) {
-			if (aic->jiffies)
-				aic->jiffies = 0;
-			eqd = aic->et_eqd;
-			goto modify_eqd;
-		}
-
-		rxo = &adapter->rx_obj[eqo->idx];
-		do {
-			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
-			rx_pkts = rxo->stats.rx_pkts;
-		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
-
-		txo = &adapter->tx_obj[eqo->idx];
-		do {
-			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
-			tx_pkts = txo->stats.tx_reqs;
-		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
-
-		/* Skip, if wrapped around or first calculation */
-		now = jiffies;
-		if (!aic->jiffies || time_before(now, aic->jiffies) ||
-		    rx_pkts < aic->rx_pkts_prev ||
-		    tx_pkts < aic->tx_reqs_prev) {
-			be_aic_update(aic, rx_pkts, tx_pkts, now);
-			continue;
-		}
-
-		delta = jiffies_to_msecs(now - aic->jiffies);
-		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
-			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
-		eqd = (pps / 15000) << 2;
-
-		if (eqd < 8)
-			eqd = 0;
-		eqd = min_t(u32, eqd, aic->max_eqd);
-		eqd = max_t(u32, eqd, aic->min_eqd);
-
-		be_aic_update(aic, rx_pkts, tx_pkts, now);
-modify_eqd:
-		if (eqd != aic->prev_eqd) {
+		eqd = be_get_new_eqd(eqo);
+		if (force_update || eqd != aic->prev_eqd) {
 			set_eqd[num].delay_multiplier = (eqd * 65)/100;
 			set_eqd[num].eq_id = eqo->q.id;
 			aic->prev_eqd = eqd;
@@ -2212,7 +2444,7 @@
 {
 	int num = events_get(eqo);
 
-	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
+	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
 }
 
 static void be_rx_cq_clean(struct be_rx_obj *rxo)
@@ -2236,7 +2468,9 @@
 			if (lancer_chip(adapter))
 				break;
 
-			if (flush_wait++ > 10 || be_hw_error(adapter)) {
+			if (flush_wait++ > 50 ||
+			    be_check_error(adapter,
+					   BE_ERROR_HW)) {
 				dev_warn(&adapter->pdev->dev,
 					 "did not receive flush compl\n");
 				break;
@@ -2297,7 +2531,8 @@
 				pending_txqs--;
 		}
 
-		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
+		if (pending_txqs == 0 || ++timeo > 10 ||
+		    be_check_error(adapter, BE_ERROR_HW))
 			break;
 
 		mdelay(1);
@@ -2573,7 +2808,7 @@
 		if (num_evts)
 			eqo->spurious_intr = 0;
 	}
-	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
+	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
 
 	/* Return IRQ_HANDLED only for the first spurious intr
 	 * after a valid intr to stop the kernel from branding
@@ -2589,7 +2824,7 @@
 {
 	struct be_eq_obj *eqo = dev;
 
-	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
+	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
 	napi_schedule(&eqo->napi);
 	return IRQ_HANDLED;
 }
@@ -2838,6 +3073,7 @@
 	int max_work = 0, work, i, num_evts;
 	struct be_rx_obj *rxo;
 	struct be_tx_obj *txo;
+	u32 mult_enc = 0;
 
 	num_evts = events_get(eqo);
 
@@ -2863,10 +3099,18 @@
 
 	if (max_work < budget) {
 		napi_complete(napi);
-		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
+
+		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
+		 * delay via a delay multiplier encoding value
+		 */
+		if (skyhawk_chip(adapter))
+			mult_enc = be_get_eq_delay_mult_enc(eqo);
+
+		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
+			     mult_enc);
 	} else {
 		/* As we'll continue in polling mode, count and clear events */
-		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
+		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
 	}
 	return max_work;
 }
@@ -2898,22 +3142,19 @@
 	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
 	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
 	u32 i;
-	bool error_detected = false;
 	struct device *dev = &adapter->pdev->dev;
-	struct net_device *netdev = adapter->netdev;
 
-	if (be_hw_error(adapter))
+	if (be_check_error(adapter, BE_ERROR_HW))
 		return;
 
 	if (lancer_chip(adapter)) {
 		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
 		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+			be_set_error(adapter, BE_ERROR_UE);
 			sliport_err1 = ioread32(adapter->db +
 						SLIPORT_ERROR1_OFFSET);
 			sliport_err2 = ioread32(adapter->db +
 						SLIPORT_ERROR2_OFFSET);
-			adapter->hw_error = true;
-			error_detected = true;
 			/* Do not log error messages if it's a FW reset */
 			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
 			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
@@ -2945,12 +3186,12 @@
 		 */
 
 		if (ue_lo || ue_hi) {
-			error_detected = true;
 			dev_err(dev,
 				"Unrecoverable Error detected in the adapter");
 			dev_err(dev, "Please reboot server to recover");
 			if (skyhawk_chip(adapter))
-				adapter->hw_error = true;
+				be_set_error(adapter, BE_ERROR_UE);
+
 			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
 				if (ue_lo & 1)
 					dev_err(dev, "UE: %s bit set\n",
@@ -2963,8 +3204,6 @@
 			}
 		}
 	}
-	if (error_detected)
-		netif_carrier_off(netdev);
 }
 
 static void be_msix_disable(struct be_adapter *adapter)
@@ -3015,7 +3254,7 @@
 	dev_warn(dev, "MSIx enable failed\n");
 
 	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
-	if (!be_physfn(adapter))
+	if (be_virtfn(adapter))
 		return num_vec;
 	return 0;
 }
@@ -3062,7 +3301,7 @@
 		if (status == 0)
 			goto done;
 		/* INTx is not supported for VF */
-		if (!be_physfn(adapter))
+		if (be_virtfn(adapter))
 			return status;
 	}
 
@@ -3229,9 +3468,12 @@
 
 	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
 
-	/* First time posting */
+	/* Post 1 less than RXQ-len to avoid head being equal to tail,
+	 * which is a queue empty condition
+	 */
 	for_all_rx_queues(adapter, rxo, i)
-		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
+		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
+
 	return 0;
 }
 
@@ -3263,7 +3505,7 @@
 	for_all_evt_queues(adapter, eqo, i) {
 		napi_enable(&eqo->napi);
 		be_enable_busy_poll(eqo);
-		be_eq_notify(adapter, eqo->q.id, true, true, 0);
+		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
 	}
 	adapter->flags |= BE_FLAGS_NAPI_ENABLED;
 
@@ -3563,7 +3805,7 @@
 
 	/* If a FW profile exists, then cap_flags are updated */
 	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
-		    BE_IF_FLAGS_MULTICAST;
+		    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
 
 	for_all_vfs(adapter, vf_cfg, vf) {
 		if (!BE3_chip(adapter)) {
@@ -3610,6 +3852,7 @@
 	struct device *dev = &adapter->pdev->dev;
 	struct be_vf_cfg *vf_cfg;
 	int status, old_vfs, vf;
+	bool spoofchk;
 
 	old_vfs = pci_num_vf(adapter->pdev);
 
@@ -3657,6 +3900,12 @@
 		if (!old_vfs)
 			be_cmd_config_qos(adapter, 0, 0, vf + 1);
 
+		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
+					       vf_cfg->if_handle, NULL,
+					       &spoofchk);
+		if (!status)
+			vf_cfg->spoofchk = spoofchk;
+
 		if (!old_vfs) {
 			be_cmd_enable_vf(adapter, vf + 1);
 			be_cmd_set_logical_link_config(adapter,
@@ -3733,8 +3982,9 @@
 	 *    *only* if it is RSS-capable.
 	 */
 	if (BE2_chip(adapter) || use_sriov ||  (adapter->port_num > 1) ||
-	    !be_physfn(adapter) || (be_is_mc(adapter) &&
-	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
+	    be_virtfn(adapter) ||
+	    (be_is_mc(adapter) &&
+	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
 		res->max_tx_qs = 1;
 	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
 		struct be_resources super_nic_res = {0};
@@ -4075,7 +4325,7 @@
 		msleep(100);
 
 		/* We can clear all errors when function reset succeeds */
-		be_clear_all_error(adapter);
+		be_clear_error(adapter, BE_CLEAR_ALL);
 	}
 
 	/* Tell FW we're ready to fire cmds */
@@ -4182,7 +4432,7 @@
 	int i;
 
 	for_all_evt_queues(adapter, eqo, i) {
-		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
+		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
 		napi_schedule(&eqo->napi);
 	}
 }
@@ -4666,14 +4916,11 @@
 	return 0;
 }
 
-#define BE2_UFI		2
-#define BE3_UFI		3
-#define BE3R_UFI	10
-#define SH_UFI		4
-#define SH_P2_UFI	11
-
-static int be_get_ufi_type(struct be_adapter *adapter,
-			   struct flash_file_hdr_g3 *fhdr)
+/* Check if the flash image file is compatible with the adapter that
+ * is being flashed.
+ */
+static bool be_check_ufi_compatibility(struct be_adapter *adapter,
+				       struct flash_file_hdr_g3 *fhdr)
 {
 	if (!fhdr) {
 		dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
@@ -4685,43 +4932,22 @@
 	 */
 	switch (fhdr->build[0]) {
 	case BLD_STR_UFI_TYPE_SH:
-		return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
-								SH_UFI;
+		if (!skyhawk_chip(adapter))
+			return false;
+		break;
 	case BLD_STR_UFI_TYPE_BE3:
-		return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
-								BE3_UFI;
+		if (!BE3_chip(adapter))
+			return false;
+		break;
 	case BLD_STR_UFI_TYPE_BE2:
-		return BE2_UFI;
-	default:
-		return -1;
-	}
-}
-
-/* Check if the flash image file is compatible with the adapter that
- * is being flashed.
- * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
- * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
- */
-static bool be_check_ufi_compatibility(struct be_adapter *adapter,
-				       struct flash_file_hdr_g3 *fhdr)
-{
-	int ufi_type = be_get_ufi_type(adapter, fhdr);
-
-	switch (ufi_type) {
-	case SH_P2_UFI:
-		return skyhawk_chip(adapter);
-	case SH_UFI:
-		return (skyhawk_chip(adapter) &&
-			adapter->asic_rev < ASIC_REV_P2);
-	case BE3R_UFI:
-		return BE3_chip(adapter);
-	case BE3_UFI:
-		return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
-	case BE2_UFI:
-		return BE2_chip(adapter);
+		if (!BE2_chip(adapter))
+			return false;
+		break;
 	default:
 		return false;
 	}
+
+	return (fhdr->asic_type_rev >= adapter->asic_rev);
 }
 
 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
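
With the rework above, the per-revision UFI type table (BE2_UFI, BE3R_UFI, SH_P2_UFI, ...) is gone: the image's build string selects the chip family, and the final fhdr->asic_type_rev >= adapter->asic_rev test accepts any image built for the adapter's ASIC revision or a newer one.
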
@@ -4829,7 +5055,7 @@
 					       adapter->if_handle,
 					       mode == BRIDGE_MODE_VEPA ?
 					       PORT_FWD_TYPE_VEPA :
-					       PORT_FWD_TYPE_VEB);
+					       PORT_FWD_TYPE_VEB, 0);
 		if (status)
 			goto err;
 
@@ -4861,7 +5087,8 @@
 		hsw_mode = PORT_FWD_TYPE_VEB;
 	} else {
 		status = be_cmd_get_hsw_config(adapter, NULL, 0,
-					       adapter->if_handle, &hsw_mode);
+					       adapter->if_handle, &hsw_mode,
+					       NULL);
 		if (status)
 			return 0;
 	}
@@ -4869,7 +5096,7 @@
 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
 				       hsw_mode == PORT_FWD_TYPE_VEPA ?
 				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
-				       0, 0, nlflags);
+				       0, 0, nlflags, filter_mask, NULL);
 }
 
 #ifdef CONFIG_BE2NET_VXLAN
@@ -5014,6 +5241,7 @@
 	.ndo_set_vf_rate	= be_set_vf_tx_rate,
 	.ndo_get_vf_config	= be_get_vf_config,
 	.ndo_set_vf_link_state  = be_set_vf_link_state,
+	.ndo_set_vf_spoofchk    = be_set_vf_spoofchk,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= be_netpoll,
 #endif
@@ -5118,7 +5346,7 @@
 
 	be_detect_error(adapter);
 
-	if (adapter->hw_error) {
+	if (be_check_error(adapter, BE_ERROR_HW)) {
 		be_cleanup(adapter);
 
 		/* As of now error recovery support is in Lancer only */
@@ -5182,7 +5410,9 @@
 			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
 	}
 
-	be_eqd_update(adapter);
+	/* EQ-delay update for Skyhawk is done while notifying EQ */
+	if (!skyhawk_chip(adapter))
+		be_eqd_update(adapter, false);
 
 	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
 		be_log_sfp_info(adapter);
@@ -5202,7 +5432,7 @@
 
 static int db_bar(struct be_adapter *adapter)
 {
-	if (lancer_chip(adapter) || !be_physfn(adapter))
+	if (lancer_chip(adapter) || be_virtfn(adapter))
 		return 0;
 	else
 		return 4;
@@ -5381,6 +5611,30 @@
 	free_netdev(adapter->netdev);
 }
 
+static ssize_t be_hwmon_show_temp(struct device *dev,
+				  struct device_attribute *dev_attr,
+				  char *buf)
+{
+	struct be_adapter *adapter = dev_get_drvdata(dev);
+
+	/* Unit: millidegree Celsius */
+	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
+		return -EIO;
+	else
+		return sprintf(buf, "%u\n",
+			       adapter->hwmon_info.be_on_die_temp * 1000);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+			  be_hwmon_show_temp, NULL, 1);
+
+static struct attribute *be_hwmon_attrs[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(be_hwmon);
+
 static char *mc_name(struct be_adapter *adapter)
 {
 	char *str = "";	/* default */
@@ -5500,6 +5754,16 @@
 
 	be_schedule_err_detection(adapter);
 
+	/* On-die temperature is not supported on VFs. */
+	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
+		adapter->hwmon_info.hwmon_dev =
+			devm_hwmon_device_register_with_groups(&pdev->dev,
+							       DRV_NAME,
+							       adapter,
+							       be_hwmon_groups);
+		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
+	}
+
 	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
 		 func_name(adapter), mc_name(adapter), adapter->port_name);
 
@@ -5592,8 +5856,8 @@
 
 	dev_err(&adapter->pdev->dev, "EEH error detected\n");
 
-	if (!adapter->eeh_error) {
-		adapter->eeh_error = true;
+	if (!be_check_error(adapter, BE_ERROR_EEH)) {
+		be_set_error(adapter, BE_ERROR_EEH);
 
 		be_cancel_err_detection(adapter);
 
@@ -5640,7 +5904,7 @@
 		return PCI_ERS_RESULT_DISCONNECT;
 
 	pci_cleanup_aer_uncorrect_error_status(pdev);
-	be_clear_all_error(adapter);
+	be_clear_error(adapter, BE_CLEAR_ALL);
 	return PCI_ERS_RESULT_RECOVERED;
 }
 
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 1328664..6036820 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index e6f7eb1..cde6ef9 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/ezchip/Kconfig b/drivers/net/ethernet/ezchip/Kconfig
new file mode 100644
index 0000000..48ecbc8
--- /dev/null
+++ b/drivers/net/ethernet/ezchip/Kconfig
@@ -0,0 +1,26 @@
+#
+# EZchip network device configuration
+#
+
+config NET_VENDOR_EZCHIP
+	bool "EZchip devices"
+	default y
+	---help---
+	  If you have a network (Ethernet) device belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about EZchip devices. If you say Y, you will be asked for
+	  your specific device in the following questions.
+
+if NET_VENDOR_EZCHIP
+
+config EZCHIP_NPS_MANAGEMENT_ENET
+	tristate "EZchip NPS management enet support"
+	depends on OF_IRQ && OF_NET
+	---help---
+	  Simple LAN device for debug or management purposes.
+	  The device supports interrupts for RX and TX (completion).
+	  The device does not have DMA capability.
+
+endif
diff --git a/drivers/net/ethernet/ezchip/Makefile b/drivers/net/ethernet/ezchip/Makefile
new file mode 100644
index 0000000..e490176
--- /dev/null
+++ b/drivers/net/ethernet/ezchip/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_EZCHIP_NPS_MANAGEMENT_ENET) += nps_enet.o
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
new file mode 100644
index 0000000..24a85b2
--- /dev/null
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -0,0 +1,658 @@
+/*
+ * Copyright(c) 2015 EZchip Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include "nps_enet.h"
+
+#define DRV_NAME			"nps_mgt_enet"
+
+static void nps_enet_clean_rx_fifo(struct net_device *ndev, u32 frame_len)
+{
+	struct nps_enet_priv *priv = netdev_priv(ndev);
+	u32 i, len = DIV_ROUND_UP(frame_len, sizeof(u32));
+
+	/* Empty Rx FIFO buffer by reading all words */
+	for (i = 0; i < len; i++)
+		nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
+}
+
+static void nps_enet_read_rx_fifo(struct net_device *ndev,
+				  unsigned char *dst, u32 length)
+{
+	struct nps_enet_priv *priv = netdev_priv(ndev);
+	s32 i, last = length & (sizeof(u32) - 1);
+	u32 *reg = (u32 *)dst, len = length / sizeof(u32);
+	bool dst_is_aligned = IS_ALIGNED((unsigned long)dst, sizeof(u32));
+
+	/* In case dst is not aligned we need an intermediate buffer */
+	if (dst_is_aligned)
+		for (i = 0; i < len; i++, reg++)
+			*reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
+	else { /* !dst_is_aligned */
+		for (i = 0; i < len; i++, reg++) {
+			u32 buf =
+				nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
+
+			/* to accommodate word-unaligned address of "reg"
+			 * we have to do memcpy_toio() instead of simple "=".
+			 */
+			memcpy_toio((void __iomem *)reg, &buf, sizeof(buf));
+		}
+	}
+
+	/* copy last bytes (if any) */
+	if (last) {
+		u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
+
+		memcpy_toio((void __iomem *)reg, &buf, last);
+	}
+}
+
+static u32 nps_enet_rx_handler(struct net_device *ndev)
+{
+	u32 frame_len, err = 0;
+	u32 work_done = 0;
+	struct nps_enet_priv *priv = netdev_priv(ndev);
+	struct sk_buff *skb;
+	struct nps_enet_rx_ctl rx_ctrl;
+
+	rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
+	frame_len = rx_ctrl.nr;
+
+	/* Check if we got RX */
+	if (!rx_ctrl.cr)
+		return work_done;
+
+	/* If we got here there is work for us */
+	work_done++;
+
+	/* Check Rx error */
+	if (rx_ctrl.er) {
+		ndev->stats.rx_errors++;
+		err = 1;
+	}
+
+	/* Check Rx CRC error */
+	if (rx_ctrl.crc) {
+		ndev->stats.rx_crc_errors++;
+		ndev->stats.rx_dropped++;
+		err = 1;
+	}
+
+	/* Check frame length: minimum 64 bytes */
+	if (unlikely(frame_len < ETH_ZLEN)) {
+		ndev->stats.rx_length_errors++;
+		ndev->stats.rx_dropped++;
+		err = 1;
+	}
+
+	if (err)
+		goto rx_irq_clean;
+
+	/* Skb allocation */
+	skb = netdev_alloc_skb_ip_align(ndev, frame_len);
+	if (unlikely(!skb)) {
+		ndev->stats.rx_errors++;
+		ndev->stats.rx_dropped++;
+		goto rx_irq_clean;
+	}
+
+	/* Copy frame from Rx fifo into the skb */
+	nps_enet_read_rx_fifo(ndev, skb->data, frame_len);
+
+	skb_put(skb, frame_len);
+	skb->protocol = eth_type_trans(skb, ndev);
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	ndev->stats.rx_packets++;
+	ndev->stats.rx_bytes += frame_len;
+	netif_receive_skb(skb);
+
+	goto rx_irq_frame_done;
+
+rx_irq_clean:
+	/* Clean Rx fifo */
+	nps_enet_clean_rx_fifo(ndev, frame_len);
+
+rx_irq_frame_done:
+	/* Ack Rx ctrl register */
+	nps_enet_reg_set(priv, NPS_ENET_REG_RX_CTL, 0);
+
+	return work_done;
+}
+
+static void nps_enet_tx_handler(struct net_device *ndev)
+{
+	struct nps_enet_priv *priv = netdev_priv(ndev);
+	struct nps_enet_tx_ctl tx_ctrl;
+
+	tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
+
+	/* Check if we got TX */
+	if (!priv->tx_packet_sent || tx_ctrl.ct)
+		return;
+
+	/* Check Tx transmit error */
+	if (unlikely(tx_ctrl.et)) {
+		ndev->stats.tx_errors++;
+	} else {
+		ndev->stats.tx_packets++;
+		ndev->stats.tx_bytes += tx_ctrl.nt;
+	}
+
+	if (priv->tx_skb) {
+		dev_kfree_skb(priv->tx_skb);
+		priv->tx_skb = NULL;
+	}
+
+	priv->tx_packet_sent = false;
+
+	if (netif_queue_stopped(ndev))
+		netif_wake_queue(ndev);
+}
+
+/**
+ * nps_enet_poll - NAPI poll handler.
+ * @napi:       Pointer to napi_struct structure.
+ * @budget:     How many frames to process on one call.
+ *
+ * returns:     Number of processed frames
+ */
+static int nps_enet_poll(struct napi_struct *napi, int budget)
+{
+	struct net_device *ndev = napi->dev;
+	struct nps_enet_priv *priv = netdev_priv(ndev);
+	struct nps_enet_buf_int_enable buf_int_enable;
+	u32 work_done;
+
+	buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
+	buf_int_enable.tx_done = NPS_ENET_ENABLE;
+	nps_enet_tx_handler(ndev);
+	work_done = nps_enet_rx_handler(ndev);
+	if (work_done < budget) {
+		napi_complete(napi);
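+		/* all pending work done; re-enable RX and TX-done interrupts */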
+		nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
+				 buf_int_enable.value);
+	}
+
+	return work_done;
+}
+
+/**
+ * nps_enet_irq_handler - Global interrupt handler for ENET.
+ * @irq:                irq number.
+ * @dev_instance:       device instance.
+ *
+ * returns: IRQ_HANDLED for all cases.
+ *
+ * EZchip ENET has 2 interrupt causes, and depending on the bits raised in
+ * the CTRL registers we can tell why the interrupt fired.
+ * One cause is for RX and the other for TX (completion).
+ */
+static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
+{
+	struct net_device *ndev = dev_instance;
+	struct nps_enet_priv *priv = netdev_priv(ndev);
+	struct nps_enet_buf_int_cause buf_int_cause;
+
+	buf_int_cause.value =
+			nps_enet_reg_get(priv, NPS_ENET_REG_BUF_INT_CAUSE);
+
+	if (buf_int_cause.tx_done || buf_int_cause.rx_rdy)
+		if (likely(napi_schedule_prep(&priv->napi))) {
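+			/* mask both interrupt causes while NAPI runs;
+			 * nps_enet_poll() re-enables them when done
+			 */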
+			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
+			__napi_schedule(&priv->napi);
+		}
+
+	return IRQ_HANDLED;
+}
+
+static void nps_enet_set_hw_mac_address(struct net_device *ndev)
+{
+	struct nps_enet_priv *priv = netdev_priv(ndev);
+	struct nps_enet_ge_mac_cfg_1 ge_mac_cfg_1;
+	struct nps_enet_ge_mac_cfg_2 *ge_mac_cfg_2 = &priv->ge_mac_cfg_2;
+
+	/* set MAC address in HW */
+	ge_mac_cfg_1.octet_0 = ndev->dev_addr[0];
+	ge_mac_cfg_1.octet_1 = ndev->dev_addr[1];
+	ge_mac_cfg_1.octet_2 = ndev->dev_addr[2];
+	ge_mac_cfg_1.octet_3 = ndev->dev_addr[3];
+	ge_mac_cfg_2->octet_4 = ndev->dev_addr[4];
+	ge_mac_cfg_2->octet_5 = ndev->dev_addr[5];
+
+	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_1,
+			 ge_mac_cfg_1.value);
+
+	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2,
+			 ge_mac_cfg_2->value);
+}
+
+/**
+ * nps_enet_hw_reset - Reset the network device.
+ * @ndev:       Pointer to the network device.
+ *
+ * This function reset the PCS and TX fifo.
+ * The programming model is to set the relevant reset bits
+ * wait for some time for this to propagate and then unset
+ * the reset bits. This way we ensure that reset procedure
+ * is done successfully by device.
+ */
+static void nps_enet_hw_reset(struct net_device *ndev)
+{
+	struct nps_enet_priv *priv = netdev_priv(ndev);
+	struct nps_enet_ge_rst ge_rst;
+	struct nps_enet_phase_fifo_ctl phase_fifo_ctl;
+
+	ge_rst.value = 0;
+	phase_fifo_ctl.value = 0;
+	/* PCS reset sequence */
+	ge_rst.gmac_0 = NPS_ENET_ENABLE;
+	nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst.value);
+	usleep_range(10, 20);
+	ge_rst.value = 0;
+	nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst.value);
+
+	/* Tx fifo reset sequence */
+	phase_fifo_ctl.rst = NPS_ENET_ENABLE;
+	phase_fifo_ctl.init = NPS_ENET_ENABLE;
+	nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL,
+			 phase_fifo_ctl.value);
+	usleep_range(10, 20);
+	phase_fifo_ctl.value = 0;
+	nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL,
+			 phase_fifo_ctl.value);
+}
+
+static void nps_enet_hw_enable_control(struct net_device *ndev)
+{
+	struct nps_enet_priv *priv = netdev_priv(ndev);
+	struct nps_enet_ge_mac_cfg_0 ge_mac_cfg_0;
+	struct nps_enet_buf_int_enable buf_int_enable;
+	struct nps_enet_ge_mac_cfg_2 *ge_mac_cfg_2 = &priv->ge_mac_cfg_2;
+	struct nps_enet_ge_mac_cfg_3 *ge_mac_cfg_3 = &priv->ge_mac_cfg_3;
+	s32 max_frame_length;
+
+	ge_mac_cfg_0.value = 0;
+	buf_int_enable.value = 0;
+	/* Enable Rx and Tx statistics */
+	ge_mac_cfg_2->stat_en = NPS_ENET_GE_MAC_CFG_2_STAT_EN;
+
+	/* Discard packets with different MAC address */
+	ge_mac_cfg_2->disc_da = NPS_ENET_ENABLE;
+
+	/* Discard multicast packets */
+	ge_mac_cfg_2->disc_mc = NPS_ENET_ENABLE;
+
+	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2,
+			 ge_mac_cfg_2->value);
+
+	/* Discard packets bigger than the max frame length */
+	max_frame_length = ETH_HLEN + ndev->mtu + ETH_FCS_LEN;
+	if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) {
+		ge_mac_cfg_3->max_len = max_frame_length;
+		nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
+				 ge_mac_cfg_3->value);
+	}
+
+	/* Enable interrupts */
+	buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
+	buf_int_enable.tx_done = NPS_ENET_ENABLE;
+	nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
+			 buf_int_enable.value);
+
+	/* Write device MAC address to HW */
+	nps_enet_set_hw_mac_address(ndev);
+
+	/* Rx and Tx HW features */
+	ge_mac_cfg_0.tx_pad_en = NPS_ENET_ENABLE;
+	ge_mac_cfg_0.tx_crc_en = NPS_ENET_ENABLE;
+	ge_mac_cfg_0.rx_crc_strip = NPS_ENET_ENABLE;
+
+	/* IFG configuration */
+	ge_mac_cfg_0.rx_ifg = NPS_ENET_GE_MAC_CFG_0_RX_IFG;
+	ge_mac_cfg_0.tx_ifg = NPS_ENET_GE_MAC_CFG_0_TX_IFG;
+
+	/* preamble configuration */
+	ge_mac_cfg_0.rx_pr_check_en = NPS_ENET_ENABLE;
+	ge_mac_cfg_0.tx_pr_len = NPS_ENET_GE_MAC_CFG_0_TX_PR_LEN;
+
+	/* enable flow control frames */
+	ge_mac_cfg_0.tx_fc_en = NPS_ENET_ENABLE;
+	ge_mac_cfg_0.rx_fc_en = NPS_ENET_ENABLE;
+	ge_mac_cfg_0.tx_fc_retr = NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR;
+
+	/* Enable Rx and Tx */
+	ge_mac_cfg_0.rx_en = NPS_ENET_ENABLE;
+	ge_mac_cfg_0.tx_en = NPS_ENET_ENABLE;
+
+	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0,
+			 ge_mac_cfg_0.value);
+}
+
+static void nps_enet_hw_disable_control(struct net_device *ndev)
+{
+	struct nps_enet_priv *priv = netdev_priv(ndev);
+
+	/* Disable interrupts */
+	nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
+
+	/* Disable Rx and Tx */
+	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0, 0);
+}
+
+static void nps_enet_send_frame(struct net_device *ndev,
+				struct sk_buff *skb)
+{
+	struct nps_enet_priv *priv = netdev_priv(ndev);
+	struct nps_enet_tx_ctl tx_ctrl;
+	short length = skb->len;
+	u32 i, len = DIV_ROUND_UP(length, sizeof(u32));
+	u32 *src = (u32 *)virt_to_phys(skb->data);
+	bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32));
+
+	tx_ctrl.value = 0;
+	/* In case src is not aligned we need an intermediate buffer */
+	if (src_is_aligned)
+		for (i = 0; i < len; i++, src++)
+			nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src);
+	else { /* !src_is_aligned */
+		for (i = 0; i < len; i++, src++) {
+			u32 buf;
+
+			/* To accommodate the word-unaligned address of "src"
+			 * we have to use memcpy_fromio() instead of a simple "="
+			 */
+			memcpy_fromio(&buf, (void __iomem *)src, sizeof(buf));
+			nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, buf);
+		}
+	}
+	/* Write the length of the Frame */
+	tx_ctrl.nt = length;
+
+	/* Indicate SW is done */
+	priv->tx_packet_sent = true;
+	tx_ctrl.ct = NPS_ENET_ENABLE;
+
+	/* Send Frame */
+	nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl.value);
+}
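+
+/* Design note (inferred from the code below): nps_enet_start_xmit()
+ * stops the queue before calling nps_enet_send_frame(), so at most one
+ * frame is in flight at a time, and nps_enet_tx_handler() treats HW
+ * clearing tx_ctrl.ct as the completion signal.
+ */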
+
+/**
+ * nps_enet_set_mac_address - Set the MAC address for this device.
+ * @ndev:       Pointer to net_device structure.
+ * @p:          6-byte address to be written as the MAC address.
+ *
+ * This function copies the HW address from the sockaddr structure to the
+ * net_device structure and updates the address in HW.
+ *
+ * returns:     -EBUSY if the net device is busy or 0 if the address is set
+ *              successfully.
+ */
+static s32 nps_enet_set_mac_address(struct net_device *ndev, void *p)
+{
+	struct sockaddr *addr = p;
+	s32 res;
+
+	if (netif_running(ndev))
+		return -EBUSY;
+
+	res = eth_mac_addr(ndev, p);
+	if (!res) {
+		ether_addr_copy(ndev->dev_addr, addr->sa_data);
+		nps_enet_set_hw_mac_address(ndev);
+	}
+
+	return res;
+}
+
+/**
+ * nps_enet_set_rx_mode - Change the receive filtering mode.
+ * @ndev:       Pointer to the network device.
+ *
+ * This function enables/disables promiscuous mode
+ */
+static void nps_enet_set_rx_mode(struct net_device *ndev)
+{
+	struct nps_enet_priv *priv = netdev_priv(ndev);
+	struct nps_enet_ge_mac_cfg_2 ge_mac_cfg_2;
+
+	ge_mac_cfg_2.value = priv->ge_mac_cfg_2.value;
+
+	if (ndev->flags & IFF_PROMISC) {
+		ge_mac_cfg_2.disc_da = NPS_ENET_DISABLE;
+		ge_mac_cfg_2.disc_mc = NPS_ENET_DISABLE;
+	} else {
+		ge_mac_cfg_2.disc_da = NPS_ENET_ENABLE;
+		ge_mac_cfg_2.disc_mc = NPS_ENET_ENABLE;
+	}
+
+	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2.value);
+}
+
+/**
+ * nps_enet_open - Open the network device.
+ * @ndev:       Pointer to the network device.
+ *
+ * returns:     0 on success, or a non-zero error value on failure.
+ *
+ * This function sets the MAC address, requests and enables an IRQ
+ * for the ENET device and starts the Tx queue.
+ */
+static s32 nps_enet_open(struct net_device *ndev)
+{
+	struct nps_enet_priv *priv = netdev_priv(ndev);
+	s32 err;
+
+	/* Reset private variables */
+	priv->tx_packet_sent = false;
+	priv->ge_mac_cfg_2.value = 0;
+	priv->ge_mac_cfg_3.value = 0;
+
+	/* ge_mac_cfg_3 default values */
+	priv->ge_mac_cfg_3.rx_ifg_th = NPS_ENET_GE_MAC_CFG_3_RX_IFG_TH;
+	priv->ge_mac_cfg_3.max_len = NPS_ENET_GE_MAC_CFG_3_MAX_LEN;
+
+	/* Disable HW device */
+	nps_enet_hw_disable_control(ndev);
+
+	/* irq Rx allocation */
+	err = request_irq(priv->irq, nps_enet_irq_handler,
+			  0, "enet-rx-tx", ndev);
+	if (err)
+		return err;
+
+	napi_enable(&priv->napi);
+
+	/* Enable HW device */
+	nps_enet_hw_reset(ndev);
+	nps_enet_hw_enable_control(ndev);
+
+	netif_start_queue(ndev);
+
+	return 0;
+}
+
+/**
+ * nps_enet_stop - Close the network device.
+ * @ndev:       Pointer to the network device.
+ *
+ * This function stops the Tx queue and disables interrupts for the ENET device.
+ */
+static s32 nps_enet_stop(struct net_device *ndev)
+{
+	struct nps_enet_priv *priv = netdev_priv(ndev);
+
+	napi_disable(&priv->napi);
+	netif_stop_queue(ndev);
+	nps_enet_hw_disable_control(ndev);
+	free_irq(priv->irq, ndev);
+
+	return 0;
+}
+
+/**
+ * nps_enet_start_xmit - Starts the data transmission.
+ * @skb:        sk_buff pointer that contains data to be Transmitted.
+ * @ndev:       Pointer to net_device structure.
+ *
+ * returns:     NETDEV_TX_OK on success, or
+ *              NETDEV_TX_BUSY if any of the descriptors are not free.
+ *
+ * This function is invoked from upper layers to initiate transmission.
+ */
+static netdev_tx_t nps_enet_start_xmit(struct sk_buff *skb,
+				       struct net_device *ndev)
+{
+	struct nps_enet_priv *priv = netdev_priv(ndev);
+
+	/* This driver handles one frame at a time */
+	netif_stop_queue(ndev);
+
+	nps_enet_send_frame(ndev, skb);
+
+	priv->tx_skb = skb;
+
+	return NETDEV_TX_OK;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void nps_enet_poll_controller(struct net_device *ndev)
+{
+	disable_irq(ndev->irq);
+	nps_enet_irq_handler(ndev->irq, ndev);
+	enable_irq(ndev->irq);
+}
+#endif
+
+static const struct net_device_ops nps_netdev_ops = {
+	.ndo_open		= nps_enet_open,
+	.ndo_stop		= nps_enet_stop,
+	.ndo_start_xmit		= nps_enet_start_xmit,
+	.ndo_set_mac_address	= nps_enet_set_mac_address,
+	.ndo_set_rx_mode        = nps_enet_set_rx_mode,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= nps_enet_poll_controller,
+#endif
+};
+
+static s32 nps_enet_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct net_device *ndev;
+	struct nps_enet_priv *priv;
+	s32 err = 0;
+	const char *mac_addr;
+	struct resource *res_regs;
+
+	if (!dev->of_node)
+		return -ENODEV;
+
+	ndev = alloc_etherdev(sizeof(struct nps_enet_priv));
+	if (!ndev)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, ndev);
+	SET_NETDEV_DEV(ndev, dev);
+	priv = netdev_priv(ndev);
+
+	/* The EZ NET specific entries in the device structure. */
+	ndev->netdev_ops = &nps_netdev_ops;
+	ndev->watchdog_timeo = (400 * HZ / 1000);
+	/* FIXME :: no multicast support yet */
+	ndev->flags &= ~IFF_MULTICAST;
+
+	res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->regs_base = devm_ioremap_resource(dev, res_regs);
+	if (IS_ERR(priv->regs_base)) {
+		err = PTR_ERR(priv->regs_base);
+		goto out_netdev;
+	}
+	dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs_base);
+
+	/* set kernel MAC address to dev */
+	mac_addr = of_get_mac_address(dev->of_node);
+	if (mac_addr)
+		ether_addr_copy(ndev->dev_addr, mac_addr);
+	else
+		eth_hw_addr_random(ndev);
+
+	/* Get IRQ number */
+	priv->irq = platform_get_irq(pdev, 0);
+	if (!priv->irq) {
+		dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
+		err = -ENODEV;
+		goto out_netdev;
+	}
+
+	netif_napi_add(ndev, &priv->napi, nps_enet_poll,
+		       NPS_ENET_NAPI_POLL_WEIGHT);
+
+	/* Register the driver. Should be the last thing in probe */
+	err = register_netdev(ndev);
+	if (err) {
+		dev_err(dev, "Failed to register ndev for %s, err = 0x%08x\n",
+			ndev->name, (s32)err);
+		goto out_netif_api;
+	}
+
+	dev_info(dev, "(rx/tx=%d)\n", priv->irq);
+	return 0;
+
+out_netif_api:
+	netif_napi_del(&priv->napi);
+out_netdev:
+	if (err)
+		free_netdev(ndev);
+
+	return err;
+}
+
+static s32 nps_enet_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct nps_enet_priv *priv = netdev_priv(ndev);
+
+	unregister_netdev(ndev);
+	free_netdev(ndev);
+	netif_napi_del(&priv->napi);
+
+	return 0;
+}
+
+static const struct of_device_id nps_enet_dt_ids[] = {
+	{ .compatible = "ezchip,nps-mgt-enet" },
+	{ /* Sentinel */ }
+};
+
+static struct platform_driver nps_enet_driver = {
+	.probe = nps_enet_probe,
+	.remove = nps_enet_remove,
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table  = nps_enet_dt_ids,
+	},
+};
+
+module_platform_driver(nps_enet_driver);
+
+MODULE_AUTHOR("EZchip Semiconductor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h
new file mode 100644
index 0000000..fc45c9d
--- /dev/null
+++ b/drivers/net/ethernet/ezchip/nps_enet.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright(c) 2015 EZchip Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
+
+#ifndef _NPS_ENET_H
+#define _NPS_ENET_H
+
+/* default values */
+#define NPS_ENET_NAPI_POLL_WEIGHT		0x2
+#define NPS_ENET_MAX_FRAME_LENGTH		0x3FFF
+#define NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR	0x7
+#define NPS_ENET_GE_MAC_CFG_0_RX_IFG		0x5
+#define NPS_ENET_GE_MAC_CFG_0_TX_IFG		0xC
+#define NPS_ENET_GE_MAC_CFG_0_TX_PR_LEN		0x7
+#define NPS_ENET_GE_MAC_CFG_2_STAT_EN		0x3
+#define NPS_ENET_GE_MAC_CFG_3_RX_IFG_TH		0x14
+#define NPS_ENET_GE_MAC_CFG_3_MAX_LEN		0x3FFC
+#define NPS_ENET_ENABLE				1
+#define NPS_ENET_DISABLE			0
+
+/* register definitions  */
+#define NPS_ENET_REG_TX_CTL		0x800
+#define NPS_ENET_REG_TX_BUF		0x808
+#define NPS_ENET_REG_RX_CTL		0x810
+#define NPS_ENET_REG_RX_BUF		0x818
+#define NPS_ENET_REG_BUF_INT_ENABLE	0x8C0
+#define NPS_ENET_REG_BUF_INT_CAUSE	0x8C4
+#define NPS_ENET_REG_GE_MAC_CFG_0	0x1000
+#define NPS_ENET_REG_GE_MAC_CFG_1	0x1004
+#define NPS_ENET_REG_GE_MAC_CFG_2	0x1008
+#define NPS_ENET_REG_GE_MAC_CFG_3	0x100C
+#define NPS_ENET_REG_GE_RST		0x1400
+#define NPS_ENET_REG_PHASE_FIFO_CTL	0x1404
+
+/* Tx control register */
+struct nps_enet_tx_ctl {
+	union {
+		/* ct: SW sets to indicate frame ready in Tx buffer for
+		 *     transmission. HW resets it when transmission is done
+		 * et: Transmit error
+		 * nt: Length in bytes of Tx frame loaded to Tx buffer
+		 */
+		struct {
+			u32
+			__reserved_1:16,
+			ct:1,
+			et:1,
+			__reserved_2:3,
+			nt:11;
+		};
+
+		u32 value;
+	};
+};
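+
+/* Worked example (a sketch, assuming the big-endian bit layout implied
+ * by the ioread32be()/iowrite32be() accessors below): marking a
+ * 60-byte frame ready for transmission sets ct = 1 and nt = 60, i.e. a
+ * raw register value of (1 << 15) | 60 = 0x803c.
+ */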
+
+/* Rx control register */
+struct nps_enet_rx_ctl {
+	union {
+		/* cr:  HW sets to indicate frame ready in Rx buffer.
+		 *      SW resets it to indicate the host read the received
+		 *      frame and new frames can be written to the Rx buffer
+		 * er:  Rx error indication
+		 * crc: Rx CRC error indication
+		 * nr:  Length in bytes of Rx frame loaded by MAC to Rx buffer
+		 */
+		struct {
+			u32
+			__reserved_1:16,
+			cr:1,
+			er:1,
+			crc:1,
+			__reserved_2:2,
+			nr:11;
+		};
+
+		u32 value;
+	};
+};
+
+/* Interrupt enable for data buffer events register */
+struct nps_enet_buf_int_enable {
+	union {
+		/* tx_done: Interrupt generation in the case when current frame
+		 *          was read from the Tx buffer
+		 * rx_rdy:  Interrupt generation in the case when a new frame
+		 *          is ready in the Rx buffer
+		 */
+		struct {
+			u32
+			__reserved:30,
+			tx_done:1,
+			rx_rdy:1;
+		};
+
+		u32 value;
+	};
+};
+
+/* Interrupt cause for data buffer events register */
+struct nps_enet_buf_int_cause {
+	union {
+		/* tx_done: Interrupt in the case when current frame was
+		 *          read from TX buffer.
+		 * rx_rdy:  Interrupt in the case when new frame is ready
+		 *          in RX buffer.
+		 */
+		struct {
+			u32
+			__reserved:30,
+			tx_done:1,
+			rx_rdy:1;
+		};
+
+		u32 value;
+	};
+};
+
+/* Gbps Eth MAC Configuration 0 register */
+struct nps_enet_ge_mac_cfg_0 {
+	union {
+		/* tx_pr_len:          Transmit preamble length in bytes
+		 * tx_ifg_nib:         Tx idle pattern
+		 * nib_mode:           Nibble (4-bit) Mode
+		 * rx_pr_check_en:     Receive preamble Check Enable
+		 * tx_ifg:             Transmit inter-Frame Gap
+		 * rx_ifg:             Receive inter-Frame Gap
+		 * tx_fc_retr:         Transmit Flow Control Retransmit Mode
+		 * rx_length_check_en: Receive Length Check Enable
+		 * rx_crc_ignore:      Results of the CRC check are ignored
+		 * rx_crc_strip:       MAC strips the CRC from received frames
+		 * rx_fc_en:           Receive Flow Control Enable
+		 * tx_crc_en:          Transmit CRC Enabled
+		 * tx_pad_en:          Transmit Padding Enable
+		 * tx_fc_en:           Transmit Flow Control Enable
+		 * tx_en:              Transmit Enable
+		 * rx_en:              Receive Enable
+		 */
+		struct {
+			u32
+			tx_pr_len:4,
+			tx_ifg_nib:4,
+			nib_mode:1,
+			rx_pr_check_en:1,
+			tx_ifg:6,
+			rx_ifg:4,
+			tx_fc_retr:3,
+			rx_length_check_en:1,
+			rx_crc_ignore:1,
+			rx_crc_strip:1,
+			rx_fc_en:1,
+			tx_crc_en:1,
+			tx_pad_en:1,
+			tx_fc_en:1,
+			tx_en:1,
+			rx_en:1;
+		};
+
+		u32 value;
+	};
+};
+
+/* Gbps Eth MAC Configuration 1 register */
+struct nps_enet_ge_mac_cfg_1 {
+	union {
+		/* octet_3: MAC address octet 3
+		 * octet_2: MAC address octet 2
+		 * octet_1: MAC address octet 1
+		 * octet_0: MAC address octet 0
+		 */
+		struct {
+			u32
+			octet_3:8,
+			octet_2:8,
+			octet_1:8,
+			octet_0:8;
+		};
+
+		u32 value;
+	};
+};
+
+/* Gbps Eth MAC Configuration 2 register */
+struct nps_enet_ge_mac_cfg_2 {
+	union {
+		/* transmit_flush_en: MAC flush enable
+		 * stat_en:           RMON statistics interface enable
+		 * disc_da:           Discard frames with DA different
+		 *                    from MAC address
+		 * disc_bc:           Discard broadcast frames
+		 * disc_mc:           Discard multicast frames
+		 * octet_5:           MAC address octet 5
+		 * octet_4:           MAC address octet 4
+		 */
+		struct {
+			u32
+			transmit_flush_en:1,
+			__reserved_1:5,
+			stat_en:2,
+			__reserved_2:1,
+			disc_da:1,
+			disc_bc:1,
+			disc_mc:1,
+			__reserved_3:4,
+			octet_5:8,
+			octet_4:8;
+		};
+
+		u32 value;
+	};
+};
+
+/* Gbps Eth MAC Configuration 3 register */
+struct nps_enet_ge_mac_cfg_3 {
+	union {
+		/* ext_oob_cbfc_sel:  Selects one of the 4 profiles for
+		 *                    extended OOB in-flow-control indication
+		 * max_len:           Maximum receive frame length in bytes
+		 * tx_cbfc_en:        Enable transmission of class-based
+		 *                    flow control packets
+		 * rx_ifg_th:         Threshold for IFG status reporting via OOB
+		 * cf_timeout:        Configurable time to decrement FC counters
+		 * cf_drop:           Drop control frames
+		 * redirect_cbfc_sel: Selects one of CBFC redirect profiles
+		 * rx_cbfc_redir_en:  Enable Rx class-based flow
+		 *                    control redirect
+		 * rx_cbfc_en:        Enable Rx class-based flow control
+		 * tm_hd_mode:        TM header mode
+		 */
+		struct {
+			u32
+			ext_oob_cbfc_sel:2,
+			max_len:14,
+			tx_cbfc_en:1,
+			rx_ifg_th:5,
+			cf_timeout:4,
+			cf_drop:1,
+			redirect_cbfc_sel:2,
+			rx_cbfc_redir_en:1,
+			rx_cbfc_en:1,
+			tm_hd_mode:1;
+		};
+
+		u32 value;
+	};
+};
+
+/* GE MAC, PCS reset control register */
+struct nps_enet_ge_rst {
+	union {
+		/* gmac_0: GE MAC reset
+		 * spcs_0: SGMII PCS reset
+		 */
+		struct {
+			u32
+			__reserved_1:23,
+			gmac_0:1,
+			__reserved_2:7,
+			spcs_0:1;
+		};
+
+		u32 value;
+	};
+};
+
+/* Tx phase sync FIFO control register */
+struct nps_enet_phase_fifo_ctl {
+	union {
+		/* init: initialize serdes TX phase sync FIFO pointers
+		 * rst:  reset serdes TX phase sync FIFO
+		 */
+		struct {
+			u32
+			__reserved:30,
+			init:1,
+			rst:1;
+		};
+
+		u32 value;
+	};
+};
+
+/**
+ * struct nps_enet_priv - Storage of ENET's private information.
+ * @regs_base:      Base address of ENET memory-mapped control registers.
+ * @irq:            For RX/TX IRQ number.
+ * @tx_packet_sent: SW indication that a frame is being sent.
+ * @tx_skb:         Socket buffer of the frame being sent.
+ * @napi:           Structure for NAPI.
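+ * @ge_mac_cfg_2:   Shadow copy of the GE MAC CFG 2 register.
+ * @ge_mac_cfg_3:   Shadow copy of the GE MAC CFG 3 register.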
+ */
+struct nps_enet_priv {
+	void __iomem *regs_base;
+	s32 irq;
+	bool tx_packet_sent;
+	struct sk_buff *tx_skb;
+	struct napi_struct napi;
+	struct nps_enet_ge_mac_cfg_2 ge_mac_cfg_2;
+	struct nps_enet_ge_mac_cfg_3 ge_mac_cfg_3;
+};
+
+/**
+ * nps_enet_reg_set - Sets ENET register with provided value.
+ * @priv:       Pointer to EZchip ENET private data structure.
+ * @reg:        Register offset from base address.
+ * @value:      Value to set in register.
+ */
+static inline void nps_enet_reg_set(struct nps_enet_priv *priv,
+				    s32 reg, s32 value)
+{
+	iowrite32be(value, priv->regs_base + reg);
+}
+
+/**
+ * nps_enet_reg_get - Gets value of specified ENET register.
+ * @priv:       Pointer to EZchip ENET private data structure.
+ * @reg:        Register offset from base address.
+ *
+ * returns:     Value of requested register.
+ */
+static inline u32 nps_enet_reg_get(struct nps_enet_priv *priv, s32 reg)
+{
+	return ioread32be(priv->regs_base + reg);
+}
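+
+/* Usage sketch (illustrative only): reading the Rx control register and
+ * decoding it through the nps_enet_rx_ctl union defined above:
+ *
+ *	struct nps_enet_rx_ctl rx_ctrl;
+ *
+ *	rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
+ *	if (rx_ctrl.cr)
+ *		pr_debug("frame of %u bytes ready\n", rx_ctrl.nr);
+ */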
+
+#endif /* _NPS_ENET_H */
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index 5918c68..040c7f1 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on ARM
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 25e3425..b8de87b 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -9,9 +9,7 @@
 		   M523x || M527x || M5272 || M528x || M520x || M532x || \
 		   ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 66d47e4..e464aea 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2118,6 +2118,82 @@
 	strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
 }
 
+static int fec_enet_get_regs_len(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct resource *r;
+	int s = 0;
+
+	r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
+	if (r)
+		s = resource_size(r);
+
+	return s;
+}
+
+/* List of registers that can safely be read to dump them with ethtool */
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+	defined(CONFIG_M520x) || defined(CONFIG_M532x) ||		\
+	defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+static u32 fec_enet_register_offset[] = {
+	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
+	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
+	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
+	FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
+	FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
+	FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
+	FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
+	FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
+	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
+	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
+	FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
+	FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
+	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
+	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
+	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
+	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
+	RMON_T_P_GTE2048, RMON_T_OCTETS,
+	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
+	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
+	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
+	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
+	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
+	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
+	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
+	RMON_R_P_GTE2048, RMON_R_OCTETS,
+	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
+	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
+};
+#else
+static u32 fec_enet_register_offset[] = {
+	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
+	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
+	FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
+	FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
+	FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
+	FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
+	FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
+	FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
+	FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
+};
+#endif
+
+static void fec_enet_get_regs(struct net_device *ndev,
+			      struct ethtool_regs *regs, void *regbuf)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
+	u32 *buf = (u32 *)regbuf;
+	u32 i, off;
+
+	memset(buf, 0, regs->len);
+
+	for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
+		off = fec_enet_register_offset[i] / 4;
+		buf[off] = readl(&theregs[off]);
+	}
+}
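+
+/* Note on the ethtool contract (as implemented by the ethtool core):
+ * ethtool_get_regs() allocates a buffer of get_regs_len() bytes and
+ * hands it to get_regs(), so the memset() above guarantees that offsets
+ * not listed in fec_enet_register_offset[] read back as zero.
+ */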
+
 static int fec_enet_get_ts_info(struct net_device *ndev,
 				struct ethtool_ts_info *info)
 {
@@ -2515,6 +2591,8 @@
 	.get_settings		= fec_enet_get_settings,
 	.set_settings		= fec_enet_set_settings,
 	.get_drvinfo		= fec_enet_get_drvinfo,
+	.get_regs_len		= fec_enet_get_regs_len,
+	.get_regs		= fec_enet_get_regs,
 	.nway_reset		= fec_enet_nway_reset,
 	.get_link		= ethtool_op_get_link,
 	.get_coalesce		= fec_enet_get_coalesce,
@@ -2778,12 +2856,14 @@
 	if (ret)
 		goto err_enet_alloc;
 
+	/* Init MAC prior to mii bus probe */
+	fec_restart(ndev);
+
 	/* Probe and connect to PHY when open the interface */
 	ret = fec_enet_mii_probe(ndev);
 	if (ret)
 		goto err_enet_mii_probe;
 
-	fec_restart(ndev);
 	napi_enable(&fep->napi);
 	phy_start(fep->phy_dev);
 	netif_tx_start_all_queues(ndev);
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index a583d89..a15663a 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -353,6 +353,7 @@
 	tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
 	tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
 	writel(tmp, fep->hwp + FEC_ATIME_INC);
+	corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
 	writel(corr_period, fep->hwp + FEC_ATIME_CORR);
 	/* dummy read to update the timer. */
 	timecounter_read(&fep->tc);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 9b3639e..56316db 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -86,7 +86,7 @@
 	struct net_device *dev = fep->ndev;
 	const struct fs_platform_info *fpi = fep->fpi;
 	cbd_t __iomem *bdp;
-	struct sk_buff *skb, *skbn, *skbt;
+	struct sk_buff *skb, *skbn;
 	int received = 0;
 	u16 pkt_len, sc;
 	int curidx;
@@ -161,10 +161,7 @@
 					skb_reserve(skbn, 2);	/* align IP header */
 					skb_copy_from_linear_data(skb,
 						      skbn->data, pkt_len);
-					/* swap */
-					skbt = skb;
-					skb = skbn;
-					skbn = skbt;
+					swap(skb, skbn);
 				}
 			} else {
 				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
@@ -490,6 +487,9 @@
 {
 	struct sk_buff *new_skb;
 
+	if (skb_linearize(skb))
+		return NULL;
+
 	/* Alloc new skb */
 	new_skb = netdev_alloc_skb(dev, skb->len + 4);
 	if (!new_skb)
@@ -515,12 +515,27 @@
 	cbd_t __iomem *bdp;
 	int curidx;
 	u16 sc;
-	int nr_frags = skb_shinfo(skb)->nr_frags;
+	int nr_frags;
 	skb_frag_t *frag;
 	int len;
-
 #ifdef CONFIG_FS_ENET_MPC5121_FEC
-	if (((unsigned long)skb->data) & 0x3) {
+	int is_aligned = 1;
+	int i;
+
+	if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
+		is_aligned = 0;
+	} else {
+		nr_frags = skb_shinfo(skb)->nr_frags;
+		frag = skb_shinfo(skb)->frags;
+		for (i = 0; i < nr_frags; i++, frag++) {
+			if (!IS_ALIGNED(frag->page_offset, 4)) {
+				is_aligned = 0;
+				break;
+			}
+		}
+	}
+
+	if (!is_aligned) {
 		skb = tx_skb_align_workaround(dev, skb);
 		if (!skb) {
 			/*
@@ -532,6 +547,7 @@
 		}
 	}
 #endif
+
 	spin_lock(&fep->tx_lock);
 
 	/*
@@ -539,6 +555,7 @@
 	 */
 	bdp = fep->cur_tx;
 
+	nr_frags = skb_shinfo(skb)->nr_frags;
 	if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
 		netif_stop_queue(dev);
 		spin_unlock(&fep->tx_lock);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4ee080d..ff87502 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -516,6 +516,15 @@
 	return &dev->stats;
 }
 
+static int gfar_set_mac_addr(struct net_device *dev, void *p)
+{
+	eth_mac_addr(dev, p);
+
+	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
+
+	return 0;
+}
+
 static const struct net_device_ops gfar_netdev_ops = {
 	.ndo_open = gfar_enet_open,
 	.ndo_start_xmit = gfar_start_xmit,
@@ -526,7 +535,7 @@
 	.ndo_tx_timeout = gfar_timeout,
 	.ndo_do_ioctl = gfar_ioctl,
 	.ndo_get_stats = gfar_get_stats,
-	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_set_mac_address = gfar_set_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = gfar_netpoll,
@@ -1411,6 +1420,8 @@
 		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
 	}
 
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
 	gfar_init_addr_hash_table(priv);
 
 	/* Insert receive time stamps into padding alignment bytes */
@@ -2254,7 +2265,6 @@
 	int i, rq = 0;
 	int do_tstamp, do_csum, do_vlan;
 	u32 bufaddr;
-	unsigned long flags;
 	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
 
 	rq = skb->queue_mapping;
@@ -2434,19 +2444,6 @@
 
 	netdev_tx_sent_queue(txq, bytes_sent);
 
-	/* We can work in parallel with gfar_clean_tx_ring(), except
-	 * when modifying num_txbdfree. Note that we didn't grab the lock
-	 * when we were reading the num_txbdfree and checking for available
-	 * space, that's because outside of this function it can only grow,
-	 * and once we've got needed space, it cannot suddenly disappear.
-	 *
-	 * The lock also protects us from gfar_error(), which can modify
-	 * regs->tstat and thus retrigger the transfers, which is why we
-	 * also must grab the lock before setting ready bit for the first
-	 * to be transmitted BD.
-	 */
-	spin_lock_irqsave(&tx_queue->txlock, flags);
-
 	gfar_wmb();
 
 	txbdp_start->lstatus = cpu_to_be32(lstatus);
@@ -2463,8 +2460,15 @@
 
 	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
+	/* We can work in parallel with gfar_clean_tx_ring(), except
+	 * when modifying num_txbdfree. Note that we didn't grab the lock
+	 * when we were reading the num_txbdfree and checking for available
+	 * space, that's because outside of this function it can only grow.
+	 */
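+	/* BH-only locking is sufficient here: the other writer,
+	 * gfar_clean_tx_ring(), runs in NAPI (softirq) context, and
+	 * gfar_error() no longer takes txlock since its TX FIFO underrun
+	 * path now defers to reset_task (see below).
+	 */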
+	spin_lock_bh(&tx_queue->txlock);
 	/* reduce TxBD free count */
 	tx_queue->num_txbdfree -= (nr_txbds);
+	spin_unlock_bh(&tx_queue->txlock);
 
 	/* If the next BD still needs to be cleaned up, then the bds
 	 * are full.  We need to tell the kernel to stop sending us stuff.
@@ -2478,9 +2482,6 @@
 	/* Tell the DMA to go go go */
 	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
 
-	/* Unlock priv */
-	spin_unlock_irqrestore(&tx_queue->txlock, flags);
-
 	return NETDEV_TX_OK;
 
 dma_map_err:
@@ -2622,7 +2623,6 @@
 	skb_dirtytx = tx_queue->skb_dirtytx;
 
 	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
-		unsigned long flags;
 
 		frags = skb_shinfo(skb)->nr_frags;
 
@@ -2686,9 +2686,9 @@
 			      TX_RING_MOD_MASK(tx_ring_size);
 
 		howmany++;
-		spin_lock_irqsave(&tx_queue->txlock, flags);
+		spin_lock(&tx_queue->txlock);
 		tx_queue->num_txbdfree += nr_txbds;
-		spin_unlock_irqrestore(&tx_queue->txlock, flags);
+		spin_unlock(&tx_queue->txlock);
 	}
 
 	/* If we freed a buffer, we can restart transmission, if necessary */
@@ -3411,21 +3411,12 @@
 		if (events & IEVENT_CRL)
 			dev->stats.tx_aborted_errors++;
 		if (events & IEVENT_XFUN) {
-			unsigned long flags;
-
 			netif_dbg(priv, tx_err, dev,
 				  "TX FIFO underrun, packet dropped\n");
 			dev->stats.tx_dropped++;
 			atomic64_inc(&priv->extra_stats.tx_underrun);
 
-			local_irq_save(flags);
-			lock_tx_qs(priv);
-
-			/* Reactivate the Tx Queues */
-			gfar_write(&regs->tstat, gfargrp->tstat);
-
-			unlock_tx_qs(priv);
-			local_irq_restore(flags);
+			schedule_work(&priv->reset_task);
 		}
 		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
 	}
diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig
index 1085257..faee34e 100644
--- a/drivers/net/ethernet/fujitsu/Kconfig
+++ b/drivers/net/ethernet/fujitsu/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCMCIA
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  the questions about Fujitsu cards. If you say Y, you will be asked for
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index a54d897..dead17b 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on ARM
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 3b39fdd..d49bee3 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -798,7 +798,7 @@
 
 	for (i = 0; i < RX_DESC_NUM; i++)
 		if (priv->rx_buf[i])
-			put_page(virt_to_head_page(priv->rx_buf[i]));
+			skb_free_frag(priv->rx_buf[i]);
 
 	for (i = 0; i < TX_DESC_NUM; i++)
 		if (priv->tx_skb[i])
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 0ffdcd3..a5e077e 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -500,7 +500,6 @@
 		napi_gro_receive(&priv->napi, skb);
 		dev->stats.rx_packets++;
 		dev->stats.rx_bytes += skb->len;
-		dev->last_rx = jiffies;
 next:
 		pos = dma_ring_incr(pos, RX_DESC_NUM);
 	}
diff --git a/drivers/net/ethernet/hp/Kconfig b/drivers/net/ethernet/hp/Kconfig
index a0b8ece..d4df78c 100644
--- a/drivers/net/ethernet/hp/Kconfig
+++ b/drivers/net/ethernet/hp/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on ISA || EISA || PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
@@ -22,9 +20,7 @@
 	tristate "HP 10/100VG PCLAN (ISA, EISA, PCI) support"
 	depends on (ISA || EISA || PCI)
 	---help---
-	  If you have a network (Ethernet) card of this type, say Y and read
-	  the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card of this type, say Y here.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called hp100.
diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig
index 9521e68..e8d61f6 100644
--- a/drivers/net/ethernet/i825xx/Kconfig
+++ b/drivers/net/ethernet/i825xx/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on NET_VENDOR_INTEL
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question does not directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
index 563a1ac..99c1ceb 100644
--- a/drivers/net/ethernet/ibm/Kconfig
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PPC_PSERIES || PPC_DCR || (IBMEBUS && SPARSEMEM)
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index b9df0cb..b60a34d 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2999,7 +2999,7 @@
 static void __init emac_make_bootlist(void)
 {
 	struct device_node *np = NULL;
-	int j, max, i = 0, k;
+	int j, max, i = 0;
 	int cell_indices[EMAC_BOOT_LIST_SIZE];
 
 	/* Collect EMACs */
@@ -3026,12 +3026,8 @@
 	for (i = 0; max > 1 && (i < (max - 1)); i++)
 		for (j = i; j < max; j++) {
 			if (cell_indices[i] > cell_indices[j]) {
-				np = emac_boot_list[i];
-				emac_boot_list[i] = emac_boot_list[j];
-				emac_boot_list[j] = np;
-				k = cell_indices[i];
-				cell_indices[i] = cell_indices[j];
-				cell_indices[j] = k;
+				swap(emac_boot_list[i], emac_boot_list[j]);
+				swap(cell_indices[i], cell_indices[j]);
 			}
 		}
 }
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 1813476..29bbb62 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -58,7 +58,7 @@
 
 static const char ibmveth_driver_name[] = "ibmveth";
 static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
-#define ibmveth_driver_version "1.04"
+#define ibmveth_driver_version "1.05"
 
 MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
 MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
@@ -100,6 +100,8 @@
 	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
 	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
 	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
+	{ "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
+	{ "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) }
 };
 
 /* simple methods of getting data from the current rxq entry */
@@ -852,6 +854,10 @@
 	struct ibmveth_adapter *adapter = netdev_priv(dev);
 	int rx_csum = !!(features & NETIF_F_RXCSUM);
 	int rc;
+	netdev_features_t changed = features ^ dev->features;
+
+	if (features & NETIF_F_TSO & changed)
+		netdev_info(dev, "TSO feature requires all partitions to have updated driver");
 
 	if (rx_csum == adapter->rx_csum)
 		return 0;
@@ -1035,6 +1041,15 @@
 		descs[i+1].fields.address = dma_addr;
 	}
 
+	if (skb_is_gso(skb) && !skb_is_gso_v6(skb)) {
+		/* Put -1 in the IP checksum to tell phyp it
+		 * is a largesend packet, and put the MSS in the TCP checksum.
+		 */
+		ip_hdr(skb)->check = 0xffff;
+		tcp_hdr(skb)->check = cpu_to_be16(skb_shinfo(skb)->gso_size);
+		adapter->tx_large_packets++;
+	}
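+	/* The receive side of this convention is in ibmveth_poll() below,
+	 * which spots the 0xffff IP-checksum marker and rebuilds the real
+	 * header checksum before passing the skb up the stack.
+	 */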
+
 	if (ibmveth_send(adapter, descs)) {
 		adapter->tx_send_failed++;
 		netdev->stats.tx_dropped++;
@@ -1080,6 +1095,7 @@
 	struct net_device *netdev = adapter->netdev;
 	int frames_processed = 0;
 	unsigned long lpar_rc;
+	struct iphdr *iph;
 
 restart_poll:
 	while (frames_processed < budget) {
@@ -1122,10 +1138,23 @@
 			skb_put(skb, length);
 			skb->protocol = eth_type_trans(skb, netdev);
 
-			if (csum_good)
+			if (csum_good) {
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
+					iph = (struct iphdr *)skb->data;
 
-			netif_receive_skb(skb);	/* send it up */
+					/* If the IP checksum is not offloaded and the
+					 * packet is a large-send packet, the checksum
+					 * must be rebuilt.
+					 */
+					if (iph->check == 0xffff) {
+						iph->check = 0;
+						iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+						adapter->rx_large_packets++;
+					}
+				}
+			}
+
+			napi_gro_receive(napi, skb);	/* send it up */
 
 			netdev->stats.rx_packets++;
 			netdev->stats.rx_bytes += length;
@@ -1422,8 +1451,14 @@
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 	netdev->features |= netdev->hw_features;
 
+	/* TSO is disabled by default */
+	netdev->hw_features |= NETIF_F_TSO;
+
 	memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
 
+	if (firmware_has_feature(FW_FEATURE_CMO))
+		memcpy(pool_count, pool_count_cmo, sizeof(pool_count));
+
 	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
 		int error;
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 1f37499..41dedb1 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -104,7 +104,8 @@
 
 static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
 static int pool_count[] = { 256, 512, 256, 256, 256 };
-static int pool_active[] = { 1, 1, 0, 0, 0};
+static int pool_count_cmo[] = { 256, 512, 256, 256, 64 };
+static int pool_active[] = { 1, 1, 0, 0, 1};
 
 #define IBM_VETH_INVALID_MAP ((u16)0xffff)
 
@@ -160,6 +161,8 @@
     u64 rx_no_buffer;
     u64 tx_map_failed;
     u64 tx_send_failed;
+    u64 tx_large_packets;
+    u64 rx_large_packets;
 };
 
 /*
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index f4ff465..4163b16 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -6,9 +6,7 @@
 	bool "Intel devices"
 	default y
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 1a450f4..d2657a4 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -874,7 +874,7 @@
 {
 	struct cb *cb;
 	unsigned long flags;
-	int err = 0;
+	int err;
 
 	spin_lock_irqsave(&nic->cb_lock, flags);
 
@@ -2922,9 +2922,7 @@
 
 	pci_set_master(pdev);
 
-	init_timer(&nic->watchdog);
-	nic->watchdog.function = e100_watchdog;
-	nic->watchdog.data = (unsigned long)nic;
+	setup_timer(&nic->watchdog, e100_watchdog, (unsigned long)nic);
 
 	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
 
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 983eb4e..74dc150 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2079,11 +2079,6 @@
 	return data;
 }
 
-static void e1000_free_frag(const void *data)
-{
-	put_page(virt_to_head_page(data));
-}
-
 /**
  * e1000_clean_rx_ring - Free Rx Buffers per Queue
  * @adapter: board private structure
@@ -2107,7 +2102,7 @@
 						 adapter->rx_buffer_len,
 						 DMA_FROM_DEVICE);
 			if (buffer_info->rxbuf.data) {
-				e1000_free_frag(buffer_info->rxbuf.data);
+				skb_free_frag(buffer_info->rxbuf.data);
 				buffer_info->rxbuf.data = NULL;
 			}
 		} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
@@ -4594,28 +4589,28 @@
 			data = e1000_alloc_frag(adapter);
 			/* Failed allocation, critical failure */
 			if (!data) {
-				e1000_free_frag(olddata);
+				skb_free_frag(olddata);
 				adapter->alloc_rx_buff_failed++;
 				break;
 			}
 
 			if (!e1000_check_64k_bound(adapter, data, bufsz)) {
 				/* give up */
-				e1000_free_frag(data);
-				e1000_free_frag(olddata);
+				skb_free_frag(data);
+				skb_free_frag(olddata);
 				adapter->alloc_rx_buff_failed++;
 				break;
 			}
 
 			/* Use new allocation */
-			e1000_free_frag(olddata);
+			skb_free_frag(olddata);
 		}
 		buffer_info->dma = dma_map_single(&pdev->dev,
 						  data,
 						  adapter->rx_buffer_len,
 						  DMA_FROM_DEVICE);
 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
-			e1000_free_frag(data);
+			skb_free_frag(data);
 			buffer_info->dma = 0;
 			adapter->alloc_rx_buff_failed++;
 			break;
@@ -4637,7 +4632,7 @@
 					 adapter->rx_buffer_len,
 					 DMA_FROM_DEVICE);
 
-			e1000_free_frag(data);
+			skb_free_frag(data);
 			buffer_info->rxbuf.data = NULL;
 			buffer_info->dma = 0;
 
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 08f22f3..2af603f 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
index 535a943..a2162e1 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index dc79ed8..5f70164 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -2010,7 +2010,7 @@
 	.flags2			= FLAG2_DISABLE_ASPM_L1
 				  | FLAG2_DISABLE_ASPM_L0S,
 	.pba			= 20,
-	.max_hw_frame_size	= ETH_FRAME_LEN + ETH_FCS_LEN,
+	.max_hw_frame_size	= VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
 	.get_variants		= e1000_get_variants_82571,
 	.mac_ops		= &e82571_mac_ops,
 	.phy_ops		= &e82_phy_ops_m88,
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
index 2e758f7..abc6a9a 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.h
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 0570c66..133d407 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 0abc942..0b748d1 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -98,6 +98,8 @@
 #define DEFAULT_RADV			8
 #define BURST_RDTR			0x20
 #define BURST_RADV			0x20
+#define PCICFG_DESC_RING_STATUS		0xe4
+#define FLUSH_DESC_REQUIRED		0x100
 
 /* in the case of WTHRESH, it appears at least the 82571/2 hardware
  * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
@@ -384,6 +386,10 @@
 #define INCVALUE_SHIFT_25MHz	18
 #define INCPERIOD_25MHz		1
 
+#define INCVALUE_24MHz		125
+#define INCVALUE_SHIFT_24MHz	14
+#define INCPERIOD_24MHz		3
+
 /* Another drawback of scaling the incvalue by a large factor is the
  * 64-bit SYSTIM register overflows more quickly.  This is dealt with
  * by simply reading the clock before it overflows.
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 11f486e..ad6daa6 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -1516,8 +1516,19 @@
 static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	u32 rctl;
+	u32 rctl, fext_nvm11, tarc0;
 
+	if (hw->mac.type == e1000_pch_spt) {
+		fext_nvm11 = er32(FEXTNVM11);
+		fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
+		ew32(FEXTNVM11, fext_nvm11);
+		tarc0 = er32(TARC(0));
+		/* clear bits 28 & 29 (control of MULR concurrent requests) */
+		tarc0 &= 0xcfffffff;
+		/* set bit 29 (value of MULR requests is now 2) */
+		tarc0 |= 0x20000000;
+		ew32(TARC(0), tarc0);
+	}
 	if (hw->phy.media_type == e1000_media_type_fiber ||
 	    hw->phy.media_type == e1000_media_type_internal_serdes) {
 		switch (hw->mac.type) {
@@ -1542,7 +1553,7 @@
 static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	u32 rctl;
+	u32 rctl, fext_nvm11, tarc0;
 	u16 phy_reg;
 
 	rctl = er32(RCTL);
@@ -1550,6 +1561,16 @@
 	ew32(RCTL, rctl);
 
 	switch (hw->mac.type) {
+	case e1000_pch_spt:
+		fext_nvm11 = er32(FEXTNVM11);
+		fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
+		ew32(FEXTNVM11, fext_nvm11);
+		tarc0 = er32(TARC(0));
+		/* clear bits 28 & 29 (control of MULR concurrent requests) */
+		/* set bit 29 (value of MULR requests is now 0) */
+		tarc0 &= 0xcfffffff;
+		ew32(TARC(0), tarc0);
+		/* fall through */
 	case e1000_80003es2lan:
 		if (hw->phy.media_type == e1000_media_type_fiber ||
 		    hw->phy.media_type == e1000_media_type_internal_serdes) {
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 19e8c48..c9da465 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9d81c03..b074b9a 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -1014,8 +1014,7 @@
 		u16 speed, duplex, scale = 0;
 		u16 max_snoop, max_nosnoop;
 		u16 max_ltr_enc;	/* max LTR latency encoded */
-		s64 lat_ns;	/* latency (ns) */
-		s64 value;
+		u64 value;
 		u32 rxa;
 
 		if (!hw->adapter->max_frame_size) {
@@ -1040,14 +1039,11 @@
 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
 		 */
-		lat_ns = ((s64)rxa * 1024 -
-			  (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000;
-		if (lat_ns < 0)
-			lat_ns = 0;
-		else
-			do_div(lat_ns, speed);
+		rxa *= 512;
+		value = (rxa > hw->adapter->max_frame_size) ?
+			(rxa - hw->adapter->max_frame_size) * (16000 / speed) :
+			0;
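+		/* Algebraically this matches the old computation,
+		 * ((rxa * 1024) - (2 * max_frame_size)) * 8 * 1000 / speed,
+		 * because rxa was pre-scaled by 512 above (modulo the
+		 * integer-division rounding in 16000 / speed).
+		 */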
 
-		value = lat_ns;
 		while (value > PCI_LTR_VALUE_MASK) {
 			scale++;
 			value = DIV_ROUND_UP(value, (1 << 5));
@@ -1563,7 +1559,7 @@
 	    ((adapter->hw.mac.type >= e1000_pch2lan) &&
 	     (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
 		adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
-		adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
+		adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
 
 		hw->mac.ops.blink_led = NULL;
 	}
@@ -5681,7 +5677,7 @@
 				  | FLAG_HAS_FLASH
 				  | FLAG_APME_IN_WUC,
 	.pba			= 8,
-	.max_hw_frame_size	= ETH_FRAME_LEN + ETH_FCS_LEN,
+	.max_hw_frame_size	= VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
 	.get_variants		= e1000_get_variants_ich8lan,
 	.mac_ops		= &ich8_mac_ops,
 	.phy_ops		= &ich8_phy_ops,
@@ -5754,7 +5750,7 @@
 	.flags2			= FLAG2_HAS_PHY_STATS
 				  | FLAG2_HAS_EEE,
 	.pba			= 26,
-	.max_hw_frame_size	= 9018,
+	.max_hw_frame_size	= 9022,
 	.get_variants		= e1000_get_variants_ich8lan,
 	.mac_ops		= &ich8_mac_ops,
 	.phy_ops		= &ich8_phy_ops,
@@ -5774,7 +5770,7 @@
 	.flags2			= FLAG2_HAS_PHY_STATS
 				  | FLAG2_HAS_EEE,
 	.pba			= 26,
-	.max_hw_frame_size	= 9018,
+	.max_hw_frame_size	= 9022,
 	.get_variants		= e1000_get_variants_ich8lan,
 	.mac_ops		= &ich8_mac_ops,
 	.phy_ops		= &ich8_phy_ops,
@@ -5794,7 +5790,7 @@
 	.flags2			= FLAG2_HAS_PHY_STATS
 				  | FLAG2_HAS_EEE,
 	.pba			= 26,
-	.max_hw_frame_size	= 9018,
+	.max_hw_frame_size	= 9022,
 	.get_variants		= e1000_get_variants_ich8lan,
 	.mac_ops		= &ich8_mac_ops,
 	.phy_ops		= &ich8_phy_ops,
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 770a573..2645985 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -98,8 +98,15 @@
 #define E1000_FEXTNVM6_K1_OFF_ENABLE	0x80000000
 /* bit for disabling packet buffer read */
 #define E1000_FEXTNVM7_DISABLE_PB_READ	0x00040000
-
+#define E1000_FEXTNVM7_SIDE_CLK_UNGATE	0x00000004
 #define E1000_FEXTNVM7_DISABLE_SMB_PERST	0x00000020
+#define E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS	0x00000800
+#define E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS	0x00001000
+#define E1000_FEXTNVM11_DISABLE_PB_READ		0x00000200
+#define E1000_FEXTNVM11_DISABLE_MULR_FIX	0x00002000
+
+/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */
+#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
 
 #define K1_ENTRY_LATENCY	0
 #define K1_MIN_TIME		1
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 30b74d5..e59d7c2 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index 0513d90..8284618 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index 06edfca..cc9b3be 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h
index a8c27f9..0b9ea59 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.h
+++ b/drivers/net/ethernet/intel/e1000e/manage.h
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index c509a5c..e62b9dc 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -48,7 +48,7 @@
 
 #define DRV_EXTRAVERSION "-k"
 
-#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION
+#define DRV_VERSION "3.2.5" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -3525,22 +3525,30 @@
 	switch (hw->mac.type) {
 	case e1000_pch2lan:
 	case e1000_pch_lpt:
-	case e1000_pch_spt:
-		/* On I217, I218 and I219, the clock frequency is 25MHz
-		 * or 96MHz as indicated by the System Clock Frequency
-		 * Indication
-		 */
-		if (((hw->mac.type != e1000_pch_lpt) &&
-		     (hw->mac.type != e1000_pch_spt)) ||
-		    (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
+		if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
 			/* Stable 96MHz frequency */
 			incperiod = INCPERIOD_96MHz;
 			incvalue = INCVALUE_96MHz;
 			shift = INCVALUE_SHIFT_96MHz;
 			adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
+		} else {
+			/* Stable 25MHz frequency */
+			incperiod = INCPERIOD_25MHz;
+			incvalue = INCVALUE_25MHz;
+			shift = INCVALUE_SHIFT_25MHz;
+			adapter->cc.shift = shift;
+		}
+		break;
+	case e1000_pch_spt:
+		if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
+			/* Stable 24MHz frequency */
+			incperiod = INCPERIOD_24MHz;
+			incvalue = INCVALUE_24MHz;
+			shift = INCVALUE_SHIFT_24MHz;
+			adapter->cc.shift = shift;
 			break;
 		}
-		/* fall-through */
+		return -EINVAL;
 	case e1000_82574:
 	case e1000_82583:
 		/* Stable 25MHz frequency */
@@ -3788,6 +3796,108 @@
 }
 
 /**
+ * e1000_flush_tx_ring - remove all descriptors from the tx_ring
+ *
+ * We want to clear all pending descriptors from the TX ring.
+ * Zeroing happens when the HW reads the regs. We assign the ring itself as
+ * the data of the next descriptor; the contents don't matter since we are
+ * about to reset the HW anyway.
+ */
+static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct e1000_tx_desc *tx_desc = NULL;
+	u32 tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS;
+	u16 size = 512;
+
+	tctl = er32(TCTL);
+	ew32(TCTL, tctl | E1000_TCTL_EN);
+	tdt = er32(TDT(0));
+	BUG_ON(tdt != tx_ring->next_to_use);
+	tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use);
+	tx_desc->buffer_addr = tx_ring->dma;
+
+	tx_desc->lower.data = cpu_to_le32(txd_lower | size);
+	tx_desc->upper.data = 0;
+	/* flush descriptors to memory before notifying the HW */
+	wmb();
+	tx_ring->next_to_use++;
+	if (tx_ring->next_to_use == tx_ring->count)
+		tx_ring->next_to_use = 0;
+	ew32(TDT(0), tx_ring->next_to_use);
+	mmiowb();
+	usleep_range(200, 250);
+}
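
The flush hinges on the usual descriptor-publish ordering: fill in the descriptor, issue a write barrier so it is visible in memory, then advance the tail register so the hardware fetches it. A minimal sketch of that pattern, with a hypothetical ring type and doorbell helper:

    /* sketch only: struct ring and write_tail_reg() are hypothetical */
    static void ring_publish(struct ring *r, const struct tx_desc *d)
    {
            r->desc[r->next_to_use] = *d;       /* 1. fill the descriptor */
            wmb();                              /* 2. order desc vs. tail */
            r->next_to_use = (r->next_to_use + 1) % r->count;
            write_tail_reg(r, r->next_to_use);  /* 3. doorbell, HW fetches */
    }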
+
+/**
+ * e1000_flush_rx_ring - remove all descriptors from the rx_ring
+ *
+ * Mark all descriptors in the RX ring as consumed and disable the rx ring
+ */
+static void e1000_flush_rx_ring(struct e1000_adapter *adapter)
+{
+	u32 rctl, rxdctl;
+	struct e1000_hw *hw = &adapter->hw;
+
+	rctl = er32(RCTL);
+	ew32(RCTL, rctl & ~E1000_RCTL_EN);
+	e1e_flush();
+	usleep_range(100, 150);
+
+	rxdctl = er32(RXDCTL(0));
+	/* zero the lower 14 bits (prefetch and host thresholds) */
+	rxdctl &= 0xffffc000;
+
+	/* update thresholds: prefetch threshold to 31, host threshold to 1
+	 * and make sure the granularity is "descriptors" and not "cache lines"
+	 */
+	rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
+
+	ew32(RXDCTL(0), rxdctl);
+	/* momentarily enable the RX ring for the changes to take effect */
+	ew32(RCTL, rctl | E1000_RCTL_EN);
+	e1e_flush();
+	usleep_range(100, 150);
+	ew32(RCTL, rctl & ~E1000_RCTL_EN);
+}
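
For reference, the masking above touches only the RXDCTL threshold fields: bits 13:0 hold the prefetch and host thresholds, and bit 24 selects whether they are counted in descriptors or cache lines. The same computation, spelled out (bit positions as implied by the masks used above):

    rxdctl &= ~0x3FFF;                        /* clear PTHRESH/HTHRESH   */
    rxdctl |= 0x1F;                           /* prefetch threshold = 31 */
    rxdctl |= 1 << 8;                         /* host threshold     = 1  */
    rxdctl |= E1000_RXDCTL_THRESH_UNIT_DESC;  /* units: descriptors      */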
+
+/**
+ * e1000_flush_desc_rings - remove all descriptors from the descriptor rings
+ *
+ * On i219, the descriptor rings must be emptied before resetting the HW
+ * or before changing the device state to D3 during runtime (runtime PM).
+ *
+ * Failure to do this will cause the HW to enter a unit hang state which can
+ * only be released by a PCI reset of the device.
+ */
+static void e1000_flush_desc_rings(struct e1000_adapter *adapter)
+{
+	u16 hang_state;
+	u32 fext_nvm11, tdlen;
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* First, disable MULR fix in FEXTNVM11 */
+	fext_nvm11 = er32(FEXTNVM11);
+	fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
+	ew32(FEXTNVM11, fext_nvm11);
+	/* do nothing if we're not in a faulty state, or if the queue is empty */
+	tdlen = er32(TDLEN(0));
+	pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS,
+			     &hang_state);
+	if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
+		return;
+	e1000_flush_tx_ring(adapter);
+	/* recheck, maybe the fault is caused by the rx ring */
+	pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS,
+			     &hang_state);
+	if (hang_state & FLUSH_DESC_REQUIRED)
+		e1000_flush_rx_ring(adapter);
+}
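
The workaround only helps if it runs before the hardware is reset or moved to D3, which is why the call sites added below gate it on e1000_pch_spt ahead of the reset proper. Condensed, the intended ordering is:

    if (hw->mac.type == e1000_pch_spt)
            e1000_flush_desc_rings(adapter);  /* drain the rings first */
    mac->ops.reset_hw(hw);                    /* ...then reset         */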
+
+/**
  * e1000e_reset - bring the hardware into a known good state
  *
  * This function boots the hardware and enables some settings that
@@ -3807,7 +3917,7 @@
 	/* reset Packet Buffer Allocation to default */
 	ew32(PBA, pba);
 
-	if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
+	if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) {
 		/* To maintain wire speed transmits, the Tx FIFO should be
 		 * large enough to accommodate two full transmit packets,
 		 * rounded up to the next 1KB and expressed in KB.  Likewise,
@@ -3943,6 +4053,8 @@
 		}
 	}
 
+	if (hw->mac.type == e1000_pch_spt)
+		e1000_flush_desc_rings(adapter);
 	/* Allow time for pending master requests to run */
 	mac->ops.reset_hw(hw);
 
@@ -4016,6 +4128,20 @@
 		phy_data &= ~IGP02E1000_PM_SPD;
 		e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
 	}
+	if (hw->mac.type == e1000_pch_spt && adapter->int_mode == 0) {
+		u32 reg;
+
+		/* Fextnvm7 @ 0xe4[2] = 1 */
+		reg = er32(FEXTNVM7);
+		reg |= E1000_FEXTNVM7_SIDE_CLK_UNGATE;
+		ew32(FEXTNVM7, reg);
+		/* Fextnvm9 @ 0x5bb4[13:12] = 11 */
+		reg = er32(FEXTNVM9);
+		reg |= E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS |
+		       E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS;
+		ew32(FEXTNVM9, reg);
+	}
 }
 
 int e1000e_up(struct e1000_adapter *adapter)
@@ -4115,8 +4241,6 @@
 	spin_unlock(&adapter->stats64_lock);
 
 	e1000e_flush_descriptors(adapter);
-	e1000_clean_tx_ring(adapter->tx_ring);
-	e1000_clean_rx_ring(adapter->rx_ring);
 
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
@@ -4127,8 +4251,14 @@
 	    e1000_lv_jumbo_workaround_ich8lan(hw, false))
 		e_dbg("failed to disable jumbo frame workaround mode\n");
 
-	if (reset && !pci_channel_offline(adapter->pdev))
-		e1000e_reset(adapter);
+	if (!pci_channel_offline(adapter->pdev)) {
+		if (reset)
+			e1000e_reset(adapter);
+		else if (hw->mac.type == e1000_pch_spt)
+			e1000_flush_desc_rings(adapter);
+	}
+	e1000_clean_tx_ring(adapter->tx_ring);
+	e1000_clean_rx_ring(adapter->rx_ring);
 }
 
 void e1000e_reinit_locked(struct e1000_adapter *adapter)
@@ -4151,9 +4281,16 @@
 						     cc);
 	struct e1000_hw *hw = &adapter->hw;
 	cycle_t systim, systim_next;
+	/* SYSTIMH latching upon SYSTIML read does not work well. To work
+	 * around this we must not allow SYSTIML to overflow (and SYSTIMH to
+	 * change) between the two reads, so if we read a value close to
+	 * overflow we wait for the overflow to occur and read both registers
+	 * when it's safe.
+	 */
+	u32 systim_overflow_latch_fix = 0x3FFFFFFF;
 
-	/* latch SYSTIMH on read of SYSTIML */
-	systim = (cycle_t)er32(SYSTIML);
+	do {
+		systim = (cycle_t)er32(SYSTIML);
+	} while (systim > systim_overflow_latch_fix);
 	systim |= (cycle_t)er32(SYSTIMH) << 32;
 
 	if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
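
Put together, the read side is a bounded retry on the low half of a free-running 64-bit counter split across two 32-bit registers. A minimal sketch of the resulting helper, using the driver's er32() accessor:

    static u64 systim_read(struct e1000_hw *hw)
    {
            u32 lo, hi;

            /* retry while SYSTIML is in its top quarter, so SYSTIMH
             * cannot roll over between the two reads
             */
            do {
                    lo = er32(SYSTIML);
            } while (lo > 0x3FFFFFFF);
            hi = er32(SYSTIMH);

            return ((u64)hi << 32) | lo;
    }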
@@ -4196,9 +4333,9 @@
 {
 	struct net_device *netdev = adapter->netdev;
 
-	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
+	adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
 	adapter->rx_ps_bsize0 = 128;
-	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 	adapter->tx_ring_count = E1000_DEFAULT_TXD;
 	adapter->rx_ring_count = E1000_DEFAULT_RXD;
@@ -5781,17 +5918,17 @@
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
+	int max_frame = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
 
 	/* Jumbo frame support */
-	if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+	if ((max_frame > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) &&
 	    !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
 		e_err("Jumbo Frames not supported.\n");
 		return -EINVAL;
 	}
 
 	/* Supported frame sizes */
-	if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
+	if ((new_mtu < (VLAN_ETH_ZLEN + ETH_FCS_LEN)) ||
 	    (max_frame > adapter->max_hw_frame_size)) {
 		e_err("Unsupported MTU setting\n");
 		return -EINVAL;
@@ -5831,10 +5968,8 @@
 		adapter->rx_buffer_len = 4096;
 
 	/* adjust allocation if LPE protects us, and we aren't using SBP */
-	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
-	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
-		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
-		    + ETH_FCS_LEN;
+	if (max_frame <= (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN))
+		adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
 
 	if (netif_running(netdev))
 		e1000e_up(adapter);
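
The length conversions above rely on the standard macros from <linux/if_ether.h> and <linux/if_vlan.h>; for reference:

    ETH_HLEN            /*   14: Ethernet header                 */
    VLAN_HLEN           /*    4: 802.1Q tag                      */
    VLAN_ETH_HLEN       /*   18: ETH_HLEN + VLAN_HLEN            */
    ETH_ZLEN            /*   60: minimum frame length, no FCS    */
    VLAN_ETH_ZLEN       /*   64: minimum tagged frame, no FCS    */
    ETH_FRAME_LEN       /* 1514: maximum untagged frame, no FCS  */
    VLAN_ETH_FRAME_LEN  /* 1518: maximum tagged frame, no FCS    */
    ETH_FCS_LEN         /*    4: frame check sequence (CRC)      */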
@@ -6678,6 +6813,19 @@
 	}
 }
 
+static netdev_features_t e1000_fix_features(struct net_device *netdev,
+					    netdev_features_t features)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
+	if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
+		features &= ~NETIF_F_RXFCS;
+
+	return features;
+}
+
 static int e1000_set_features(struct net_device *netdev,
 			      netdev_features_t features)
 {
@@ -6734,6 +6882,7 @@
 	.ndo_poll_controller	= e1000_netpoll,
 #endif
 	.ndo_set_features = e1000_set_features,
+	.ndo_fix_features = e1000_fix_features,
 };
 
 /**
@@ -7289,7 +7438,7 @@
 
 	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
 		e1000e_driver_version);
-	pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
+	pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n");
 	ret = pci_register_driver(&e1000_driver);
 
 	return ret;
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index fa6b103..49f205c 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h
index 342bf69..5d46967 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.h
+++ b/drivers/net/ethernet/intel/e1000e/nvm.h
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index aa1923f..6d8c39a 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index b2005e1..de13aea 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index 537d278..55bfe47 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 8d7b21d..25a0ad5 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 85eefc4..b24e5fe 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -38,6 +38,8 @@
 #define E1000_FEXTNVM4	0x00024	/* Future Extended NVM 4 - RW */
 #define E1000_FEXTNVM6	0x00010	/* Future Extended NVM 6 - RW */
 #define E1000_FEXTNVM7	0x000E4	/* Future Extended NVM 7 - RW */
+#define E1000_FEXTNVM9	0x5BB4  /* Future Extended NVM 9 - RW */
+#define E1000_FEXTNVM11	0x5BBC  /* Future Extended NVM 11 - RW */
 #define E1000_PCIEANACFG	0x00F18	/* PCIE Analog Config */
 #define E1000_FCT	0x00030	/* Flow Control Type - RW */
 #define E1000_VET	0x00038	/* VLAN Ether Type - RW */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 4b9d9f8..c6dc968 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -124,7 +124,7 @@
 {
 	struct fm10k_intfc *interface = netdev_priv(dev);
 	char *p = (char *)data;
-	int i;
+	unsigned int i;
 
 	switch (stringset) {
 	case ETH_SS_TEST:
@@ -143,12 +143,13 @@
 			p += ETH_GSTRING_LEN;
 		}
 
-		if (interface->hw.mac.type != fm10k_mac_vf)
+		if (interface->hw.mac.type != fm10k_mac_vf) {
 			for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
 				memcpy(p, fm10k_gstrings_pf_stats[i].stat_string,
 				       ETH_GSTRING_LEN);
 				p += ETH_GSTRING_LEN;
 			}
+		}
 
 		for (i = 0; i < interface->hw.mac.max_queues; i++) {
 			sprintf(p, "tx_queue_%u_packets", i);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 5b08e62..94571e6 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -400,11 +400,31 @@
 	return num_vfs;
 }
 
+static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
+				       struct fm10k_vf_info *vf_info)
+{
+	struct fm10k_hw *hw = &interface->hw;
+
+	/* assigning the MAC address will send a mailbox message */
+	fm10k_mbx_lock(interface);
+
+	/* disable LPORT for this VF which clears switch rules */
+	hw->iov.ops.reset_lport(hw, vf_info);
+
+	/* assign new MAC+VLAN for this VF */
+	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
+
+	/* re-enable the LPORT for this VF */
+	hw->iov.ops.set_lport(hw, vf_info, vf_info->vf_idx,
+			      FM10K_VF_FLAG_MULTI_CAPABLE);
+
+	fm10k_mbx_unlock(interface);
+}
+
 int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
 {
 	struct fm10k_intfc *interface = netdev_priv(netdev);
 	struct fm10k_iov_data *iov_data = interface->iov_data;
-	struct fm10k_hw *hw = &interface->hw;
 	struct fm10k_vf_info *vf_info;
 
 	/* verify SR-IOV is active and that vf idx is valid */
@@ -419,13 +439,7 @@
 	vf_info = &iov_data->vf_info[vf_idx];
 	ether_addr_copy(vf_info->mac, mac);
 
-	/* assigning the MAC will send a mailbox message so lock is needed */
-	fm10k_mbx_lock(interface);
-
-	/* assign MAC address to VF */
-	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
-
-	fm10k_mbx_unlock(interface);
+	fm10k_reset_vf_info(interface, vf_info);
 
 	return 0;
 }
@@ -455,16 +469,10 @@
 	/* record default VLAN ID for VF */
 	vf_info->pf_vid = vid;
 
-	/* assigning the VLAN will send a mailbox message so lock is needed */
-	fm10k_mbx_lock(interface);
-
 	/* Clear the VLAN table for the VF */
 	hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);
 
-	/* Update VF assignment and trigger reset */
-	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
-
-	fm10k_mbx_unlock(interface);
+	fm10k_reset_vf_info(interface, vf_info);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index c754b20..982fdcd 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -269,16 +269,19 @@
 			      struct sk_buff *skb)
 {
 	struct page *page = rx_buffer->page;
+	unsigned char *va = page_address(page) + rx_buffer->page_offset;
 	unsigned int size = le16_to_cpu(rx_desc->w.length);
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = FM10K_RX_BUFSZ;
 #else
-	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+	unsigned int truesize = SKB_DATA_ALIGN(size);
 #endif
+	unsigned int pull_len;
 
-	if ((size <= FM10K_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
-		unsigned char *va = page_address(page) + rx_buffer->page_offset;
+	if (unlikely(skb_is_nonlinear(skb)))
+		goto add_tail_frag;
 
+	if (likely(size <= FM10K_RX_HDR_LEN)) {
 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
 		/* page is not reserved, we can reuse buffer as-is */
@@ -290,8 +293,21 @@
 		return false;
 	}
 
+	/* the linear header must hold the greater of ETH_HLEN or, when
+	 * skb->len is less than 60, the 60 bytes that skb_pad() needs.
+	 */
+	pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	va += pull_len;
+	size -= pull_len;
+
+add_tail_frag:
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-			rx_buffer->page_offset, size, truesize);
+			(unsigned long)va & ~PAGE_MASK, size, truesize);
 
 	return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
 }
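
This replaces the separate fm10k_pull_tail() pass removed further below: eth_get_headlen() parses just far enough to size the protocol headers, those bytes are copied into the linear area, and the remaining payload is attached as a page fragment. Condensed from the hunk above:

    pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);  /* header bytes only */
    memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
    va   += pull_len;                                  /* payload stays in  */
    size -= pull_len;                                  /* the page          */
    skb_add_rx_frag(skb, 0, page, (unsigned long)va & ~PAGE_MASK,
                    size, truesize);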
@@ -518,44 +534,6 @@
 }
 
 /**
- * fm10k_pull_tail - fm10k specific version of skb_pull_tail
- * @skb: pointer to current skb being adjusted
- *
- * This function is an fm10k specific version of __pskb_pull_tail.  The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void fm10k_pull_tail(struct sk_buff *skb)
-{
-	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-	unsigned char *va;
-	unsigned int pull_len;
-
-	/* it is valid to use page_address instead of kmap since we are
-	 * working with pages allocated out of the lomem pool per
-	 * alloc_page(GFP_ATOMIC)
-	 */
-	va = skb_frag_address(frag);
-
-	/* we need the header to contain the greater of either ETH_HLEN or
-	 * 60 bytes if the skb->len is less than 60 for skb_pad.
-	 */
-	pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);
-
-	/* align pull length to size of long to optimize memcpy performance */
-	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-	/* update all of the pointers */
-	skb_frag_size_sub(frag, pull_len);
-	frag->page_offset += pull_len;
-	skb->data_len -= pull_len;
-	skb->tail += pull_len;
-}
-
-/**
  * fm10k_cleanup_headers - Correct corrupted or empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
  * @rx_desc: pointer to the EOP Rx descriptor
@@ -580,10 +558,6 @@
 		return true;
 	}
 
-	/* place header in linear portion of buffer */
-	if (skb_is_nonlinear(skb))
-		fm10k_pull_tail(skb);
-
 	/* if eth_skb_pad returns an error the skb was freed */
 	if (eth_skb_pad(skb))
 		return true;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 1b27383..1a4b526 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1259,16 +1259,11 @@
 				   struct fm10k_mbx_info *mbx)
 {
 	const u32 *hdr = &mbx->mbx_hdr;
-	s32 err_no;
 	u16 head;
 
 	/* we will need to pull all of the fields for verification */
 	head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
 
-	/* we only have lower 10 bits of error number so add upper bits */
-	err_no = FM10K_MSG_HDR_FIELD_GET(*hdr, ERR_NO);
-	err_no |= ~FM10K_MSG_HDR_MASK(ERR_NO);
-
 	switch (mbx->state) {
 	case FM10K_STATE_OPEN:
 	case FM10K_STATE_DISCONNECT:
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 2f4f41b..99228bf 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -923,18 +923,12 @@
 	struct fm10k_intfc *interface = netdev_priv(dev);
 	struct fm10k_hw *hw = &interface->hw;
 	u16 vid, glort = interface->glort;
-	s32 err;
-
-	if (!is_multicast_ether_addr(addr))
-		return -EADDRNOTAVAIL;
 
 	/* update table with current entries */
 	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
 	     vid < VLAN_N_VID;
 	     vid = fm10k_find_next_vlan(interface, vid)) {
-		err = hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync);
-		if (err)
-			return err;
+		hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync);
 	}
 
 	return 0;
@@ -1339,8 +1333,7 @@
 	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
 	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
 	dglort.glort = interface->glort;
-	if (l2_accel)
-		dglort.shared_l = fls(l2_accel->size);
+	dglort.shared_l = fls(l2_accel->size);
 	hw->mac.ops.configure_dglort_map(hw, &dglort);
 
 	/* If table is empty remove it */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index df9fda3..ce53ff2 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1559,6 +1559,7 @@
 
 	/* free any buffers still on the rings */
 	fm10k_clean_all_tx_rings(interface);
+	fm10k_clean_all_rx_rings(interface);
 }
 
 /**
@@ -1740,30 +1741,18 @@
 	struct fm10k_intfc *interface;
 	struct fm10k_hw *hw;
 	int err;
-	u64 dma_mask;
 
 	err = pci_enable_device_mem(pdev);
 	if (err)
 		return err;
 
-	/* By default fm10k only supports a 48 bit DMA mask */
-	dma_mask = DMA_BIT_MASK(48) | dma_get_required_mask(&pdev->dev);
-
-	if ((dma_mask <= DMA_BIT_MASK(32)) ||
-	    dma_set_mask_and_coherent(&pdev->dev, dma_mask)) {
-		dma_mask &= DMA_BIT_MASK(32);
-
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+	if (err)
 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
-		if (err) {
-			err = dma_set_coherent_mask(&pdev->dev,
-						    DMA_BIT_MASK(32));
-			if (err) {
-				dev_err(&pdev->dev,
-					"No usable DMA configuration, aborting\n");
-				goto err_dma;
-			}
-		}
+	if (err) {
+		dev_err(&pdev->dev,
+			"DMA configuration failed: %d\n", err);
+		goto err_dma;
 	}
 
 	err = pci_request_selected_regions(pdev,
@@ -1772,7 +1761,7 @@
 					   fm10k_driver_name);
 	if (err) {
 		dev_err(&pdev->dev,
-			"pci_request_selected_regions failed 0x%x\n", err);
+			"pci_request_selected_regions failed: %d\n", err);
 		goto err_pci_reg;
 	}
 
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 891e218..3ca0233 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1046,6 +1046,12 @@
 		fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i);
 	}
 
+	/* repeat the first ring for all the remaining VF rings */
+	for (i = queues_per_pool; i < qmap_stride; i++) {
+		fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx);
+		fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx);
+	}
+
 	return 0;
 }
 
@@ -1345,6 +1351,14 @@
 			err = fm10k_update_lport_state_pf(hw, vf_info->glort,
 							  1, false);
 
+		/* we need to clear VF_FLAG_ENABLED flags in order to ensure
+		 * that we actually re-enable the LPORT state below. Note that
+		 * this has no impact if the VF is already disabled, as the
+		 * flags are already cleared.
+		 */
+		if (!err)
+			vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info);
+
 		/* when enabling the port we should reset the rate limiters */
 		hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate);
 
@@ -1786,8 +1800,8 @@
 	if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)
 		return FM10K_ERR_PARAM;
 
-	if (ppb < 0)
-		systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_NEGATIVE;
+	if (ppb > 0)
+		systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;
 
 	fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);
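
With the direction bit renamed, the register carries a 30-bit magnitude plus a flag that now means "adjust upward", so the sign test must set it for positive ppb. The encoding, condensed (systime_adjust holds the scaled magnitude computed earlier in the function):

    if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)  /* magnitude only */
            return FM10K_ERR_PARAM;
    if (ppb > 0)
            systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;
    fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);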
 
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index 7ab1db4..40a0dbc 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -81,26 +81,26 @@
 	__le16	glort;
 	u8	flags;
 	u8	action;
-};
+} __packed;
 
 struct fm10k_global_table_data {
 	__le32	used;
 	__le32	avail;
-};
+} __packed;
 
 struct fm10k_swapi_error {
 	__le32				status;
 	struct fm10k_global_table_data	mac;
 	struct fm10k_global_table_data	nexthop;
 	struct fm10k_global_table_data	ffu;
-};
+} __packed;
 
 struct fm10k_swapi_1588_timestamp {
 	__le64 egress;
 	__le64 ingress;
 	__le16 dglort;
 	__le16 sglort;
-};
+} __packed;
 
 s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
 extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[];
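
These structures mirror messages exchanged with the switch manager, so their in-memory layout must match the wire format byte for byte; __packed stops the compiler from inserting alignment padding. A hypothetical illustration (not from the driver):

    struct example {
            __le32  a;
            __le16  b;
    };                      /* sizeof == 8: two bytes of tail padding */

    struct example_packed {
            __le32  a;
            __le16  b;
    } __packed;             /* sizeof == 6: matches the wire format   */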
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
index 9043633..b4945e8 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
@@ -70,16 +70,16 @@
 	 * if none are present then insert skb in tail of list
 	 */
 	skb = fm10k_ts_tx_skb(interface, FM10K_CB(clone)->fi.w.dglort);
-	if (!skb)
+	if (!skb) {
+		skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
 		__skb_queue_tail(list, clone);
+	}
 
 	spin_unlock_irqrestore(&list->lock, flags);
 
 	/* if list is already has one then we just free the clone */
 	if (skb)
-		kfree_skb(skb);
-	else
-		skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
+		dev_kfree_skb(clone);
 }
 
 void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
@@ -103,9 +103,10 @@
 	if (!skb)
 		return;
 
-	/* timestamp the sk_buff and return it to the socket */
+	/* timestamp the sk_buff and free our copy */
 	fm10k_systime_to_hwtstamp(interface, &shhwtstamps, systime);
-	skb_complete_tx_timestamp(skb, &shhwtstamps);
+	skb_tstamp_tx(skb, &shhwtstamps);
+	dev_kfree_skb_any(skb);
 }
 
 void fm10k_ts_tx_subtask(struct fm10k_intfc *interface)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 4af9668..2a17d82 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -369,7 +369,7 @@
 /* Registers contained in BAR 4 for Switch management */
 #define FM10K_SW_SYSTIME_ADJUST	0x0224D
 #define FM10K_SW_SYSTIME_ADJUST_MASK		0x3FFFFFFF
-#define FM10K_SW_SYSTIME_ADJUST_DIR_NEGATIVE	0x80000000
+#define FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE	0x80000000
 #define FM10K_SW_SYSTIME_PULSE(_n)	((_n) + 0x02252)
 
 enum fm10k_int_source {
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 5d47307..ec76c3f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -182,6 +182,7 @@
 enum i40e_fd_stat_idx {
 	I40E_FD_STAT_ATR,
 	I40E_FD_STAT_SB,
+	I40E_FD_STAT_ATR_TUNNEL,
 	I40E_FD_STAT_PF_COUNT
 };
 #define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
@@ -189,6 +190,8 @@
 			(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
 #define I40E_FD_SB_STAT_IDX(pf_id)  \
 			(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
+#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
+			(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
 
 struct i40e_fdir_filter {
 	struct hlist_node fdir_node;
@@ -263,8 +266,6 @@
 
 	struct hlist_head fdir_filter_list;
 	u16 fdir_pf_active_filters;
-	u16 fd_sb_cnt_idx;
-	u16 fd_atr_cnt_idx;
 	unsigned long fd_flush_timestamp;
 	u32 fd_flush_cnt;
 	u32 fd_add_err;
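
With the new I40E_FD_STAT_ATR_TUNNEL entry, each PF owns a block of I40E_FD_STAT_PF_COUNT (now 3) consecutive GLQF_PCNT counters, so the per-type index macros reduce to fixed offsets and the cached fd_sb_cnt_idx/fd_atr_cnt_idx fields removed above become redundant:

    /* for PF n the counter indices work out to:                 */
    I40E_FD_ATR_STAT_IDX(n)         /* 3*n + 0: ATR matches      */
    I40E_FD_SB_STAT_IDX(n)          /* 3*n + 1: sideband matches */
    I40E_FD_ATR_TUNNEL_STAT_IDX(n)  /* 3*n + 2: tunneled ATR     */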
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4cbaaeb..9a68c65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -147,6 +147,7 @@
 	I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
 	I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
 	I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
+	I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
 	I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
 
 	/* LPI stats */
@@ -1548,6 +1549,17 @@
 	return *data;
 }
 
+static inline bool i40e_active_vfs(struct i40e_pf *pf)
+{
+	struct i40e_vf *vfs = pf->vf;
+	int i;
+
+	for (i = 0; i < pf->num_alloc_vfs; i++)
+		if (vfs[i].vf_states & I40E_VF_STAT_ACTIVE)
+			return true;
+	return false;
+}
+
 static void i40e_diag_test(struct net_device *netdev,
 			   struct ethtool_test *eth_test, u64 *data)
 {
@@ -1560,6 +1572,20 @@
 		netif_info(pf, drv, netdev, "offline testing starting\n");
 
 		set_bit(__I40E_TESTING, &pf->state);
+
+		if (i40e_active_vfs(pf)) {
+			dev_warn(&pf->pdev->dev,
+				 "Please take active VFS offline and restart the adapter before running NIC diagnostics\n");
+			data[I40E_ETH_TEST_REG]		= 1;
+			data[I40E_ETH_TEST_EEPROM]	= 1;
+			data[I40E_ETH_TEST_INTR]	= 1;
+			data[I40E_ETH_TEST_LOOPBACK]	= 1;
+			data[I40E_ETH_TEST_LINK]	= 1;
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+			clear_bit(__I40E_TESTING, &pf->state);
+			goto skip_ol_tests;
+		}
+
 		/* If the device is online then take it offline */
 		if (if_running)
 			/* indicate we're in test mode */
@@ -1605,6 +1631,8 @@
 		data[I40E_ETH_TEST_LOOPBACK] = 0;
 	}
 
+skip_ol_tests:
+
 	netif_info(pf, drv, netdev, "testing finished\n");
 }
 
@@ -2265,7 +2293,7 @@
 	input->pctype = 0;
 	input->dest_vsi = vsi->id;
 	input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
-	input->cnt_index  = pf->fd_sb_cnt_idx;
+	input->cnt_index  = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
 	input->flow_type = fsp->flow_type;
 	input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 1803afe..c8b621e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -118,7 +118,7 @@
  *
  * The FC EOF is converted to the value understood by HW for descriptor
  * programming. Never call this w/o calling i40e_fcoe_eof_is_supported()
- * first.
+ * first; it already checks for all supported valid EOF values.
  **/
 static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
 {
@@ -132,9 +132,12 @@
 	case FC_EOF_A:
 		return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A;
 	default:
-		/* FIXME: still returns 0 */
-		pr_err("Unrecognized EOF %x\n", eof);
-		return 0;
+		/* A supported, valid EOF must already have been checked by
+		 * i40e_fcoe_eof_is_supported(), so this default case should
+		 * never be hit.
+		 */
+		WARN_ON(1);
+		return -EINVAL;
 	}
 }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 5b5bea1..48a52b3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_BUILD 4
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -772,9 +772,8 @@
 
 	dcb_cfg = &hw->local_dcbx_config;
 
-	/* See if DCB enabled with PFC TC */
-	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
-	    !(dcb_cfg->pfc.pfcenable)) {
+	/* Collect Link XOFF stats when PFC is disabled */
+	if (!dcb_cfg->pfc.pfcenable) {
 		i40e_update_link_xoff_rx(pf);
 		return;
 	}
@@ -1097,12 +1096,18 @@
 			   &osd->rx_jabber, &nsd->rx_jabber);
 
 	/* FDIR stats */
-	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
+	i40e_stat_update32(hw,
+			   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
 			   pf->stat_offsets_loaded,
 			   &osd->fd_atr_match, &nsd->fd_atr_match);
-	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
+	i40e_stat_update32(hw,
+			   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
 			   pf->stat_offsets_loaded,
 			   &osd->fd_sb_match, &nsd->fd_sb_match);
+	i40e_stat_update32(hw,
+		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
+		      pf->stat_offsets_loaded,
+		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
 
 	val = rd32(hw, I40E_PRTPM_EEE_STAT);
 	nsd->tx_lpi_status =
@@ -4739,7 +4744,8 @@
 		pf->fd_add_err = pf->fd_atr_cnt = 0;
 		if (pf->fd_tcp_rule > 0) {
 			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
+			if (I40E_DEBUG_FD & pf->hw.debug_mask)
+				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
 			pf->fd_tcp_rule = 0;
 		}
 		i40e_fdir_filter_restore(vsi);
@@ -5428,7 +5434,8 @@
 		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
 		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
 			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
-			dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
+			if (I40E_DEBUG_FD & pf->hw.debug_mask)
+				dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
 		}
 	}
 	/* Wait for some more space to be available to turn on ATR */
@@ -5436,7 +5443,8 @@
 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
 		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
 			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+			if (I40E_DEBUG_FD & pf->hw.debug_mask)
+				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
 		}
 	}
 }
@@ -5469,7 +5477,8 @@
 
 		if (!(time_after(jiffies, min_flush_time)) &&
 		    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
-			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+			if (I40E_DEBUG_FD & pf->hw.debug_mask)
+				dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
 			disable_atr = true;
 		}
 
@@ -5496,7 +5505,8 @@
 			if (!disable_atr)
 				pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
 			clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
-			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+			if (I40E_DEBUG_FD & pf->hw.debug_mask)
+				dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
 		}
 	}
 }
@@ -7680,12 +7690,8 @@
 	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
 		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
 		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
-		/* Setup a counter for fd_atr per PF */
-		pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
 		if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
-			/* Setup a counter for fd_sb per PF */
-			pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
 		} else {
 			dev_info(&pf->pdev->dev,
 				 "Flow Director Sideband mode Disabled in MFP mode\n");
@@ -7775,7 +7781,8 @@
 		pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
 		pf->fdir_pf_active_filters = 0;
 		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-		dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+		if (I40E_DEBUG_FD & pf->hw.debug_mask)
+			dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
 		/* if ATR was auto disabled it can be re-enabled. */
 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
 		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
@@ -8062,7 +8069,7 @@
 #ifdef HAVE_BRIDGE_FILTER
 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 				   struct net_device *dev,
-				   u32 __always_unused filter_mask, int nlflags)
+				   u32 filter_mask, int nlflags)
 #else
 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 				   struct net_device *dev, int nlflags)
@@ -8088,7 +8095,7 @@
 		return 0;
 
 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
-				       nlflags);
+				       nlflags, 0, 0, filter_mask, NULL);
 }
 #endif /* HAVE_BRIDGE_ATTRIBS */
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9d95042d..9a4f2bc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -165,9 +165,6 @@
 	tx_desc->cmd_type_offset_bsz =
 		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
 
-	/* set the timestamp */
-	tx_buf->time_stamp = jiffies;
-
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.
 	 */
@@ -283,7 +280,8 @@
 	if (add) {
 		pf->fd_tcp_rule++;
 		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
-			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+			if (I40E_DEBUG_FD & pf->hw.debug_mask)
+				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
 			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
 		}
 	} else {
@@ -291,7 +289,8 @@
 				  (pf->fd_tcp_rule - 1) : 0;
 		if (pf->fd_tcp_rule == 0) {
 			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-			dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+			if (I40E_DEBUG_FD & pf->hw.debug_mask)
+				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
 		}
 	}
 
@@ -501,7 +500,8 @@
 			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
 			    !(pf->auto_disable_flags &
 				     I40E_FLAG_FD_SB_ENABLED)) {
-				dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
+				if (I40E_DEBUG_FD & pf->hw.debug_mask)
+					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
 				pf->auto_disable_flags |=
 							I40E_FLAG_FD_SB_ENABLED;
 			}
@@ -807,10 +807,6 @@
 			 tx_ring->vsi->seid,
 			 tx_ring->queue_index,
 			 tx_ring->next_to_use, i);
-		dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
-			 "  time_stamp           <%lx>\n"
-			 "  jiffies              <%lx>\n",
-			 tx_ring->tx_bi[i].time_stamp, jiffies);
 
 		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
@@ -1653,9 +1649,6 @@
 		/* ERR_MASK will only have valid bits if EOP set */
 		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
-			/* TODO: shouldn't we increment a counter indicating the
-			 * drop?
-			 */
 			continue;
 		}
 
@@ -1688,7 +1681,6 @@
 		skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
 		i40e_receive_skb(rx_ring, skb, vlan_tag);
 
-		rx_ring->netdev->last_rx = jiffies;
 		rx_desc->wb.qword1.status_error_len = 0;
 
 	} while (likely(total_rx_packets < budget));
@@ -1821,7 +1813,6 @@
 #endif
 		i40e_receive_skb(rx_ring, skb, vlan_tag);
 
-		rx_ring->netdev->last_rx = jiffies;
 		rx_desc->wb.qword1.status_error_len = 0;
 	} while (likely(total_rx_packets < budget));
 
@@ -1925,11 +1916,11 @@
  * i40e_atr - Add a Flow Director ATR filter
  * @tx_ring:  ring to add programming descriptor to
  * @skb:      send buffer
- * @flags:    send flags
+ * @tx_flags: send tx flags
  * @protocol: wire protocol
  **/
 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
-		     u32 flags, __be16 protocol)
+		     u32 tx_flags, __be16 protocol)
 {
 	struct i40e_filter_program_desc *fdir_desc;
 	struct i40e_pf *pf = tx_ring->vsi->back;
@@ -1954,25 +1945,38 @@
 	if (!tx_ring->atr_sample_rate)
 		return;
 
-	/* snag network header to get L4 type and address */
-	hdr.network = skb_network_header(skb);
-
-	/* Currently only IPv4/IPv6 with TCP is supported */
-	if (protocol == htons(ETH_P_IP)) {
-		if (hdr.ipv4->protocol != IPPROTO_TCP)
-			return;
-
-		/* access ihl as a u8 to avoid unaligned access on ia64 */
-		hlen = (hdr.network[0] & 0x0F) << 2;
-	} else if (protocol == htons(ETH_P_IPV6)) {
-		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
-			return;
-
-		hlen = sizeof(struct ipv6hdr);
-	} else {
+	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
 		return;
+
+	if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
+		/* snag network header to get L4 type and address */
+		hdr.network = skb_network_header(skb);
+
+		/* Currently only IPv4/IPv6 with TCP is supported;
+		 * access ihl as u8 to avoid unaligned access on ia64
+		 */
+		if (tx_flags & I40E_TX_FLAGS_IPV4)
+			hlen = (hdr.network[0] & 0x0F) << 2;
+		else if (protocol == htons(ETH_P_IPV6))
+			hlen = sizeof(struct ipv6hdr);
+		else
+			return;
+	} else {
+		hdr.network = skb_inner_network_header(skb);
+		hlen = skb_inner_network_header_len(skb);
 	}
 
+	/* Currently only IPv4/IPv6 with TCP is supported.
+	 * Note: tx_flags gets modified to reflect inner protocols in
+	 * i40e_tx_enable_csum() if encap is enabled.
+	 */
+	if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
+	    (hdr.ipv4->protocol != IPPROTO_TCP))
+		return;
+	else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
+		 (hdr.ipv6->nexthdr != IPPROTO_TCP))
+		return;
+
 	th = (struct tcphdr *)(hdr.network + hlen);
 
 	/* Due to lack of space, no more new filters can be programmed */
@@ -2022,9 +2026,16 @@
 		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
 
 	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
-	dtype_cmd |=
-		((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
-		I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+	if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
+		dtype_cmd |=
+			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
+			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+	else
+		dtype_cmd |=
+			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
+			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 
 	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
 	fdir_desc->rsvd = cpu_to_le32(0);
@@ -2045,13 +2056,13 @@
  * otherwise  returns 0 to indicate the flags has been set properly.
  **/
 #ifdef I40E_FCOE
-int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-			       struct i40e_ring *tx_ring,
-			       u32 *flags)
-#else
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
 				      struct i40e_ring *tx_ring,
 				      u32 *flags)
+#else
+static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+					     struct i40e_ring *tx_ring,
+					     u32 *flags)
 #endif
 {
 	__be16 protocol = skb->protocol;
@@ -2119,16 +2130,14 @@
  * i40e_tso - set up the tso context descriptor
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
  * @hdr_len:  ptr to the size of the packet header
  * @cd_tunneling: ptr to context descriptor bits
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-		    u32 tx_flags, __be16 protocol, u8 *hdr_len,
-		    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+		    u32 *cd_tunneling)
 {
 	u32 cd_cmd, cd_tso_len, cd_mss;
 	struct ipv6hdr *ipv6h;
@@ -2220,12 +2229,12 @@
 /**
  * i40e_tx_enable_csum - Enable Tx checksum offloads
  * @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
  * @cd_tunneling: ptr to context desc bits
  **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 				u32 *td_cmd, u32 *td_offset,
 				struct i40e_ring *tx_ring,
 				u32 *cd_tunneling)
@@ -2241,6 +2250,7 @@
 		switch (ip_hdr(skb)->protocol) {
 		case IPPROTO_UDP:
 			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
 			break;
 		default:
 			return;
@@ -2250,18 +2260,17 @@
 		this_ipv6_hdr = inner_ipv6_hdr(skb);
 		this_tcp_hdrlen = inner_tcp_hdrlen(skb);
 
-		if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
-			if (tx_flags & I40E_TX_FLAGS_TSO) {
+		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+			if (*tx_flags & I40E_TX_FLAGS_TSO) {
 				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
 				ip_hdr(skb)->check = 0;
 			} else {
 				*cd_tunneling |=
 					 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
 			}
-		} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
 			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-			if (tx_flags & I40E_TX_FLAGS_TSO)
+			if (*tx_flags & I40E_TX_FLAGS_TSO)
 				ip_hdr(skb)->check = 0;
 		}
 
@@ -2273,8 +2282,8 @@
 					skb_transport_offset(skb)) >> 1) <<
 				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
 		if (this_ip_hdr->version == 6) {
-			tx_flags &= ~I40E_TX_FLAGS_IPV4;
-			tx_flags |= I40E_TX_FLAGS_IPV6;
+			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
+			*tx_flags |= I40E_TX_FLAGS_IPV6;
 		}
 	} else {
 		network_hdr_len = skb_network_header_len(skb);
@@ -2284,12 +2293,12 @@
 	}
 
 	/* Enable IP checksum offloads */
-	if (tx_flags & I40E_TX_FLAGS_IPV4) {
+	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
 		l4_hdr = this_ip_hdr->protocol;
 		/* the stack computes the IP header already, the only time we
 		 * need the hardware to recompute it is in the case of TSO.
 		 */
-		if (tx_flags & I40E_TX_FLAGS_TSO) {
+		if (*tx_flags & I40E_TX_FLAGS_TSO) {
 			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
 			this_ip_hdr->check = 0;
 		} else {
@@ -2298,7 +2307,7 @@
 		/* Now set the td_offset for IP header length */
 		*td_offset = (network_hdr_len >> 2) <<
 			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
 		l4_hdr = this_ipv6_hdr->nexthdr;
 		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
 		/* Now set the td_offset for IP header length */
@@ -2396,9 +2405,9 @@
  * Returns 0 if stop is not needed
  **/
 #ifdef I40E_FCOE
-int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 #else
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 #endif
 {
 	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
@@ -2473,13 +2482,13 @@
  * @td_offset: offset for checksum or crc
  **/
 #ifdef I40E_FCOE
-void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-		 struct i40e_tx_buffer *first, u32 tx_flags,
-		 const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#else
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 			struct i40e_tx_buffer *first, u32 tx_flags,
 			const u8 hdr_len, u32 td_cmd, u32 td_offset)
+#else
+static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+			       struct i40e_tx_buffer *first, u32 tx_flags,
+			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
 #endif
 {
 	unsigned int data_len = skb->data_len;
@@ -2585,9 +2594,6 @@
 						 tx_ring->queue_index),
 			     first->bytecount);
 
-	/* set the timestamp */
-	first->time_stamp = jiffies;
-
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
 	 * applicable for weak-ordered memory model archs,
@@ -2640,11 +2646,11 @@
  * one descriptor.
  **/
 #ifdef I40E_FCOE
-int i40e_xmit_descriptor_count(struct sk_buff *skb,
-			       struct i40e_ring *tx_ring)
-#else
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
 				      struct i40e_ring *tx_ring)
+#else
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
+					     struct i40e_ring *tx_ring)
 #endif
 {
 	unsigned int f;
@@ -2706,7 +2712,7 @@
 	else if (protocol == htons(ETH_P_IPV6))
 		tx_flags |= I40E_TX_FLAGS_IPV6;
 
-	tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+	tso = i40e_tso(tx_ring, skb, &hdr_len,
 		       &cd_type_cmd_tso_mss, &cd_tunneling);
 
 	if (tso < 0)
@@ -2732,7 +2738,7 @@
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		tx_flags |= I40E_TX_FLAGS_CSUM;
 
-		i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
 				    tx_ring, &cd_tunneling);
 	}
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 4b0b810..0dc48dc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -139,6 +139,7 @@
 #define I40E_TX_FLAGS_FSO		(u32)(1 << 7)
 #define I40E_TX_FLAGS_TSYN		(u32)(1 << 8)
 #define I40E_TX_FLAGS_FD_SB		(u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL	(u32)(1 << 10)
 #define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
@@ -146,7 +147,6 @@
 
 struct i40e_tx_buffer {
 	struct i40e_tx_desc *next_to_watch;
-	unsigned long time_stamp;
 	union {
 		struct sk_buff *skb;
 		void *raw_buf;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 568e855..9a5a75b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1133,6 +1133,7 @@
 	/* flow director stats */
 	u64 fd_atr_match;
 	u64 fd_sb_match;
+	u64 fd_atr_tunnel_match;
 	/* EEE LPI */
 	u32 tx_lpi_status;
 	u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4e9376d..23f95cd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -980,6 +980,13 @@
 	int pre_existing_vfs = pci_num_vf(pdev);
 	int err = 0;
 
+	if (pf->state & __I40E_TESTING) {
+		dev_warn(&pdev->dev,
+			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
+		err = -EPERM;
+		goto err_out;
+	}
+
 	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
 	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
 		i40e_free_vfs(pf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 458fbb4..f54996f1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -322,10 +322,6 @@
 			 tx_ring->vsi->seid,
 			 tx_ring->queue_index,
 			 tx_ring->next_to_use, i);
-		dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
-			 "  time_stamp           <%lx>\n"
-			 "  jiffies              <%lx>\n",
-			 tx_ring->tx_bi[i].time_stamp, jiffies);
 
 		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
@@ -1128,9 +1124,6 @@
 		/* ERR_MASK will only have valid bits if EOP set */
 		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
-			/* TODO: shouldn't we increment a counter indicating the
-			 * drop?
-			 */
 			continue;
 		}
 
@@ -1156,7 +1149,6 @@
 		skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
 		i40e_receive_skb(rx_ring, skb, vlan_tag);
 
-		rx_ring->netdev->last_rx = jiffies;
 		rx_desc->wb.qword1.status_error_len = 0;
 
 	} while (likely(total_rx_packets < budget));
@@ -1271,7 +1263,6 @@
 			 : 0;
 		i40e_receive_skb(rx_ring, skb, vlan_tag);
 
-		rx_ring->netdev->last_rx = jiffies;
 		rx_desc->wb.qword1.status_error_len = 0;
 	} while (likely(total_rx_packets < budget));
 
@@ -1352,7 +1343,7 @@
 }
 
 /**
- * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
  * @skb:     send buffer
  * @tx_ring: ring to send buffer on
  * @flags:   the tx flags to be set
@@ -1363,9 +1354,9 @@
  * Returns error code indicate the frame should be dropped upon error and the
  * otherwise  returns 0 to indicate the flags has been set properly.
  **/
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-				      struct i40e_ring *tx_ring,
-				      u32 *flags)
+static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
+					       struct i40e_ring *tx_ring,
+					       u32 *flags)
 {
 	__be16 protocol = skb->protocol;
 	u32  tx_flags = 0;
@@ -1408,16 +1399,14 @@
  * i40e_tso - set up the tso context descriptor
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
  * @hdr_len:  ptr to the size of the packet header
  * @cd_tunneling: ptr to context descriptor bits
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-		    u32 tx_flags, __be16 protocol, u8 *hdr_len,
-		    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+		    u32 *cd_tunneling)
 {
 	u32 cd_cmd, cd_tso_len, cd_mss;
 	struct ipv6hdr *ipv6h;
@@ -1468,12 +1457,12 @@
 /**
  * i40e_tx_enable_csum - Enable Tx checksum offloads
  * @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
  * @cd_tunneling: ptr to context desc bits
  **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 				u32 *td_cmd, u32 *td_offset,
 				struct i40e_ring *tx_ring,
 				u32 *cd_tunneling)
@@ -1489,6 +1478,7 @@
 		switch (ip_hdr(skb)->protocol) {
 		case IPPROTO_UDP:
 			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
 			break;
 		default:
 			return;
@@ -1498,18 +1488,17 @@
 		this_ipv6_hdr = inner_ipv6_hdr(skb);
 		this_tcp_hdrlen = inner_tcp_hdrlen(skb);
 
-		if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
-			if (tx_flags & I40E_TX_FLAGS_TSO) {
+		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+			if (*tx_flags & I40E_TX_FLAGS_TSO) {
 				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
 				ip_hdr(skb)->check = 0;
 			} else {
 				*cd_tunneling |=
 					 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
 			}
-		} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
 			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-			if (tx_flags & I40E_TX_FLAGS_TSO)
+			if (*tx_flags & I40E_TX_FLAGS_TSO)
 				ip_hdr(skb)->check = 0;
 		}
 
@@ -1521,8 +1510,8 @@
 					skb_transport_offset(skb)) >> 1) <<
 				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
 		if (this_ip_hdr->version == 6) {
-			tx_flags &= ~I40E_TX_FLAGS_IPV4;
-			tx_flags |= I40E_TX_FLAGS_IPV6;
+			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
+			*tx_flags |= I40E_TX_FLAGS_IPV6;
 		}
 
 
@@ -1534,12 +1523,12 @@
 	}
 
 	/* Enable IP checksum offloads */
-	if (tx_flags & I40E_TX_FLAGS_IPV4) {
+	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
 		l4_hdr = this_ip_hdr->protocol;
 		/* the stack computes the IP header already, the only time we
 		 * need the hardware to recompute it is in the case of TSO.
 		 */
-		if (tx_flags & I40E_TX_FLAGS_TSO) {
+		if (*tx_flags & I40E_TX_FLAGS_TSO) {
 			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
 			this_ip_hdr->check = 0;
 		} else {
@@ -1548,7 +1537,7 @@
 		/* Now set the td_offset for IP header length */
 		*td_offset = (network_hdr_len >> 2) <<
 			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
 		l4_hdr = this_ipv6_hdr->nexthdr;
 		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
 		/* Now set the td_offset for IP header length */
@@ -1672,7 +1661,44 @@
 }
 
 /**
- * i40e_tx_map - Build the Tx descriptor
+ * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size we want to ensure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	/* Memory barrier before checking head and tail */
+	smp_mb();
+
+	/* Check again in case another CPU has just made room available. */
+	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
+	return 0;
+}
+
+/**
+ * i40evf_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size we want to ensure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __i40evf_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40evf_tx_map - Build the Tx descriptor
  * @tx_ring:  ring to send buffer on
  * @skb:      send buffer
  * @first:    first buffer info buffer to use
@@ -1681,9 +1707,9 @@
  * @td_cmd:   the command field in the descriptor
  * @td_offset: offset for checksum or crc
  **/
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-			struct i40e_tx_buffer *first, u32 tx_flags,
-			const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+				 struct i40e_tx_buffer *first, u32 tx_flags,
+				 const u8 hdr_len, u32 td_cmd, u32 td_offset)
 {
 	unsigned int data_len = skb->data_len;
 	unsigned int size = skb_headlen(skb);
@@ -1789,9 +1815,6 @@
 						 tx_ring->queue_index),
 			     first->bytecount);
 
-	/* set the timestamp */
-	first->time_stamp = jiffies;
-
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
 	 * applicable for weak-ordered memory model archs,
@@ -1808,8 +1831,12 @@
 
 	tx_ring->next_to_use = i;
 
+	i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
 	/* notify HW of packet */
-	writel(i, tx_ring->tail);
+	if (!skb->xmit_more ||
+	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+						   tx_ring->queue_index)))
+		writel(i, tx_ring->tail);
 
 	return;
 
@@ -1831,44 +1858,7 @@
 }
 
 /**
- * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns -EBUSY if a stop is needed, else 0
- **/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	/* Memory barrier before checking head and tail */
-	smp_mb();
-
-	/* Check again in a case another CPU has just made room available. */
-	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
-		return -EBUSY;
-
-	/* A reprieve! - use start_queue because it doesn't call schedule */
-	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	++tx_ring->tx_stats.restart_queue;
-	return 0;
-}
-
-/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
-		return 0;
-	return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
-/**
- * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed
  * @skb:     send buffer
  * @tx_ring: ring to send buffer on
  *
@@ -1876,8 +1866,8 @@
  * there are not enough descriptors available in this ring since we need at least
  * one descriptor.
  **/
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
-				      struct i40e_ring *tx_ring)
+static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb,
+					       struct i40e_ring *tx_ring)
 {
 	unsigned int f;
 	int count = 0;
@@ -1892,7 +1882,7 @@
 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
 	count += TXD_USE_COUNT(skb_headlen(skb));
-	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+	if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
 		tx_ring->tx_stats.tx_busy++;
 		return 0;
 	}
@@ -1918,11 +1908,11 @@
 	u32 td_cmd = 0;
 	u8 hdr_len = 0;
 	int tso;
-	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+	if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
 		return NETDEV_TX_BUSY;
 
 	/* prepare the xmit flags */
-	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+	if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
 		goto out_drop;
 
 	/* obtain protocol of skb */
@@ -1937,7 +1927,7 @@
 	else if (protocol == htons(ETH_P_IPV6))
 		tx_flags |= I40E_TX_FLAGS_IPV6;
 
-	tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+	tso = i40e_tso(tx_ring, skb, &hdr_len,
 		       &cd_type_cmd_tso_mss, &cd_tunneling);
 
 	if (tso < 0)
@@ -1958,17 +1948,15 @@
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		tx_flags |= I40E_TX_FLAGS_CSUM;
 
-		i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
 				    tx_ring, &cd_tunneling);
 	}
 
 	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
 			   cd_tunneling, cd_l2tag2);
 
-	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
-		    td_cmd, td_offset);
-
-	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+	i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+		      td_cmd, td_offset);
 
 	return NETDEV_TX_OK;
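
Two details in the i40evf Tx path above are easy to miss. First,
i40evf_tx_map() now calls i40evf_maybe_stop_tx() before deciding whether to
ring the doorbell: the skb->xmit_more test relies on netif_xmit_stopped() to
force a tail write when the stack has more packets queued but the ring has
just filled, so doing the stop check later could strand a packet with no
doorbell. Second, the smp_mb() in __i40evf_maybe_stop_tx() pairs with a
barrier on the completion side; a sketch of that pairing (illustrative only,
not the verbatim driver code; TX_WAKE_THRESHOLD and the exact checks are
assumptions):

	/* Tx cleanup side, after reclaiming descriptors */
	if (unlikely(netif_carrier_ok(tx_ring->netdev) &&
		     I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* order the free-count update against the stopped check,
		 * pairing with the smp_mb() in __i40evf_maybe_stop_tx()
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index))
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
	}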
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 1e49bb1..e7a34f8 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -138,6 +138,7 @@
 #define I40E_TX_FLAGS_FCCRC		(u32)(1 << 6)
 #define I40E_TX_FLAGS_FSO		(u32)(1 << 7)
 #define I40E_TX_FLAGS_FD_SB		(u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL	(u32)(1 << 10)
 #define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
@@ -145,7 +146,6 @@
 
 struct i40e_tx_buffer {
 	struct i40e_tx_desc *next_to_watch;
-	unsigned long time_stamp;
 	union {
 		struct sk_buff *skb;
 		void *raw_buf;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index ec9d83a..c463ec4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1108,6 +1108,7 @@
 	/* flow director stats */
 	u64 fd_atr_match;
 	u64 fd_sb_match;
+	u64 fd_atr_tunnel_match;
 	/* EEE LPI */
 	u32 tx_lpi_status;
 	u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a0a9b1f..f287186 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1836,31 +1836,19 @@
  *
  * @adapter: adapter struct
  **/
-static s32 igb_enable_mas(struct igb_adapter *adapter)
+static void igb_enable_mas(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	u32 connsw;
-	s32 ret_val = 0;
-
-	connsw = rd32(E1000_CONNSW);
-	if (!(hw->phy.media_type == e1000_media_type_copper))
-		return ret_val;
+	u32 connsw = rd32(E1000_CONNSW);
 
 	/* configure for SerDes media detect */
-	if (!(connsw & E1000_CONNSW_SERDESD)) {
+	if ((hw->phy.media_type == e1000_media_type_copper) &&
+	    (!(connsw & E1000_CONNSW_SERDESD))) {
 		connsw |= E1000_CONNSW_ENRGSRC;
 		connsw |= E1000_CONNSW_AUTOSENSE_EN;
 		wr32(E1000_CONNSW, connsw);
 		wrfl();
-	} else if (connsw & E1000_CONNSW_SERDESD) {
-		/* already SerDes, no need to enable anything */
-		return ret_val;
-	} else {
-		netdev_info(adapter->netdev,
-			"MAS: Unable to configure feature, disabling..\n");
-		adapter->flags &= ~IGB_FLAG_MAS_ENABLE;
 	}
-	return ret_val;
 }
 
 void igb_reset(struct igb_adapter *adapter)
@@ -1980,10 +1968,9 @@
 		adapter->ei.get_invariants(hw);
 		adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
 	}
-	if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
-		if (igb_enable_mas(adapter))
-			dev_err(&pdev->dev,
-				"Error enabling Media Auto Sense\n");
+	if ((mac->type == e1000_82575) &&
+	    (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+		igb_enable_mas(adapter);
 	}
 	if (hw->mac.ops.init_hw(hw))
 		dev_err(&pdev->dev, "Hardware Error\n");
@@ -4989,6 +4976,7 @@
 	struct igb_tx_buffer *first;
 	int tso;
 	u32 tx_flags = 0;
+	unsigned short f;
 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
 	__be16 protocol = vlan_get_protocol(skb);
 	u8 hdr_len = 0;
@@ -4999,14 +4987,8 @@
 	 *       + 1 desc for context descriptor,
 	 * otherwise try next time
 	 */
-	if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
-		unsigned short f;
-
-		for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-			count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-	} else {
-		count += skb_shinfo(skb)->nr_frags;
-	}
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
 	if (igb_maybe_stop_tx(tx_ring, count + 3)) {
 		/* this is a hard error */
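
The igb hunk above removes the fast path that charged exactly one descriptor
per fragment. That shortcut was only valid while no fragment could exceed
IGB_MAX_DATA_PER_TXD, and counting every buffer through TXD_USE_COUNT() is
correct in either case since the macro just rounds a buffer up to whole
descriptors. A sketch of the math (the macro body below is an assumption
based on how it is used here, not quoted from this patch):

	/* one descriptor carries at most IGB_MAX_DATA_PER_TXD bytes, so an
	 * S-byte buffer needs ceil(S / IGB_MAX_DATA_PER_TXD) descriptors
	 */
	#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)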
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 636f9e3..ac3ac2a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -643,6 +643,7 @@
 #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		(u32)(1 << 8)
 #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		(u32)(1 << 9)
 #define IXGBE_FLAG2_PTP_PPS_ENABLED		(u32)(1 << 10)
+#define IXGBE_FLAG2_PHY_INTERRUPT		(u32)(1 << 11)
 
 	/* Tx fast path data */
 	int num_tx_queues;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 824a7ab..65db69b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1225,7 +1225,7 @@
 	.setup_link_speed	= &ixgbe_setup_phy_link_speed_generic,
 	.read_i2c_sff8472	= &ixgbe_read_i2c_sff8472_82598,
 	.read_i2c_eeprom	= &ixgbe_read_i2c_eeprom_82598,
-	.check_overtemp   = &ixgbe_tn_check_overtemp,
+	.check_overtemp		= &ixgbe_tn_check_overtemp,
 };
 
 struct ixgbe_info ixgbe_82598_info = {
@@ -1234,4 +1234,5 @@
 	.mac_ops		= &mac_ops_82598,
 	.eeprom_ops		= &eeprom_ops_82598,
 	.phy_ops		= &phy_ops_82598,
+	.mvals			= ixgbe_mvals_8259X,
 };
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index e0c3639..6b87d96 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -71,7 +71,7 @@
 {
 	u32 fwsm, manc, factps;
 
-	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
 	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
 		return false;
 
@@ -79,7 +79,7 @@
 	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
 		return false;
 
-	factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+	factps = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
 	if (factps & IXGBE_FACTPS_MNGCG)
 		return false;
 
@@ -510,7 +510,7 @@
 	hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
 
 	/* Check to see if MNG FW could be enabled */
-	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
 
 	if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
 	    !hw->wol_enabled &&
@@ -2378,4 +2378,5 @@
 	.eeprom_ops             = &eeprom_ops_82599,
 	.phy_ops                = &phy_ops_82599,
 	.mbx_ops                = &mbx_ops_generic,
+	.mvals			= ixgbe_mvals_8259X,
 };
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 06d8f3c..4c1c267 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -57,6 +57,11 @@
 						 u16 offset);
 static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
 
+/* Base table for registers values that change by MAC */
+const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
+	IXGBE_MVALS_INIT(8259X)
+};
+
 /**
  *  ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
  *  control
@@ -91,6 +96,8 @@
 		case IXGBE_DEV_ID_82599_T3_LOM:
 		case IXGBE_DEV_ID_X540T:
 		case IXGBE_DEV_ID_X540T1:
+		case IXGBE_DEV_ID_X550T:
+		case IXGBE_DEV_ID_X550EM_X_10G_T:
 			supported = true;
 			break;
 		default:
@@ -463,7 +470,7 @@
 		}
 	}
 
-	if (hw->mac.type == ixgbe_mac_X540) {
+	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
 		if (hw->phy.id == 0)
 			hw->phy.ops.identify(hw);
 		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
@@ -681,7 +688,7 @@
 	bus->lan_id = bus->func;
 
 	/* check for a port swap */
-	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
 	if (reg & IXGBE_FACTPS_LFS)
 		bus->func ^= 0x1;
 }
@@ -799,7 +806,7 @@
 		 * Check for EEPROM present first.
 		 * If not present leave as none
 		 */
-		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 		if (eec & IXGBE_EEC_PRES) {
 			eeprom->type = ixgbe_eeprom_spi;
 
@@ -1283,14 +1290,14 @@
 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
 		return IXGBE_ERR_SWFW_SYNC;
 
-	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 
 	/* Request EEPROM Access */
 	eec |= IXGBE_EEC_REQ;
-	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
 
 	for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
-		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 		if (eec & IXGBE_EEC_GNT)
 			break;
 		udelay(5);
@@ -1299,7 +1306,7 @@
 	/* Release if grant not acquired */
 	if (!(eec & IXGBE_EEC_GNT)) {
 		eec &= ~IXGBE_EEC_REQ;
-		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+		IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
 		hw_dbg(hw, "Could not acquire EEPROM grant\n");
 
 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
@@ -1309,7 +1316,7 @@
 	/* Setup EEPROM for Read/Write */
 	/* Clear CS and SK */
 	eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
-	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
 	IXGBE_WRITE_FLUSH(hw);
 	udelay(1);
 	return 0;
@@ -1333,7 +1340,7 @@
 		 * If the SMBI bit is 0 when we read it, then the bit will be
 		 * set and we have the semaphore
 		 */
-		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
 		if (!(swsm & IXGBE_SWSM_SMBI))
 			break;
 		usleep_range(50, 100);
@@ -1353,7 +1360,7 @@
 		 * If the SMBI bit is 0 when we read it, then the bit will be
 		 * set and we have the semaphore
 		 */
-		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
 		if (swsm & IXGBE_SWSM_SMBI) {
 			hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
 			return IXGBE_ERR_EEPROM;
@@ -1362,16 +1369,16 @@
 
 	/* Now get the semaphore between SW/FW through the SWESMBI bit */
 	for (i = 0; i < timeout; i++) {
-		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
 
 		/* Set the SW EEPROM semaphore bit to request access */
 		swsm |= IXGBE_SWSM_SWESMBI;
-		IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+		IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
 
 		/* If we set the bit successfully then we got the
 		 * semaphore.
 		 */
-		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
 		if (swsm & IXGBE_SWSM_SWESMBI)
 			break;
 
@@ -1400,11 +1407,11 @@
 {
 	u32 swsm;
 
-	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
 
 	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
 	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
-	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+	IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
 	IXGBE_WRITE_FLUSH(hw);
 }
 
@@ -1454,15 +1461,15 @@
 {
 	u32 eec;
 
-	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 
 	/* Toggle CS to flush commands */
 	eec |= IXGBE_EEC_CS;
-	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
 	IXGBE_WRITE_FLUSH(hw);
 	udelay(1);
 	eec &= ~IXGBE_EEC_CS;
-	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
 	IXGBE_WRITE_FLUSH(hw);
 	udelay(1);
 }
@@ -1480,7 +1487,7 @@
 	u32 mask;
 	u32 i;
 
-	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 
 	/*
 	 * Mask is used to shift "count" bits of "data" out to the EEPROM
@@ -1501,7 +1508,7 @@
 		else
 			eec &= ~IXGBE_EEC_DI;
 
-		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+		IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
 		IXGBE_WRITE_FLUSH(hw);
 
 		udelay(1);
@@ -1518,7 +1525,7 @@
 
 	/* We leave the "DI" bit set to "0" when we leave this routine. */
 	eec &= ~IXGBE_EEC_DI;
-	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
 	IXGBE_WRITE_FLUSH(hw);
 }
 
@@ -1539,7 +1546,7 @@
 	 * the value of the "DO" bit.  During this "shifting in" process the
 	 * "DI" bit should always be clear.
 	 */
-	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 
 	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
 
@@ -1547,7 +1554,7 @@
 		data = data << 1;
 		ixgbe_raise_eeprom_clk(hw, &eec);
 
-		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 
 		eec &= ~(IXGBE_EEC_DI);
 		if (eec & IXGBE_EEC_DO)
@@ -1571,7 +1578,7 @@
 	 * (setting the SK bit), then delay
 	 */
 	*eec = *eec | IXGBE_EEC_SK;
-	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec);
 	IXGBE_WRITE_FLUSH(hw);
 	udelay(1);
 }
@@ -1588,7 +1595,7 @@
 	 * delay
 	 */
 	*eec = *eec & ~IXGBE_EEC_SK;
-	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec);
 	IXGBE_WRITE_FLUSH(hw);
 	udelay(1);
 }
@@ -1601,19 +1608,19 @@
 {
 	u32 eec;
 
-	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 
 	eec |= IXGBE_EEC_CS;  /* Pull CS high */
 	eec &= ~IXGBE_EEC_SK; /* Lower SCK */
 
-	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
 	IXGBE_WRITE_FLUSH(hw);
 
 	udelay(1);
 
 	/* Stop requesting EEPROM access */
 	eec &= ~IXGBE_EEC_REQ;
-	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
 
 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f21f8a1..ec015fe 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -118,6 +118,8 @@
 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
 			     u32 headroom, int strategy);
 
+extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT];
+
 #define IXGBE_I2C_THERMAL_SENSOR_ADDR	0xF8
 #define IXGBE_EMC_INTERNAL_DATA		0x00
 #define IXGBE_EMC_INTERNAL_THERM_LIMIT	0x20
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index eafa9ec..ec7b232 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -207,6 +207,7 @@
 	switch (adapter->hw.phy.type) {
 	case ixgbe_phy_tn:
 	case ixgbe_phy_aq:
+	case ixgbe_phy_x550em_ext_t:
 	case ixgbe_phy_cu_unknown:
 		ecmd->supported |= SUPPORTED_TP;
 		ecmd->advertising |= ADVERTISED_TP;
@@ -470,16 +471,16 @@
 	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
 
 	/* NVM Register */
-	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
+	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
-	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
+	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
 	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
 	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
 	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
 	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
 	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
 	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
-	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
+	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));
 
 	/* Interrupt */
 	/* don't read EICR because it can clear interrupt causes, instead
@@ -2594,18 +2595,35 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_fdir_filter *input;
 	union ixgbe_atr_input mask;
+	u8 queue;
 	int err;
 
 	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
 		return -EOPNOTSUPP;
 
-	/*
-	 * Don't allow programming if the action is a queue greater than
-	 * the number of online Rx queues.
+	/* ring_cookie is masked into a set of queues and ixgbe pools, or
+	 * we use the drop index.
 	 */
-	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
-	    (fsp->ring_cookie >= adapter->num_rx_queues))
-		return -EINVAL;
+	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+		queue = IXGBE_FDIR_DROP_QUEUE;
+	} else {
+		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+		if (!vf && (ring >= adapter->num_rx_queues))
+			return -EINVAL;
+		else if (vf &&
+			 ((vf > adapter->num_vfs) ||
+			   ring >= adapter->num_rx_queues_per_pool))
+			return -EINVAL;
+
+		/* Map the ring onto the absolute queue index */
+		if (!vf)
+			queue = adapter->rx_ring[ring]->reg_idx;
+		else
+			queue = ((vf - 1) *
+				adapter->num_rx_queues_per_pool) + ring;
+	}
 
 	/* Don't allow indexes to exist outside of available space */
 	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
@@ -2683,10 +2701,7 @@
 
 	/* program filters to filter memory */
 	err = ixgbe_fdir_write_perfect_filter_82599(hw,
-				&input->filter, input->sw_idx,
-				(input->action == IXGBE_FDIR_DROP_QUEUE) ?
-				IXGBE_FDIR_DROP_QUEUE :
-				adapter->rx_ring[input->action]->reg_idx);
+				&input->filter, input->sw_idx, queue);
 	if (err)
 		goto err_out_w_lock;
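
The ethtool hunks above replace the flat "queue must be below num_rx_queues"
check with a decode of the 64-bit ring_cookie, which can now also direct a
flow into a VF's pool. A minimal sketch of the decode, using the helpers the
patch itself calls (RX_CLS_FLOW_DISC is the reserved drop value; vf == 0
means the PF):

	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		queue = IXGBE_FDIR_DROP_QUEUE;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		queue = vf ? (vf - 1) * adapter->num_rx_queues_per_pool + ring
			   : adapter->rx_ring[ring]->reg_idx;
	}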
 
@@ -3053,7 +3068,7 @@
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 status;
+	s32 status;
 	u8 sff8472_rev, addr_mode;
 	bool page_swap = false;
 
@@ -3061,14 +3076,14 @@
 	status = hw->phy.ops.read_i2c_eeprom(hw,
 					     IXGBE_SFF_SFF_8472_COMP,
 					     &sff8472_rev);
-	if (status != 0)
+	if (status)
 		return -EIO;
 
 	/* addressing mode is not supported */
 	status = hw->phy.ops.read_i2c_eeprom(hw,
 					     IXGBE_SFF_SFF_8472_SWAP,
 					     &addr_mode);
-	if (status != 0)
+	if (status)
 		return -EIO;
 
 	if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
@@ -3095,7 +3110,7 @@
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
 	u8 databyte = 0xFF;
 	int i = 0;
 
@@ -3112,7 +3127,7 @@
 		else
 			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
 
-		if (status != 0)
+		if (status)
 			return -EIO;
 
 		data[i - ee->offset] = databyte;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5be12a0..9aa6104 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -81,6 +81,8 @@
 static const char ixgbe_copyright[] =
 				"Copyright (c) 1999-2014 Intel Corporation.";
 
+static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";
+
 static const struct ixgbe_info *ixgbe_info_tbl[] = {
 	[board_82598]		= &ixgbe_82598_info,
 	[board_82599]		= &ixgbe_82599_info,
@@ -131,6 +133,7 @@
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
 	/* required last entry */
 	{0, }
 };
@@ -2366,7 +2369,7 @@
 		 *  - We may have missed the interrupt so always have to
 		 *    check if we  got a LSC
 		 */
-		if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
+		if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
 		    !(eicr & IXGBE_EICR_LSC))
 			return;
 
@@ -2386,14 +2389,13 @@
 
 		break;
 	default:
-		if (!(eicr & IXGBE_EICR_GPI_SDP0))
+		if (adapter->hw.mac.type >= ixgbe_mac_X540)
+			return;
+		if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
 			return;
 		break;
 	}
-	e_crit(drv,
-	       "Network adapter has been stopped because it has over heated. "
-	       "Restart the computer. If the problem persists, "
-	       "power off the system and replace the adapter\n");
+	e_crit(drv, "%s\n", ixgbe_overheat_msg);
 
 	adapter->interrupt_event = 0;
 }
@@ -2403,15 +2405,17 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 
 	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
-	    (eicr & IXGBE_EICR_GPI_SDP1)) {
+	    (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
 		e_crit(probe, "Fan has stopped, replace the adapter\n");
 		/* write to clear the interrupt */
-		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
 	}
 }
 
 static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
 {
+	struct ixgbe_hw *hw = &adapter->hw;
+
 	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
 		return;
 
@@ -2421,7 +2425,8 @@
 		 * Need to check link state so complete overtemp check
 		 * on service task
 		 */
-		if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) &&
+		if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
+		     (eicr & IXGBE_EICR_LSC)) &&
 		    (!test_bit(__IXGBE_DOWN, &adapter->state))) {
 			adapter->interrupt_event = eicr;
 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
@@ -2437,28 +2442,46 @@
 		return;
 	}
 
-	e_crit(drv,
-	       "Network adapter has been stopped because it has over heated. "
-	       "Restart the computer. If the problem persists, "
-	       "power off the system and replace the adapter\n");
+	e_crit(drv, "%s\n", ixgbe_overheat_msg);
+}
+
+static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
+{
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		if (hw->phy.type == ixgbe_phy_nl)
+			return true;
+		return false;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X550EM_x:
+		switch (hw->mac.ops.get_media_type(hw)) {
+		case ixgbe_media_type_fiber:
+		case ixgbe_media_type_fiber_qsfp:
+			return true;
+		default:
+			return false;
+		}
+	default:
+		return false;
+	}
 }
 
 static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 
-	if (eicr & IXGBE_EICR_GPI_SDP2) {
+	if (eicr & IXGBE_EICR_GPI_SDP2(hw)) {
 		/* Clear the interrupt */
-		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
+		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2(hw));
 		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
 			adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
 			ixgbe_service_event_schedule(adapter);
 		}
 	}
 
-	if (eicr & IXGBE_EICR_GPI_SDP1) {
+	if (eicr & IXGBE_EICR_GPI_SDP1(hw)) {
 		/* Clear the interrupt */
-		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
 		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
 			adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
 			ixgbe_service_event_schedule(adapter);
@@ -2543,6 +2566,7 @@
 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
 				    bool flush)
 {
+	struct ixgbe_hw *hw = &adapter->hw;
 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
 
 	/* don't reenable LSC while waiting for link */
@@ -2552,7 +2576,7 @@
 	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
 		switch (adapter->hw.mac.type) {
 		case ixgbe_mac_82599EB:
-			mask |= IXGBE_EIMS_GPI_SDP0;
+			mask |= IXGBE_EIMS_GPI_SDP0(hw);
 			break;
 		case ixgbe_mac_X540:
 		case ixgbe_mac_X550:
@@ -2563,15 +2587,17 @@
 			break;
 		}
 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
-		mask |= IXGBE_EIMS_GPI_SDP1;
+		mask |= IXGBE_EIMS_GPI_SDP1(hw);
 	switch (adapter->hw.mac.type) {
 	case ixgbe_mac_82599EB:
-		mask |= IXGBE_EIMS_GPI_SDP1;
-		mask |= IXGBE_EIMS_GPI_SDP2;
+		mask |= IXGBE_EIMS_GPI_SDP1(hw);
+		mask |= IXGBE_EIMS_GPI_SDP2(hw);
 		/* fall through */
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+		if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
+			mask |= IXGBE_EICR_GPI_SDP0_X540;
 		mask |= IXGBE_EIMS_ECC;
 		mask |= IXGBE_EIMS_MAILBOX;
 		break;
@@ -2626,6 +2652,13 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+		if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
+		    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
+			adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
+			ixgbe_service_event_schedule(adapter);
+			IXGBE_WRITE_REG(hw, IXGBE_EICR,
+					IXGBE_EICR_GPI_SDP0_X540);
+		}
 		if (eicr & IXGBE_EICR_ECC) {
 			e_info(link, "Received ECC Err, initiating reset\n");
 			adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
@@ -4703,32 +4736,6 @@
 	ixgbe_configure_dfwd(adapter);
 }
 
-static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
-{
-	switch (hw->phy.type) {
-	case ixgbe_phy_sfp_avago:
-	case ixgbe_phy_sfp_ftl:
-	case ixgbe_phy_sfp_intel:
-	case ixgbe_phy_sfp_unknown:
-	case ixgbe_phy_sfp_passive_tyco:
-	case ixgbe_phy_sfp_passive_unknown:
-	case ixgbe_phy_sfp_active_unknown:
-	case ixgbe_phy_sfp_ftl_active:
-	case ixgbe_phy_qsfp_passive_unknown:
-	case ixgbe_phy_qsfp_active_unknown:
-	case ixgbe_phy_qsfp_intel:
-	case ixgbe_phy_qsfp_unknown:
-	/* ixgbe_phy_none is set when no SFP module is present */
-	case ixgbe_phy_none:
-		return true;
-	case ixgbe_phy_nl:
-		if (hw->mac.type == ixgbe_mac_82598EB)
-			return true;
-	default:
-		return false;
-	}
-}
-
 /**
  * ixgbe_sfp_link_config - set up SFP+ link
  * @adapter: pointer to private adapter struct
@@ -4757,7 +4764,7 @@
 {
 	u32 speed;
 	bool autoneg, link_up = false;
-	u32 ret = IXGBE_ERR_LINK_SETUP;
+	int ret = IXGBE_ERR_LINK_SETUP;
 
 	if (hw->mac.ops.check_link)
 		ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
@@ -4833,7 +4840,7 @@
 	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
 		switch (adapter->hw.mac.type) {
 		case ixgbe_mac_82599EB:
-			gpie |= IXGBE_SDP0_GPIEN;
+			gpie |= IXGBE_SDP0_GPIEN_8259X;
 			break;
 		case ixgbe_mac_X540:
 			gpie |= IXGBE_EIMS_TS;
@@ -4845,11 +4852,11 @@
 
 	/* Enable fan failure interrupt */
 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
-		gpie |= IXGBE_SDP1_GPIEN;
+		gpie |= IXGBE_SDP1_GPIEN(hw);
 
 	if (hw->mac.type == ixgbe_mac_82599EB) {
-		gpie |= IXGBE_SDP1_GPIEN;
-		gpie |= IXGBE_SDP2_GPIEN;
+		gpie |= IXGBE_SDP1_GPIEN_8259X;
+		gpie |= IXGBE_SDP2_GPIEN_8259X;
 	}
 
 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -4873,6 +4880,9 @@
 	if (hw->mac.ops.enable_tx_laser)
 		hw->mac.ops.enable_tx_laser(hw);
 
+	if (hw->phy.ops.set_phy_power)
+		hw->phy.ops.set_phy_power(hw, true);
+
 	smp_mb__before_atomic();
 	clear_bit(__IXGBE_DOWN, &adapter->state);
 	ixgbe_napi_enable_all(adapter);
@@ -4992,6 +5002,13 @@
 
 	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
 		ixgbe_ptp_reset(adapter);
+
+	if (hw->phy.ops.set_phy_power) {
+		if (!netif_running(adapter->netdev) && !adapter->wol)
+			hw->phy.ops.set_phy_power(hw, false);
+		else
+			hw->phy.ops.set_phy_power(hw, true);
+	}
 }
 
 /**
@@ -5260,7 +5277,7 @@
 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
 		break;
 	case ixgbe_mac_X540:
-		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
 		break;
@@ -5672,6 +5689,7 @@
 static int ixgbe_open(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
 	int err, queues;
 
 	/* disallow open during test */
@@ -5729,6 +5747,8 @@
 	ixgbe_free_irq(adapter);
 err_req_irq:
 	ixgbe_free_all_rx_resources(adapter);
+	if (hw->phy.ops.set_phy_power && !adapter->wol)
+		hw->phy.ops.set_phy_power(&adapter->hw, false);
 err_setup_rx:
 	ixgbe_free_all_tx_resources(adapter);
 err_setup_tx:
@@ -5889,6 +5909,8 @@
 	}
 
 	*enable_wake = !!wufc;
+	if (hw->phy.ops.set_phy_power && !*enable_wake)
+		hw->phy.ops.set_phy_power(hw, false);
 
 	ixgbe_release_hw_control(adapter);
 
@@ -6718,6 +6740,26 @@
 	ixgbe_service_event_schedule(adapter);
 }
 
+static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 status;
+
+	if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
+		return;
+
+	adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
+
+	if (!hw->phy.ops.handle_lasi)
+		return;
+
+	status = hw->phy.ops.handle_lasi(&adapter->hw);
+	if (status != IXGBE_ERR_OVERTEMP)
+		return;
+
+	e_crit(drv, "%s\n", ixgbe_overheat_msg);
+}
+
 static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
 {
 	if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
@@ -6759,6 +6801,7 @@
 		return;
 	}
 	ixgbe_reset_subtask(adapter);
+	ixgbe_phy_interrupt_subtask(adapter);
 	ixgbe_sfp_detection_subtask(adapter);
 	ixgbe_sfp_link_config_subtask(adapter);
 	ixgbe_check_overtemp_subtask(adapter);
@@ -8022,7 +8065,7 @@
 		return -EINVAL;
 
 	nla_for_each_nested(attr, br_spec, rem) {
-		u32 status;
+		int status;
 		__u16 mode;
 
 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
@@ -8052,7 +8095,8 @@
 		return 0;
 
 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
-				       adapter->bridge_mode, 0, 0, nlflags);
+				       adapter->bridge_mode, 0, 0, nlflags,
+				       filter_mask, NULL);
 }
 
 static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
@@ -8291,6 +8335,10 @@
 		break;
 	case IXGBE_DEV_ID_X540T:
 	case IXGBE_DEV_ID_X540T1:
+	case IXGBE_DEV_ID_X550T:
+	case IXGBE_DEV_ID_X550EM_X_KX4:
+	case IXGBE_DEV_ID_X550EM_X_KR:
+	case IXGBE_DEV_ID_X550EM_X_10G_T:
 		/* check eeprom to see if enabled wol */
 		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
 		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
@@ -8431,10 +8479,11 @@
 	/* Setup hw api */
 	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
 	hw->mac.type  = ii->mac;
+	hw->mvals     = ii->mvals;
 
 	/* EEPROM */
 	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
-	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 	if (ixgbe_removed(hw->hw_addr)) {
 		err = -EIO;
 		goto err_ioremap;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 8a2be44..526a20b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -317,14 +317,14 @@
  **/
 static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
 {
-	u32 status;
+	s32 status;
 	u16 phy_id_high = 0;
 	u16 phy_id_low = 0;
 
 	status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
 				      &phy_id_high);
 
-	if (status == 0) {
+	if (!status) {
 		hw->phy.id = (u32)(phy_id_high << 16);
 		status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
 					      &phy_id_low);
@@ -347,6 +347,7 @@
 	case TN1010_PHY_ID:
 		phy_type = ixgbe_phy_tn;
 		break;
+	case X550_PHY_ID:
 	case X540_PHY_ID:
 		phy_type = ixgbe_phy_aq;
 		break;
@@ -356,6 +357,9 @@
 	case ATH_PHY_ID:
 		phy_type = ixgbe_phy_nl;
 		break;
+	case X557_PHY_ID:
+		phy_type = ixgbe_phy_x550em_ext_t;
+		break;
 	default:
 		phy_type = ixgbe_phy_unknown;
 		break;
@@ -1348,6 +1352,9 @@
 		return IXGBE_ERR_SFP_NOT_PRESENT;
 	}
 
+	/* LAN ID is needed for sfp_type determination */
+	hw->mac.ops.set_lan_id(hw);
+
 	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
 					     &identifier);
 
@@ -1361,9 +1368,6 @@
 
 	hw->phy.id = identifier;
 
-	/* LAN ID is needed for sfp_type determination */
-	hw->mac.ops.set_lan_id(hw);
-
 	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
 					     &comp_codes_10g);
 
@@ -1793,7 +1797,7 @@
  **/
 static void ixgbe_i2c_start(struct ixgbe_hw *hw)
 {
-	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
 
 	/* Start condition must begin with data and clock high */
 	ixgbe_set_i2c_data(hw, &i2cctl, 1);
@@ -1822,7 +1826,7 @@
  **/
 static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
 {
-	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
 
 	/* Stop condition must begin with data low and clock high */
 	ixgbe_set_i2c_data(hw, &i2cctl, 0);
@@ -1880,9 +1884,9 @@
 	}
 
 	/* Release SDA line (set high) */
-	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
-	i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
-	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+	i2cctl |= IXGBE_I2C_DATA_OUT(hw);
+	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
 	IXGBE_WRITE_FLUSH(hw);
 
 	return status;
@@ -1898,7 +1902,7 @@
 {
 	s32 status = 0;
 	u32 i = 0;
-	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
 	u32 timeout = 10;
 	bool ack = true;
 
@@ -1911,7 +1915,7 @@
 	/* Poll for ACK.  Note that an ACK in the I2C spec is a
 	 * transition from 1 to 0 */
 	for (i = 0; i < timeout; i++) {
-		i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+		i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
 		ack = ixgbe_get_i2c_data(hw, &i2cctl);
 
 		udelay(1);
@@ -1941,14 +1945,14 @@
  **/
 static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
 {
-	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
 
 	ixgbe_raise_i2c_clk(hw, &i2cctl);
 
 	/* Minimum high period of clock is 4us */
 	udelay(IXGBE_I2C_T_HIGH);
 
-	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
 	*data = ixgbe_get_i2c_data(hw, &i2cctl);
 
 	ixgbe_lower_i2c_clk(hw, &i2cctl);
@@ -1969,7 +1973,7 @@
 static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
 {
 	s32 status;
-	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
 
 	status = ixgbe_set_i2c_data(hw, &i2cctl, data);
 	if (status == 0) {
@@ -2005,14 +2009,14 @@
 	u32 i2cctl_r = 0;
 
 	for (i = 0; i < timeout; i++) {
-		*i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw);
-		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+		*i2cctl |= IXGBE_I2C_CLK_OUT(hw);
+		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
 		IXGBE_WRITE_FLUSH(hw);
 		/* SCL rise time (1000ns) */
 		udelay(IXGBE_I2C_T_RISE);
 
-		i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
-		if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw))
+		i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+		if (i2cctl_r & IXGBE_I2C_CLK_IN(hw))
 			break;
 	}
 }
@@ -2027,9 +2031,9 @@
 static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
 {
 
-	*i2cctl &= ~IXGBE_I2C_CLK_OUT_BY_MAC(hw);
+	*i2cctl &= ~IXGBE_I2C_CLK_OUT(hw);
 
-	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
 	IXGBE_WRITE_FLUSH(hw);
 
 	/* SCL fall time (300ns) */
@@ -2047,18 +2051,18 @@
 static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
 {
 	if (data)
-		*i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+		*i2cctl |= IXGBE_I2C_DATA_OUT(hw);
 	else
-		*i2cctl &= ~IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+		*i2cctl &= ~IXGBE_I2C_DATA_OUT(hw);
 
-	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
 	IXGBE_WRITE_FLUSH(hw);
 
 	/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
 	udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
 
 	/* Verify data was set correctly */
-	*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+	*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
 	if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
 		hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
 		return IXGBE_ERR_I2C;
@@ -2076,7 +2080,7 @@
  **/
 static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
 {
-	if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw))
+	if (*i2cctl & IXGBE_I2C_DATA_IN(hw))
 		return true;
 	return false;
 }
@@ -2090,7 +2094,7 @@
  **/
 static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
 {
-	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
 	u32 i;
 
 	ixgbe_i2c_start(hw);
@@ -2137,3 +2141,36 @@
 
 	return IXGBE_ERR_OVERTEMP;
 }
+
+/** ixgbe_set_copper_phy_power - Control power for copper phy
+ *  @hw: pointer to hardware structure
+ *  @on: true for on, false for off
+ **/
+s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
+{
+	u32 status;
+	u16 reg;
+
+	/* Bail if we don't have copper phy */
+	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+		return 0;
+
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+	if (status)
+		return status;
+
+	if (on) {
+		reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
+	} else {
+		if (ixgbe_check_reset_blocked(hw))
+			return 0;
+		reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
+	}
+
+	status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+				       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				       reg);
+	return status;
+}
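
ixgbe_set_copper_phy_power() is a plain read-modify-write of the vendor
low-power bit over MDIO, with one guard worth noting: when powering down it
first checks ixgbe_check_reset_blocked(), so a PHY owned by manageability
firmware is left alone. The call sites in the ixgbe_main.c hunks above all
gate it the same way; an illustrative pattern, mirroring the reset path
shown earlier:

	/* power the copper PHY down only when neither the interface
	 * nor Wake-on-LAN still needs it
	 */
	if (hw->phy.ops.set_phy_power) {
		if (!netif_running(adapter->netdev) && !adapter->wol)
			hw->phy.ops.set_phy_power(hw, false);
		else
			hw->phy.ops.set_phy_power(hw, true);
	}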
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 4346438..e45988c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -145,6 +145,7 @@
 					   u16 *firmware_version);
 
 s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
 s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index dd6ba59..b6f424f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -91,14 +91,24 @@
 #define IXGBE_DEV_ID_X550_VF		0x1565
 #define IXGBE_DEV_ID_X550EM_X_VF	0x15A8
 
+#define IXGBE_CAT(r, m)	IXGBE_##r##_##m
+
+#define IXGBE_BY_MAC(_hw, r)	((_hw)->mvals[IXGBE_CAT(r, IDX)])
+
 /* General Registers */
 #define IXGBE_CTRL      0x00000
 #define IXGBE_STATUS    0x00008
 #define IXGBE_CTRL_EXT  0x00018
 #define IXGBE_ESDP      0x00020
 #define IXGBE_EODSDP    0x00028
-#define IXGBE_I2CCTL_BY_MAC(_hw)((((_hw)->mac.type >= ixgbe_mac_X550) ? \
-					0x15F5C : 0x00028))
+
+#define IXGBE_I2CCTL_8259X	0x00028
+#define IXGBE_I2CCTL_X540	IXGBE_I2CCTL_8259X
+#define IXGBE_I2CCTL_X550	0x15F5C
+#define IXGBE_I2CCTL_X550EM_x	IXGBE_I2CCTL_X550
+#define IXGBE_I2CCTL_X550EM_a	IXGBE_I2CCTL_X550
+#define IXGBE_I2CCTL(_hw)	IXGBE_BY_MAC((_hw), I2CCTL)
+
 #define IXGBE_LEDCTL    0x00200
 #define IXGBE_FRTIMER   0x00048
 #define IXGBE_TCPTIMER  0x0004C
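
The IXGBE_CAT()/IXGBE_BY_MAC() plumbing above is what lets the rest of this
series drop the "mac.type >= ixgbe_mac_X550 ? ... : ..." ternaries: each
ixgbe_info now publishes an mvals[] array (see the .mvals =
ixgbe_mvals_8259X hook-ups earlier in this patch), and the per-register
accessor macros index it with a pasted-together enumerator. Roughly how a
lookup expands (the IXGBE_EEC_IDX enumerator is assumed to be defined
alongside IXGBE_MVALS_INIT(), which is not shown in this hunk):

	/* IXGBE_EEC(hw)
	 *   -> IXGBE_BY_MAC((hw), EEC)
	 *   -> (hw)->mvals[IXGBE_CAT(EEC, IDX)]
	 *   -> (hw)->mvals[IXGBE_EEC_IDX]
	 *
	 * so an 8259X/X540/X550 table resolves to the classic 0x10010
	 * offset (see the NVM register hunk below), while an X550EM_a
	 * table holds 0x15FF8 at the same slot.
	 */
	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));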
@@ -106,17 +116,39 @@
 #define IXGBE_EXVET     0x05078
 
 /* NVM Registers */
-#define IXGBE_EEC       0x10010
+#define IXGBE_EEC_8259X		0x10010
+#define IXGBE_EEC_X540		IXGBE_EEC_8259X
+#define IXGBE_EEC_X550		IXGBE_EEC_8259X
+#define IXGBE_EEC_X550EM_x	IXGBE_EEC_8259X
+#define IXGBE_EEC_X550EM_a	0x15FF8
+#define IXGBE_EEC(_hw)		IXGBE_BY_MAC((_hw), EEC)
 #define IXGBE_EERD      0x10014
 #define IXGBE_EEWR      0x10018
-#define IXGBE_FLA       0x1001C
+#define IXGBE_FLA_8259X		0x1001C
+#define IXGBE_FLA_X540		IXGBE_FLA_8259X
+#define IXGBE_FLA_X550		IXGBE_FLA_8259X
+#define IXGBE_FLA_X550EM_x	IXGBE_FLA_8259X
+#define IXGBE_FLA_X550EM_a	0x15F6C
+#define IXGBE_FLA(_hw)		IXGBE_BY_MAC((_hw), FLA)
 #define IXGBE_EEMNGCTL  0x10110
 #define IXGBE_EEMNGDATA 0x10114
 #define IXGBE_FLMNGCTL  0x10118
 #define IXGBE_FLMNGDATA 0x1011C
 #define IXGBE_FLMNGCNT  0x10120
 #define IXGBE_FLOP      0x1013C
-#define IXGBE_GRC       0x10200
+#define IXGBE_GRC_8259X		0x10200
+#define IXGBE_GRC_X540		IXGBE_GRC_8259X
+#define IXGBE_GRC_X550		IXGBE_GRC_8259X
+#define IXGBE_GRC_X550EM_x	IXGBE_GRC_8259X
+#define IXGBE_GRC_X550EM_a	0x15F64
+#define IXGBE_GRC(_hw)		IXGBE_BY_MAC((_hw), GRC)
+
+#define IXGBE_SRAMREL_8259X	0x10210
+#define IXGBE_SRAMREL_X540	IXGBE_SRAMREL_8259X
+#define IXGBE_SRAMREL_X550	IXGBE_SRAMREL_8259X
+#define IXGBE_SRAMREL_X550EM_x	IXGBE_SRAMREL_8259X
+#define IXGBE_SRAMREL_X550EM_a	0x15F6C
+#define IXGBE_SRAMREL(_hw)	IXGBE_BY_MAC((_hw), SRAMREL)
 
 /* General Receive Control */
 #define IXGBE_GRC_MNG  0x00000001 /* Manageability Enable */
@@ -126,14 +158,55 @@
 #define IXGBE_VPDDIAG1  0x10208
 
 /* I2CCTL Bit Masks */
-#define IXGBE_I2C_CLK_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
-					0x00004000 : 0x00000001)
-#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
-					0x00000200 : 0x00000002)
-#define IXGBE_I2C_DATA_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
-					0x00001000 : 0x00000004)
-#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
-					0x00000400 : 0x00000008)
+#define IXGBE_I2C_CLK_IN_8259X		0x00000001
+#define IXGBE_I2C_CLK_IN_X540		IXGBE_I2C_CLK_IN_8259X
+#define IXGBE_I2C_CLK_IN_X550		0x00004000
+#define IXGBE_I2C_CLK_IN_X550EM_x	IXGBE_I2C_CLK_IN_X550
+#define IXGBE_I2C_CLK_IN_X550EM_a	IXGBE_I2C_CLK_IN_X550
+#define IXGBE_I2C_CLK_IN(_hw)		IXGBE_BY_MAC((_hw), I2C_CLK_IN)
+
+#define IXGBE_I2C_CLK_OUT_8259X		0x00000002
+#define IXGBE_I2C_CLK_OUT_X540		IXGBE_I2C_CLK_OUT_8259X
+#define IXGBE_I2C_CLK_OUT_X550		0x00000200
+#define IXGBE_I2C_CLK_OUT_X550EM_x	IXGBE_I2C_CLK_OUT_X550
+#define IXGBE_I2C_CLK_OUT_X550EM_a	IXGBE_I2C_CLK_OUT_X550
+#define IXGBE_I2C_CLK_OUT(_hw)		IXGBE_BY_MAC((_hw), I2C_CLK_OUT)
+
+#define IXGBE_I2C_DATA_IN_8259X		0x00000004
+#define IXGBE_I2C_DATA_IN_X540		IXGBE_I2C_DATA_IN_8259X
+#define IXGBE_I2C_DATA_IN_X550		0x00001000
+#define IXGBE_I2C_DATA_IN_X550EM_x	IXGBE_I2C_DATA_IN_X550
+#define IXGBE_I2C_DATA_IN_X550EM_a	IXGBE_I2C_DATA_IN_X550
+#define IXGBE_I2C_DATA_IN(_hw)		IXGBE_BY_MAC((_hw), I2C_DATA_IN)
+
+#define IXGBE_I2C_DATA_OUT_8259X	0x00000008
+#define IXGBE_I2C_DATA_OUT_X540		IXGBE_I2C_DATA_OUT_8259X
+#define IXGBE_I2C_DATA_OUT_X550		0x00000400
+#define IXGBE_I2C_DATA_OUT_X550EM_x	IXGBE_I2C_DATA_OUT_X550
+#define IXGBE_I2C_DATA_OUT_X550EM_a	IXGBE_I2C_DATA_OUT_X550
+#define IXGBE_I2C_DATA_OUT(_hw)		IXGBE_BY_MAC((_hw), I2C_DATA_OUT)
+
+#define IXGBE_I2C_DATA_OE_N_EN_8259X	0
+#define IXGBE_I2C_DATA_OE_N_EN_X540	IXGBE_I2C_DATA_OE_N_EN_8259X
+#define IXGBE_I2C_DATA_OE_N_EN_X550	0x00000800
+#define IXGBE_I2C_DATA_OE_N_EN_X550EM_x	IXGBE_I2C_DATA_OE_N_EN_X550
+#define IXGBE_I2C_DATA_OE_N_EN_X550EM_a	IXGBE_I2C_DATA_OE_N_EN_X550
+#define IXGBE_I2C_DATA_OE_N_EN(_hw)	IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN)
+
+#define IXGBE_I2C_BB_EN_8259X		0
+#define IXGBE_I2C_BB_EN_X540		IXGBE_I2C_BB_EN_8259X
+#define IXGBE_I2C_BB_EN_X550		0x00000100
+#define IXGBE_I2C_BB_EN_X550EM_x	IXGBE_I2C_BB_EN_X550
+#define IXGBE_I2C_BB_EN_X550EM_a	IXGBE_I2C_BB_EN_X550
+#define IXGBE_I2C_BB_EN(_hw)		IXGBE_BY_MAC((_hw), I2C_BB_EN)
+
+#define IXGBE_I2C_CLK_OE_N_EN_8259X	0
+#define IXGBE_I2C_CLK_OE_N_EN_X540	IXGBE_I2C_CLK_OE_N_EN_8259X
+#define IXGBE_I2C_CLK_OE_N_EN_X550	0x00002000
+#define IXGBE_I2C_CLK_OE_N_EN_X550EM_x	IXGBE_I2C_CLK_OE_N_EN_X550
+#define IXGBE_I2C_CLK_OE_N_EN_X550EM_a	IXGBE_I2C_CLK_OE_N_EN_X550
+#define IXGBE_I2C_CLK_OE_N_EN(_hw)	 IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN)
+
 #define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT	500
 
 #define IXGBE_I2C_THERMAL_SENSOR_ADDR	0xF8
@@ -835,15 +908,36 @@
 #define IXGBE_GSCN_1    0x11024
 #define IXGBE_GSCN_2    0x11028
 #define IXGBE_GSCN_3    0x1102C
-#define IXGBE_FACTPS    0x10150
+#define IXGBE_FACTPS_8259X	0x10150
+#define IXGBE_FACTPS_X540	IXGBE_FACTPS_8259X
+#define IXGBE_FACTPS_X550	IXGBE_FACTPS_8259X
+#define IXGBE_FACTPS_X550EM_x	IXGBE_FACTPS_8259X
+#define IXGBE_FACTPS_X550EM_a	0x15FEC
+#define IXGBE_FACTPS(_hw)	IXGBE_BY_MAC((_hw), FACTPS)
+
 #define IXGBE_PCIEANACTL  0x11040
-#define IXGBE_SWSM      0x10140
-#define IXGBE_FWSM      0x10148
+#define IXGBE_SWSM_8259X	0x10140
+#define IXGBE_SWSM_X540		IXGBE_SWSM_8259X
+#define IXGBE_SWSM_X550		IXGBE_SWSM_8259X
+#define IXGBE_SWSM_X550EM_x	IXGBE_SWSM_8259X
+#define IXGBE_SWSM_X550EM_a	0x15F70
+#define IXGBE_SWSM(_hw)		IXGBE_BY_MAC((_hw), SWSM)
+#define IXGBE_FWSM_8259X	0x10148
+#define IXGBE_FWSM_X540		IXGBE_FWSM_8259X
+#define IXGBE_FWSM_X550		IXGBE_FWSM_8259X
+#define IXGBE_FWSM_X550EM_x	IXGBE_FWSM_8259X
+#define IXGBE_FWSM_X550EM_a	0x15F74
+#define IXGBE_FWSM(_hw)		IXGBE_BY_MAC((_hw), FWSM)
 #define IXGBE_GSSR      0x10160
 #define IXGBE_MREVID    0x11064
 #define IXGBE_DCA_ID    0x11070
 #define IXGBE_DCA_CTRL  0x11074
-#define IXGBE_SWFW_SYNC IXGBE_GSSR
+#define IXGBE_SWFW_SYNC_8259X		IXGBE_GSSR
+#define IXGBE_SWFW_SYNC_X540		IXGBE_SWFW_SYNC_8259X
+#define IXGBE_SWFW_SYNC_X550		IXGBE_SWFW_SYNC_8259X
+#define IXGBE_SWFW_SYNC_X550EM_x	IXGBE_SWFW_SYNC_8259X
+#define IXGBE_SWFW_SYNC_X550EM_a	0x15F78
+#define IXGBE_SWFW_SYNC(_hw)		IXGBE_BY_MAC((_hw), SWFW_SYNC)
 
 /* PCIe registers 82599-specific */
 #define IXGBE_GCR_EXT           0x11050
@@ -855,14 +949,21 @@
 #define IXGBE_PHYDAT_82599      0x11044
 #define IXGBE_PHYCTL_82599      0x11048
 #define IXGBE_PBACLR_82599      0x11068
-#define IXGBE_CIAA_82599        0x11088
-#define IXGBE_CIAD_82599        0x1108C
-#define IXGBE_CIAA_X550         0x11508
-#define IXGBE_CIAD_X550         0x11510
-#define IXGBE_CIAA_BY_MAC(_hw)  ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
-				IXGBE_CIAA_X550 : IXGBE_CIAA_82599))
-#define IXGBE_CIAD_BY_MAC(_hw)  ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
-				IXGBE_CIAD_X550 : IXGBE_CIAD_82599))
+
+#define IXGBE_CIAA_8259X	0x11088
+#define IXGBE_CIAA_X540		IXGBE_CIAA_8259X
+#define IXGBE_CIAA_X550		0x11508
+#define IXGBE_CIAA_X550EM_x	IXGBE_CIAA_X550
+#define IXGBE_CIAA_X550EM_a	IXGBE_CIAA_X550
+#define IXGBE_CIAA(_hw)		IXGBE_BY_MAC((_hw), CIAA)
+
+#define IXGBE_CIAD_8259X	0x1108C
+#define IXGBE_CIAD_X540		IXGBE_CIAD_8259X
+#define IXGBE_CIAD_X550		0x11510
+#define IXGBE_CIAD_X550EM_x	IXGBE_CIAD_X550
+#define IXGBE_CIAD_X550EM_a	IXGBE_CIAD_X550
+#define IXGBE_CIAD(_hw)		IXGBE_BY_MAC((_hw), CIAD)
+
 #define IXGBE_PICAUSE           0x110B0
 #define IXGBE_PIENA             0x110B8
 #define IXGBE_CDQ_MBR_82599     0x110B4
@@ -1204,18 +1305,37 @@
 #define IXGBE_MDIO_AUTO_NEG_CONTROL	0x0 /* AUTO_NEG Control Reg */
 #define IXGBE_MDIO_AUTO_NEG_STATUS	0x1 /* AUTO_NEG Status Reg */
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT	0xC800 /* AUTO_NEG Vendor Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */
+#define IXGBE_MDIO_AUTO_NEG_VEN_LSC	0x1 /* AUTO_NEG Vendor Tx LSC */
 #define IXGBE_MDIO_AUTO_NEG_ADVT	0x10 /* AUTO_NEG Advt Reg */
 #define IXGBE_MDIO_AUTO_NEG_LP		0x13 /* AUTO_NEG LP Status Reg */
 #define IXGBE_MDIO_AUTO_NEG_EEE_ADVT	0x3C /* AUTO_NEG EEE Advt Reg */
 
+#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE	 0x0800 /* Set low power mode */
+
 #define IXGBE_MDIO_TX_VENDOR_ALARMS_3	0xCC02 /* Vendor Alarms 3 Reg */
 #define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */
 #define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
 #define IXGBE_MDIO_POWER_UP_STALL	0x8000 /* Power Up Stall */
+#define IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK	0xFF00 /* int std mask */
+#define IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG	0xFC00 /* chip std int flag */
+#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK	0xFF01 /* int chip-wide mask */
+#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG	0xFC01 /* int chip-wide mask */
+#define IXGBE_MDIO_GLOBAL_ALARM_1		0xCC00 /* Global alarm 1 */
+#define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL	0x4000 /* high temp failure */
+#define IXGBE_MDIO_GLOBAL_INT_MASK		0xD400 /* Global int mask */
+/* autoneg vendor alarm int enable */
+#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN	0x1000
+#define IXGBE_MDIO_GLOBAL_ALARM_1_INT		0x4 /* int in Global alarm 1 */
+#define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN	0x1 /* vendor alarm int enable */
+#define IXGBE_MDIO_GLOBAL_STD_ALM2_INT		0x200 /* vendor alarm2 int mask */
+#define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN	0x4000 /* int high temp enable */
 
 #define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR	0xC30A /* PHY_XS SDA/SCL Addr Reg */
 #define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA	0xC30B /* PHY_XS SDA/SCL Data Reg */
 #define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT	0xC30C /* PHY_XS SDA/SCL Stat Reg */
+#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK	0xD401 /* PHY TX Vendor LASI */
+#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN	0x1 /* PHY TX Vendor LASI enable */
 #define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR	0x9 /* Standard Tx Dis Reg */
 #define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE	0x0001 /* PMD Global Tx Dis */
 
@@ -1233,6 +1353,8 @@
 #define TN1010_PHY_ID    0x00A19410
 #define TNX_FW_REV       0xB
 #define X540_PHY_ID      0x01540200
+#define X550_PHY_ID      0x01540220
+#define X557_PHY_ID      0x01540240
 #define QT2022_PHY_ID    0x0043A400
 #define ATH_PHY_ID       0x03429050
 #define AQ_FW_REV        0x20
@@ -1253,9 +1375,25 @@
 #define IXGBE_CONTROL_SOL_NL     0x0000
 
 /* General purpose Interrupt Enable */
-#define IXGBE_SDP0_GPIEN         0x00000001 /* SDP0 */
-#define IXGBE_SDP1_GPIEN         0x00000002 /* SDP1 */
-#define IXGBE_SDP2_GPIEN         0x00000004 /* SDP2 */
+#define IXGBE_SDP0_GPIEN_8259X		0x00000001 /* SDP0 */
+#define IXGBE_SDP1_GPIEN_8259X		0x00000002 /* SDP1 */
+#define IXGBE_SDP2_GPIEN_8259X		0x00000004 /* SDP2 */
+#define IXGBE_SDP0_GPIEN_X540		0x00000002 /* SDP0 on X540 and X550 */
+#define IXGBE_SDP1_GPIEN_X540		0x00000004 /* SDP1 on X540 and X550 */
+#define IXGBE_SDP2_GPIEN_X540		0x00000008 /* SDP2 on X540 and X550 */
+#define IXGBE_SDP0_GPIEN_X550		IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550		IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550		IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN_X550EM_x	IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550EM_x	IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550EM_x	IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN_X550EM_a	IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550EM_a	IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550EM_a	IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN(_hw)		IXGBE_BY_MAC((_hw), SDP0_GPIEN)
+#define IXGBE_SDP1_GPIEN(_hw)		IXGBE_BY_MAC((_hw), SDP1_GPIEN)
+#define IXGBE_SDP2_GPIEN(_hw)		IXGBE_BY_MAC((_hw), SDP2_GPIEN)
+
 #define IXGBE_GPIE_MSIX_MODE     0x00000010 /* MSI-X mode */
 #define IXGBE_GPIE_OCD           0x00000020 /* Other Clear Disable */
 #define IXGBE_GPIE_EIMEN         0x00000040 /* Immediate Interrupt Enable */
@@ -1417,9 +1555,25 @@
 #define IXGBE_EICR_MNG          0x00400000 /* Manageability Event Interrupt */
 #define IXGBE_EICR_TS           0x00800000 /* Thermal Sensor Event */
 #define IXGBE_EICR_TIMESYNC     0x01000000 /* Timesync Event */
-#define IXGBE_EICR_GPI_SDP0     0x01000000 /* Gen Purpose Interrupt on SDP0 */
-#define IXGBE_EICR_GPI_SDP1     0x02000000 /* Gen Purpose Interrupt on SDP1 */
-#define IXGBE_EICR_GPI_SDP2     0x04000000 /* Gen Purpose Interrupt on SDP2 */
+#define IXGBE_EICR_GPI_SDP0_8259X	0x01000000 /* Gen Purpose INT on SDP0 */
+#define IXGBE_EICR_GPI_SDP1_8259X	0x02000000 /* Gen Purpose INT on SDP1 */
+#define IXGBE_EICR_GPI_SDP2_8259X	0x04000000 /* Gen Purpose INT on SDP2 */
+#define IXGBE_EICR_GPI_SDP0_X540	0x02000000
+#define IXGBE_EICR_GPI_SDP1_X540	0x04000000
+#define IXGBE_EICR_GPI_SDP2_X540	0x08000000
+#define IXGBE_EICR_GPI_SDP0_X550	IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550	IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550	IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0_X550EM_x	IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550EM_x	IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550EM_x	IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0_X550EM_a	IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550EM_a	IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550EM_a	IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0(_hw)	IXGBE_BY_MAC((_hw), EICR_GPI_SDP0)
+#define IXGBE_EICR_GPI_SDP1(_hw)	IXGBE_BY_MAC((_hw), EICR_GPI_SDP1)
+#define IXGBE_EICR_GPI_SDP2(_hw)	IXGBE_BY_MAC((_hw), EICR_GPI_SDP2)
+
 #define IXGBE_EICR_ECC          0x10000000 /* ECC Error */
 #define IXGBE_EICR_PBUR         0x10000000 /* Packet Buffer Handler Error */
 #define IXGBE_EICR_DHER         0x20000000 /* Descriptor Handler Error */
@@ -1435,9 +1589,9 @@
 #define IXGBE_EICS_LSC          IXGBE_EICR_LSC       /* Link Status Change */
 #define IXGBE_EICS_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
 #define IXGBE_EICS_TIMESYNC     IXGBE_EICR_TIMESYNC  /* Timesync Event */
-#define IXGBE_EICS_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
-#define IXGBE_EICS_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
-#define IXGBE_EICS_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP0(_hw)	IXGBE_EICR_GPI_SDP0(_hw)
+#define IXGBE_EICS_GPI_SDP1(_hw)	IXGBE_EICR_GPI_SDP1(_hw)
+#define IXGBE_EICS_GPI_SDP2(_hw)	IXGBE_EICR_GPI_SDP2(_hw)
 #define IXGBE_EICS_ECC          IXGBE_EICR_ECC       /* ECC Error */
 #define IXGBE_EICS_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */
 #define IXGBE_EICS_DHER         IXGBE_EICR_DHER      /* Desc Handler Error */
@@ -1454,9 +1608,9 @@
 #define IXGBE_EIMS_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
 #define IXGBE_EIMS_TS           IXGBE_EICR_TS        /* Thermal Sensor Event */
 #define IXGBE_EIMS_TIMESYNC     IXGBE_EICR_TIMESYNC  /* Timesync Event */
-#define IXGBE_EIMS_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP0(_hw)	IXGBE_EICR_GPI_SDP0(_hw)
+#define IXGBE_EIMS_GPI_SDP1(_hw)	IXGBE_EICR_GPI_SDP1(_hw)
+#define IXGBE_EIMS_GPI_SDP2(_hw)	IXGBE_EICR_GPI_SDP2(_hw)
 #define IXGBE_EIMS_ECC          IXGBE_EICR_ECC       /* ECC Error */
 #define IXGBE_EIMS_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */
 #define IXGBE_EIMS_DHER         IXGBE_EICR_DHER      /* Descr Handler Error */
@@ -1472,9 +1626,9 @@
 #define IXGBE_EIMC_LSC          IXGBE_EICR_LSC       /* Link Status Change */
 #define IXGBE_EIMC_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
 #define IXGBE_EIMC_TIMESYNC     IXGBE_EICR_TIMESYNC  /* Timesync Event */
-#define IXGBE_EIMC_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP0(_hw)	IXGBE_EICR_GPI_SDP0(_hw)
+#define IXGBE_EIMC_GPI_SDP1(_hw)	IXGBE_EICR_GPI_SDP1(_hw)
+#define IXGBE_EIMC_GPI_SDP2(_hw)	IXGBE_EICR_GPI_SDP2(_hw)
 #define IXGBE_EIMC_ECC          IXGBE_EICR_ECC       /* ECC Error */
 #define IXGBE_EIMC_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */
 #define IXGBE_EIMC_DHER         IXGBE_EICR_DHER      /* Desc Handler Err */
@@ -2741,6 +2895,37 @@
 	__be32 dword;
 };
 
+#define IXGBE_MVALS_INIT(m)		\
+	IXGBE_CAT(EEC, m),		\
+	IXGBE_CAT(FLA, m),		\
+	IXGBE_CAT(GRC, m),		\
+	IXGBE_CAT(SRAMREL, m),		\
+	IXGBE_CAT(FACTPS, m),		\
+	IXGBE_CAT(SWSM, m),		\
+	IXGBE_CAT(SWFW_SYNC, m),	\
+	IXGBE_CAT(FWSM, m),		\
+	IXGBE_CAT(SDP0_GPIEN, m),	\
+	IXGBE_CAT(SDP1_GPIEN, m),	\
+	IXGBE_CAT(SDP2_GPIEN, m),	\
+	IXGBE_CAT(EICR_GPI_SDP0, m),	\
+	IXGBE_CAT(EICR_GPI_SDP1, m),	\
+	IXGBE_CAT(EICR_GPI_SDP2, m),	\
+	IXGBE_CAT(CIAA, m),		\
+	IXGBE_CAT(CIAD, m),		\
+	IXGBE_CAT(I2C_CLK_IN, m),	\
+	IXGBE_CAT(I2C_CLK_OUT, m),	\
+	IXGBE_CAT(I2C_DATA_IN, m),	\
+	IXGBE_CAT(I2C_DATA_OUT, m),	\
+	IXGBE_CAT(I2C_DATA_OE_N_EN, m),	\
+	IXGBE_CAT(I2C_BB_EN, m),	\
+	IXGBE_CAT(I2C_CLK_OE_N_EN, m),	\
+	IXGBE_CAT(I2CCTL, m)
+
+enum ixgbe_mvals {
+	IXGBE_MVALS_INIT(IDX),
+	IXGBE_MVALS_IDX_LIMIT
+};
+
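The block above is the heart of the per-MAC register dispatch: IXGBE_MVALS_INIT(IDX) expands into a list of enum indices (IXGBE_EEC_IDX, IXGBE_FLA_IDX, ...), while the same macro instantiated with a MAC suffix builds a matching table of register values. The helper macros are defined elsewhere in ixgbe_type.h and are not part of this hunk; a minimal sketch of how they are assumed to fit together:

	/* Sketch only -- these definitions live elsewhere in ixgbe_type.h
	 * and are assumed here, not introduced by this patch. */
	#define IXGBE_CAT(r, m)		IXGBE_##r##_##m
	#define IXGBE_BY_MAC(_hw, _reg)	((_hw)->mvals[IXGBE_CAT(_reg, IDX)])

	/* So IXGBE_EEC(hw) becomes hw->mvals[IXGBE_EEC_IDX], and each MAC
	 * family points hw->mvals at its own table, for example:
	 *   static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
	 *           IXGBE_MVALS_INIT(X540)   (i.e. { IXGBE_EEC_X540, ... })
	 *   };
	 */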
 enum ixgbe_eeprom_type {
 	ixgbe_eeprom_uninitialized = 0,
 	ixgbe_eeprom_spi,
@@ -3112,6 +3297,8 @@
 	s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
 	s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
 	s32 (*check_overtemp)(struct ixgbe_hw *);
+	s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
+	s32 (*handle_lasi)(struct ixgbe_hw *hw);
 };
 
 struct ixgbe_eeprom_info {
@@ -3173,6 +3360,7 @@
 	bool                            multispeed_fiber;
 	bool                            reset_if_overtemp;
 	bool                            qsfp_shared_i2c_bus;
+	u32				nw_mng_if_sel;
 };
 
 #include "ixgbe_mbx.h"
@@ -3216,6 +3404,7 @@
 	struct ixgbe_eeprom_info	eeprom;
 	struct ixgbe_bus_info		bus;
 	struct ixgbe_mbx_info		mbx;
+	const u32			*mvals;
 	u16				device_id;
 	u16				vendor_id;
 	u16				subsystem_device_id;
@@ -3234,6 +3423,7 @@
 	struct ixgbe_eeprom_operations	*eeprom_ops;
 	struct ixgbe_phy_operations	*phy_ops;
 	struct ixgbe_mbx_operations	*mbx_ops;
+	const u32			*mvals;
 };
 
 
@@ -3339,4 +3529,6 @@
 #define IXGBE_SB_IOSF_TARGET_KX4_PCS0	2
 #define IXGBE_SB_IOSF_TARGET_KX4_PCS1	3
 
+#define IXGBE_NW_MNG_IF_SEL		0x00011178
+#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE	BIT(24)
 #endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index f5f948d..032a587 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -202,7 +202,7 @@
 		eeprom->semaphore_delay = 10;
 		eeprom->type = ixgbe_flash;
 
-		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
 				    IXGBE_EEC_SIZE_SHIFT);
 		eeprom->word_size = 1 << (eeprom_size +
@@ -504,8 +504,8 @@
 		return status;
 	}
 
-	flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
-	IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+	flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw)) | IXGBE_EEC_FLUP;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup);
 
 	status = ixgbe_poll_flash_update_done_X540(hw);
 	if (status == 0)
@@ -514,11 +514,11 @@
 		hw_dbg(hw, "Flash update time out\n");
 
 	if (hw->revision_id == 0) {
-		flup = IXGBE_READ_REG(hw, IXGBE_EEC);
+		flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 
 		if (flup & IXGBE_EEC_SEC1VAL) {
 			flup |= IXGBE_EEC_FLUP;
-			IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+			IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup);
 		}
 
 		status = ixgbe_poll_flash_update_done_X540(hw);
@@ -544,7 +544,7 @@
 	u32 reg;
 
 	for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
-		reg = IXGBE_READ_REG(hw, IXGBE_EEC);
+		reg = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 		if (reg & IXGBE_EEC_FLUDONE)
 			return 0;
 		udelay(5);
@@ -580,10 +580,10 @@
 		if (ixgbe_get_swfw_sync_semaphore(hw))
 			return IXGBE_ERR_SWFW_SYNC;
 
-		swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+		swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
 		if (!(swfw_sync & (fwmask | swmask | hwmask))) {
 			swfw_sync |= swmask;
-			IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+			IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
 			ixgbe_release_swfw_sync_semaphore(hw);
 			break;
 		} else {
@@ -605,13 +605,13 @@
 	 * corresponding FW/HW bits in the SW_FW_SYNC register.
 	 */
 	if (i >= timeout) {
-		swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+		swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
 		if (swfw_sync & (fwmask | hwmask)) {
 			if (ixgbe_get_swfw_sync_semaphore(hw))
 				return IXGBE_ERR_SWFW_SYNC;
 
 			swfw_sync |= swmask;
-			IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+			IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
 			ixgbe_release_swfw_sync_semaphore(hw);
 		}
 	}
@@ -635,9 +635,9 @@
 
 	ixgbe_get_swfw_sync_semaphore(hw);
 
-	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
 	swfw_sync &= ~swmask;
-	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
 
 	ixgbe_release_swfw_sync_semaphore(hw);
 	usleep_range(5000, 10000);
@@ -660,7 +660,7 @@
 		/* If the SMBI bit is 0 when we read it, then the bit will be
 		 * set and we have the semaphore
 		 */
-		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
 		if (!(swsm & IXGBE_SWSM_SMBI))
 			break;
 		usleep_range(50, 100);
@@ -674,7 +674,7 @@
 
 	/* Now get the semaphore between SW/FW through the REGSMP bit */
 	for (i = 0; i < timeout; i++) {
-		swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+		swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
 		if (!(swsm & IXGBE_SWFW_REGSMP))
 			return 0;
 
@@ -696,13 +696,13 @@
 
 	/* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
 
-	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-	swsm &= ~IXGBE_SWSM_SMBI;
-	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
-
-	swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+	swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
 	swsm &= ~IXGBE_SWFW_REGSMP;
-	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swsm);
+
+	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
+	swsm &= ~IXGBE_SWSM_SMBI;
+	IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
 
 	IXGBE_WRITE_FLUSH(hw);
 }
@@ -850,9 +850,14 @@
 	.read_i2c_eeprom        = &ixgbe_read_i2c_eeprom_generic,
 	.write_i2c_eeprom       = &ixgbe_write_i2c_eeprom_generic,
 	.check_overtemp         = &ixgbe_tn_check_overtemp,
+	.set_phy_power          = &ixgbe_set_copper_phy_power,
 	.get_firmware_version   = &ixgbe_get_phy_firmware_version_generic,
 };
 
+static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
+	IXGBE_MVALS_INIT(X540)
+};
+
 struct ixgbe_info ixgbe_X540_info = {
 	.mac                    = ixgbe_mac_X540,
 	.get_invariants         = &ixgbe_get_invariants_X540,
@@ -860,4 +865,5 @@
 	.eeprom_ops             = &eeprom_ops_X540,
 	.phy_ops                = &phy_ops_X540,
 	.mbx_ops                = &mbx_ops_generic,
+	.mvals			= ixgbe_mvals_X540,
 };
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index cf5cf81..7581da1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  *  Intel 10 Gigabit PCI Express Linux driver
- *  Copyright(c) 1999 - 2014 Intel Corporation.
+ *  Copyright(c) 1999 - 2015 Intel Corporation.
  *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms and conditions of the GNU General Public License,
@@ -26,6 +26,22 @@
 #include "ixgbe_common.h"
 #include "ixgbe_phy.h"
 
+/** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
+ *  @hw: pointer to hardware structure
+ **/
+static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
+{
+	u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+	if (hw->bus.lan_id) {
+		esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
+		esdp |= IXGBE_ESDP_SDP1_DIR;
+	}
+	esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
+	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+	IXGBE_WRITE_FLUSH(hw);
+}
+
 /** ixgbe_identify_phy_x550em - Get PHY type based on device id
  *  @hw: pointer to hardware structure
  *
@@ -33,18 +49,11 @@
  */
 static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
 {
-	u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-
 	switch (hw->device_id) {
 	case IXGBE_DEV_ID_X550EM_X_SFP:
 		/* set up for CS4227 usage */
 		hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
-		if (hw->bus.lan_id) {
-			esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
-			esdp |= IXGBE_ESDP_SDP1_DIR;
-		}
-		esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
-		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		ixgbe_setup_mux_ctl(hw);
 
 		return ixgbe_identify_module_generic(hw);
 	case IXGBE_DEV_ID_X550EM_X_KX4:
@@ -90,7 +99,7 @@
 		eeprom->semaphore_delay = 10;
 		eeprom->type = ixgbe_flash;
 
-		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
 				    IXGBE_EEC_SIZE_SHIFT);
 		eeprom->word_size = 1 << (eeprom_size +
@@ -103,6 +112,39 @@
 	return 0;
 }
 
+/**
+ * ixgbe_iosf_wait - Wait for IOSF command completion
+ * @hw: pointer to hardware structure
+ * @ctrl: pointer to location to receive final IOSF control value
+ *
+ * Return: failure status on timeout, 0 otherwise
+ *
+ * Note: ctrl can be NULL if the IOSF control register value is not needed
+ */
+static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
+{
+	u32 i, command;
+
+	/* Check every 10 usec to see if the address cycle completed.
+	 * The SB IOSF BUSY bit will clear when the operation is
+	 * complete.
+	 */
+	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+		command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
+		if (!(command & IXGBE_SB_IOSF_CTRL_BUSY))
+			break;
+		usleep_range(10, 20);
+	}
+	if (ctrl)
+		*ctrl = command;
+	if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+		hw_dbg(hw, "IOSF wait timed out\n");
+		return IXGBE_ERR_PHY;
+	}
+
+	return 0;
+}
+
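This helper factors out the BUSY poll that both IOSF accessors below need: once before programming the control register (to make sure the bus is idle) and once after (to wait for completion). A condensed sketch of the calling pattern, with locking and error handling trimmed:

	ret = ixgbe_iosf_wait(hw, NULL);	/* bus idle before we start? */
	IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
	ret = ixgbe_iosf_wait(hw, &command);	/* poll until BUSY clears */
	if (!ret)
		*data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);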
 /** ixgbe_read_iosf_sb_reg_x550 - Reads a value from a specified register of the
  *  IOSF device
  *  @hw: pointer to hardware structure
@@ -113,7 +155,17 @@
 static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
 				       u32 device_type, u32 *data)
 {
-	u32 i, command, error;
+	u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
+	u32 command, error;
+	s32 ret;
+
+	ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
+	if (ret)
+		return ret;
+
+	ret = ixgbe_iosf_wait(hw, NULL);
+	if (ret)
+		goto out;
 
 	command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
 		   (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
@@ -121,17 +173,7 @@
 	/* Write IOSF control register */
 	IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
 
-	/* Check every 10 usec to see if the address cycle completed.
-	 * The SB IOSF BUSY bit will clear when the operation is
-	 * complete
-	 */
-	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-		usleep_range(10, 20);
-
-		command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
-		if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
-			break;
-	}
+	ret = ixgbe_iosf_wait(hw, &command);
 
 	if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
 		error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
@@ -140,14 +182,12 @@
 		return IXGBE_ERR_PHY;
 	}
 
-	if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
-		hw_dbg(hw, "Read timed out\n");
-		return IXGBE_ERR_PHY;
-	}
+	if (!ret)
+		*data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
 
-	*data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
-
-	return 0;
+out:
+	hw->mac.ops.release_swfw_sync(hw, gssr);
+	return ret;
 }
 
 /** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
@@ -673,6 +713,249 @@
 	return status;
 }
 
+/** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the
+ *  IOSF device
+ *
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit PHY register to write
+ *  @device_type: 3 bit device type
+ *  @data: Data to write to the register
+ **/
+static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+					u32 device_type, u32 data)
+{
+	u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
+	u32 command, error;
+	s32 ret;
+
+	ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
+	if (ret)
+		return ret;
+
+	ret = ixgbe_iosf_wait(hw, NULL);
+	if (ret)
+		goto out;
+
+	command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+		   (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+	/* Write IOSF control register */
+	IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+	/* Write IOSF data register */
+	IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
+
+	ret = ixgbe_iosf_wait(hw, &command);
+
+	if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
+		error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+			 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+		hw_dbg(hw, "Failed to write, error %x\n", error);
+		ret = IXGBE_ERR_PHY;
+	}
+
+out:
+	hw->mac.ops.release_swfw_sync(hw, gssr);
+	return ret;
+}
+
+/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
+ *  @hw: pointer to hardware structure
+ *  @speed: the link speed to force
+ *
+ *  Configures the integrated KR PHY to use iXFI mode. Used to connect an
+ *  internal and external PHY at a specific speed, without autonegotiation.
+ **/
+static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+{
+	s32 status;
+	u32 reg_val;
+
+	/* Disable AN and force speed to 10G Serial. */
+	status = ixgbe_read_iosf_sb_reg_x550(hw,
+					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+					IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status)
+		return status;
+
+	reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+	reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+
+	/* Select forced link speed for internal PHY. */
+	switch (*speed) {
+	case IXGBE_LINK_SPEED_10GB_FULL:
+		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
+		break;
+	case IXGBE_LINK_SPEED_1GB_FULL:
+		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+		break;
+	default:
+		/* Other link speeds are not supported by internal KR PHY. */
+		return IXGBE_ERR_LINK_SETUP;
+	}
+
+	status = ixgbe_write_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+	if (status)
+		return status;
+
+	/* Disable training protocol FSM. */
+	status = ixgbe_read_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status)
+		return status;
+
+	reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
+	status = ixgbe_write_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+	if (status)
+		return status;
+
+	/* Disable Flex from training TXFFE. */
+	status = ixgbe_read_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status)
+		return status;
+
+	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+	status = ixgbe_write_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+	if (status)
+		return status;
+
+	status = ixgbe_read_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status)
+		return status;
+
+	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+	status = ixgbe_write_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+	if (status)
+		return status;
+
+	/* Enable override for coefficients. */
+	status = ixgbe_read_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status)
+		return status;
+
+	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
+	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
+	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
+	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
+	status = ixgbe_write_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+	if (status)
+		return status;
+
+	/* Toggle port SW reset by AN reset. */
+	status = ixgbe_read_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status)
+		return status;
+
+	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+	status = ixgbe_write_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+	return status;
+}
+
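Every step in ixgbe_setup_ixfi_x550em above is the same read-modify-write cycle against a different KRM register. A hypothetical helper (not part of this patch) shows the shape of each step:

	/* Hypothetical helper, for illustration only: one IOSF
	 * read-modify-write against the internal KR PHY. */
	static s32 ixgbe_krm_rmw(struct ixgbe_hw *hw, u32 reg, u32 clr, u32 set)
	{
		u32 val;
		s32 status;

		status = ixgbe_read_iosf_sb_reg_x550(hw, reg,
						     IXGBE_SB_IOSF_TARGET_KR_PHY,
						     &val);
		if (status)
			return status;

		val = (val & ~clr) | set;
		return ixgbe_write_iosf_sb_reg_x550(hw, reg,
						    IXGBE_SB_IOSF_TARGET_KR_PHY,
						    val);
	}

Disabling the training FSM, for example, would reduce to ixgbe_krm_rmw(hw, IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), 0, IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL).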
+/**
+ * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
+ *
+ * Setup internal/external PHY link speed based on link speed, then set
+ * external PHY auto advertised link speed.
+ *
+ * Returns error status for any failure
+ **/
+static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
+					 ixgbe_link_speed speed,
+					 bool autoneg_wait)
+{
+	s32 status;
+	ixgbe_link_speed force_speed;
+
+	/* Setup internal/external PHY link speed to iXFI (10G), unless
+	 * only 1G is auto advertised then setup KX link.
+	 */
+	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+		force_speed = IXGBE_LINK_SPEED_10GB_FULL;
+	else
+		force_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+	/* If internal link mode is XFI, then setup XFI internal link. */
+	if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+		status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
+
+		if (status)
+			return status;
+	}
+
+	return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/** ixgbe_check_link_t_X550em - Determine link and speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @link_up: true when link is up
+ *  @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ *  Check that both the MAC and X557 external PHY have link.
+ **/
+static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
+				     ixgbe_link_speed *speed,
+				     bool *link_up,
+				     bool link_up_wait_to_complete)
+{
+	u32 status;
+	u16 autoneg_status;
+
+	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+		return IXGBE_ERR_CONFIG;
+
+	status = ixgbe_check_mac_link_generic(hw, speed, link_up,
+					      link_up_wait_to_complete);
+
+	/* If check link fails or MAC link is not up, then return */
+	if (status || !(*link_up))
+		return status;
+
+	/* MAC link is up, so check external PHY link.
+	 * Read this twice back to back to indicate current status.
+	 */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      &autoneg_status);
+	if (status)
+		return status;
+
+	/* If external PHY link is not up, then indicate link not up */
+	if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
+		*link_up = false;
+
+	return 0;
+}
+
 /** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
  *  @hw: pointer to hardware structure
  **/
@@ -680,13 +963,21 @@
 {
 	struct ixgbe_mac_info *mac = &hw->mac;
 
-	/* CS4227 does not support autoneg, so disable the laser control
-	 * functions for SFP+ fiber
-	 */
-	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
+	switch (mac->ops.get_media_type(hw)) {
+	case ixgbe_media_type_fiber:
+		/* CS4227 does not support autoneg, so disable the laser control
+		 * functions for SFP+ fiber
+		 */
 		mac->ops.disable_tx_laser = NULL;
 		mac->ops.enable_tx_laser = NULL;
 		mac->ops.flap_tx_laser = NULL;
+		break;
+	case ixgbe_media_type_copper:
+		mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
+		mac->ops.check_link = ixgbe_check_link_t_X550em;
+		break;
+	default:
+		break;
 	}
 }
 
@@ -778,169 +1069,221 @@
 	return 0;
 }
 
-/** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the
- *  IOSF device
+/**
+ * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
+ * @hw: pointer to hardware structure
+ * @lsc: pointer to boolean flag which indicates whether external Base T
+ *	 PHY interrupt is lsc
  *
- *  @hw: pointer to hardware structure
- *  @reg_addr: 32 bit PHY register to write
- *  @device_type: 3 bit device type
- *  @data: Data to write to the register
+ * Determine if external Base T PHY interrupt cause is high temperature
+ * failure alarm or link status change.
+ *
+ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+ * failure alarm, else return PHY access status.
  **/
-static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
-					u32 device_type, u32 data)
+static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
 {
-	u32 i, command, error;
+	u32 status;
+	u16 reg;
 
-	command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
-		   (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+	*lsc = false;
 
-	/* Write IOSF control register */
-	IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+	/* Vendor alarm triggered */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
 
-	/* Write IOSF data register */
-	IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
+	if (status || !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
+		return status;
 
-	/* Check every 10 usec to see if the address cycle completed.
-	 * The SB IOSF BUSY bit will clear when the operation is
-	 * complete
-	 */
-	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-		usleep_range(10, 20);
+	/* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
 
-		command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
-		if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
-			break;
+	if (status || !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
+				IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
+		return status;
+
+	/* High temperature failure alarm triggered */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+
+	if (status)
+		return status;
+
+	/* If high temperature failure, then return over temp error and exit */
+	if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
+		/* power down the PHY in case the PHY FW didn't already */
+		ixgbe_set_copper_phy_power(hw, false);
+		return IXGBE_ERR_OVERTEMP;
 	}
 
-	if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
-		error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
-			 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
-		hw_dbg(hw, "Failed to write, error %x\n", error);
-		return IXGBE_ERR_PHY;
-	}
+	/* Vendor alarm 2 triggered */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
 
-	if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
-		hw_dbg(hw, "Write timed out\n");
-		return IXGBE_ERR_PHY;
-	}
+	if (status || !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
+		return status;
+
+	/* link connect/disconnect event occurred */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+	if (status)
+		return status;
+
+	/* Indicate LSC */
+	if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
+		*lsc = true;
 
 	return 0;
 }
 
-/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
- *  @hw: pointer to hardware structure
- *  @speed: the link speed to force
+/**
+ * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
+ * @hw: pointer to hardware structure
  *
- *  Configures the integrated KR PHY to use iXFI mode. Used to connect an
- *  internal and external PHY at a specific speed, without autonegotiation.
+ * Enable link status change and temperature failure alarm for the external
+ * Base T PHY.
+ *
+ * Returns PHY access status
  **/
-static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+{
+	u32 status;
+	u16 reg;
+	bool lsc;
+
+	/* Clear interrupt flags */
+	status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+
+	/* Enable link status change alarm */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+	if (status)
+		return status;
+
+	reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
+
+	status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+				       IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
+	if (status)
+		return status;
+
+	/* Enable high temperature failure alarm */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+	if (status)
+		return status;
+
+	reg |= IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN;
+
+	status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
+				       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				       reg);
+	if (status)
+		return status;
+
+	/* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+	if (status)
+		return status;
+
+	reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
+		IXGBE_MDIO_GLOBAL_ALARM_1_INT);
+
+	status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
+				       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				       reg);
+	if (status)
+		return status;
+
+	/* Enable chip-wide vendor alarm */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+	if (status)
+		return status;
+
+	reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
+
+	status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
+				       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				       reg);
+
+	return status;
+}
+
+/**
+ * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
+ * @hw: pointer to hardware structure
+ *
+ * Handle external Base T PHY interrupt. If a high temperature failure
+ * alarm is raised, return an error; if a link status change occurred,
+ * set up the internal/external PHY link.
+ *
+ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+ * failure alarm, else return PHY access status.
+ **/
+static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+{
+	struct ixgbe_phy_info *phy = &hw->phy;
+	bool lsc;
+	u32 status;
+
+	status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+	if (status)
+		return status;
+
+	if (lsc)
+		return phy->ops.setup_internal_link(hw);
+
+	return 0;
+}
+
+/**
+ * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
+ * @hw: pointer to hardware structure
+ * @speed: link speed
+ *
+ * Configures the integrated KR PHY.
+ **/
+static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
+				       ixgbe_link_speed speed)
 {
 	s32 status;
 	u32 reg_val;
 
-	/* Disable AN and force speed to 10G Serial. */
 	status = ixgbe_read_iosf_sb_reg_x550(hw,
 					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
 					IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
 	if (status)
 		return status;
 
-	reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
-	reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+	reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ |
+		     IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC);
+	reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
+		     IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
 
-	/* Select forced link speed for internal PHY. */
-	switch (*speed) {
-	case IXGBE_LINK_SPEED_10GB_FULL:
-		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
-		break;
-	case IXGBE_LINK_SPEED_1GB_FULL:
-		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
-		break;
-	default:
-		/* Other link speeds are not supported by internal KR PHY. */
-		return IXGBE_ERR_LINK_SETUP;
-	}
+	/* Advertise 10G support. */
+	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
 
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-	if (status)
-		return status;
+	/* Advertise 1G support. */
+	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
 
-	/* Disable training protocol FSM. */
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
-	if (status)
-		return status;
-
-	reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-	if (status)
-		return status;
-
-	/* Disable Flex from training TXFFE. */
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
-	if (status)
-		return status;
-
-	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
-	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
-	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-	if (status)
-		return status;
-
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
-	if (status)
-		return status;
-
-	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
-	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
-	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-	if (status)
-		return status;
-
-	/* Enable override for coefficients. */
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
-	if (status)
-		return status;
-
-	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
-	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
-	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
-	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-	if (status)
-		return status;
-
-	/* Toggle port SW reset by AN reset. */
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
-	if (status)
-		return status;
-
+	/* Restart auto-negotiation. */
 	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
 	status = ixgbe_write_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+					IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
 
 	return status;
 }
@@ -990,85 +1333,82 @@
  **/
 static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
 {
-	s32 status;
-	u32 reg_val;
-
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
-					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
-					IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
-	if (status)
-		return status;
-
-	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
-	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ;
-	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
-	reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
-		     IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
-
-	/* Advertise 10G support. */
-	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
-		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
-
-	/* Advertise 1G support. */
-	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
-		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
-
-	/* Restart auto-negotiation. */
-	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
-					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
-					IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
-	return status;
+	return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
 }
 
-/** ixgbe_setup_internal_phy_x550em - Configure integrated KR PHY
+/** ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
+ *  @hw: address of hardware structure
+ *  @link_up: address of boolean to indicate link status
+ *
+ *  Returns error code if unable to get link status.
+ **/
+static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
+{
+	u32 ret;
+	u16 autoneg_status;
+
+	*link_up = false;
+
+	/* read this twice back to back to indicate current status */
+	ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+				   IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				   &autoneg_status);
+	if (ret)
+		return ret;
+
+	ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+				   IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				   &autoneg_status);
+	if (ret)
+		return ret;
+
+	*link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
+
+	return 0;
+}
+
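The back-to-back reads here (and in ixgbe_check_link_t_X550em above) are deliberate. Assuming the usual IEEE 802.3 latched-low semantics for the auto-negotiation link-status bit:

	/* Assuming latched-low semantics (IEEE 802.3, MDIO AN status):
	 *
	 *   1st read: returns 0 if the link dropped at any point since the
	 *             last read, and clears the latch
	 *   2nd read: reflects the live link state
	 *
	 * which is why only the second autoneg_status value is used. */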
+/** ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
  *  @hw: pointer to hardware structure
  *
- *  Configures the integrated KR PHY to talk to the external PHY. The base
- *  driver will call this function when it gets notification via interrupt from
- *  the external PHY. This function forces the internal PHY into iXFI mode at
- *  the correct speed.
+ *  Configures the link between the integrated KR PHY and the external X557 PHY.
+ *  The driver will call this function when it gets a link status change
+ *  interrupt from the X557 PHY. This function configures the link speed
+ *  between the PHYs to match the link speed of the BASE-T link.
  *
- *  A return of a non-zero value indicates an error, and the base driver should
- *  not report link up.
+ * A return of a non-zero value indicates an error, and the base driver should
+ * not report link up.
  **/
-static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
 {
-	u32 status;
-	u16 lasi, autoneg_status, speed;
 	ixgbe_link_speed force_speed;
+	bool link_up;
+	u32 status;
+	u16 speed;
 
-	/* Verify that the external link status has changed */
-	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_XENPAK_LASI_STATUS,
-				      IXGBE_MDIO_PMA_PMD_DEV_TYPE, &lasi);
+	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+		return IXGBE_ERR_CONFIG;
+
+	/* If link is not up, then there is no setup necessary so return */
+	status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
 	if (status)
 		return status;
 
-	/* If there was no change in link status, we can just exit */
-	if (!(lasi & IXGBE_XENPAK_LASI_LINK_STATUS_ALARM))
+	if (!link_up)
 		return 0;
 
-	/* we read this twice back to back to indicate current status */
-	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
-				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				      &autoneg_status);
-	if (status)
-		return status;
-
-	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
-				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				      &autoneg_status);
-	if (status)
-		return status;
-
-	/* If link is not up return an error indicating treat link as down */
-	if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
-		return IXGBE_ERR_INVALID_LINK_SETTINGS;
-
 	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
 				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
 				      &speed);
+	if (status)
+		return status;
+
+	/* If link is still not up, then no setup is necessary so return */
+	status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+	if (status)
+		return status;
+
+	if (!link_up)
+		return 0;
 
 	/* clear everything but the speed and duplex bits */
 	speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
@@ -1088,6 +1428,22 @@
 	return ixgbe_setup_ixfi_x550em(hw, &force_speed);
 }
 
+/** ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
+ *  @hw: pointer to hardware structure
+ **/
+static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
+{
+	s32 status;
+
+	status = ixgbe_reset_phy_generic(hw);
+
+	if (status)
+		return status;
+
+	/* Configure Link Status Alarm and Temperature Threshold interrupts */
+	return ixgbe_enable_lasi_ext_t_x550em(hw);
+}
+
 /** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
  *  @hw: pointer to hardware structure
  *
@@ -1098,25 +1454,32 @@
 static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
 {
 	struct ixgbe_phy_info *phy = &hw->phy;
+	ixgbe_link_speed speed;
 	s32 ret_val;
-	u32 esdp;
 
-	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
-		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+	hw->mac.ops.set_lan_id(hw);
+
+	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
 		phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+		ixgbe_setup_mux_ctl(hw);
 
-		if (hw->bus.lan_id) {
-			esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
-			esdp |= IXGBE_ESDP_SDP1_DIR;
+		/* Save NW management interface connected on board. This is used
+		 * to determine internal PHY mode.
+		 */
+		phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+		/* If internal PHY mode is KR, then initialize KR link */
+		if (phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) {
+			speed = IXGBE_LINK_SPEED_10GB_FULL |
+				IXGBE_LINK_SPEED_1GB_FULL;
+			ret_val = ixgbe_setup_kr_speed_x550em(hw, speed);
 		}
-		esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
-		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
 	}
 
 	/* Identify the PHY or SFP module */
 	ret_val = phy->ops.identify(hw);
 
-	/* Setup function pointers based on detected SFP module and speeds */
+	/* Setup function pointers based on detected hardware */
 	ixgbe_init_mac_link_ops_X550em(hw);
 	if (phy->sfp_type != ixgbe_sfp_type_unknown)
 		phy->ops.reset = NULL;
@@ -1134,11 +1497,30 @@
 		phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
 		break;
 	case ixgbe_phy_x550em_ext_t:
-		phy->ops.setup_internal_link = ixgbe_setup_internal_phy_x550em;
+		/* Save NW management interface connected on board. This is used
+		 * to determine internal PHY mode
+		 */
+		phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+		/* If internal link mode is XFI, then setup iXFI internal link,
+		 * else setup KR now.
+		 */
+		if (!(phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+			phy->ops.setup_internal_link =
+					ixgbe_setup_internal_phy_t_x550em;
+		} else {
+			speed = IXGBE_LINK_SPEED_10GB_FULL |
+				IXGBE_LINK_SPEED_1GB_FULL;
+			ret_val = ixgbe_setup_kr_speed_x550em(hw, speed);
+		}
+
+		phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
+		phy->ops.reset = ixgbe_reset_phy_t_X550em;
 		break;
 	default:
 		break;
 	}
+
 	return ret_val;
 }
 
@@ -1177,67 +1559,37 @@
  **/
 static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
 {
-	u32 status;
+	s32 status;
 	u16 reg;
-	u32 retries = 2;
 
-	do {
-		/* decrement retries counter and exit if we hit 0 */
-		if (retries < 1) {
-			hw_dbg(hw, "External PHY not yet finished resetting.");
-			return IXGBE_ERR_PHY;
-		}
-		retries--;
-
-		status = hw->phy.ops.read_reg(hw,
-					      IXGBE_MDIO_TX_VENDOR_ALARMS_3,
-					      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-					      &reg);
-		if (status)
-			return status;
-
-		/* Verify PHY FW reset has completed */
-	} while ((reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) != 1);
-
-	/* Set port to low power mode */
 	status = hw->phy.ops.read_reg(hw,
-				      IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
-				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-				      &reg);
-	if (status)
-		return status;
-
-	/* Enable the transmitter */
-	status = hw->phy.ops.read_reg(hw,
-				      IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
+				      IXGBE_MDIO_TX_VENDOR_ALARMS_3,
 				      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
 				      &reg);
 	if (status)
 		return status;
 
-	reg &= ~IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE;
+	/* If PHY FW reset completed bit is set then this is the first
+	 * SW instance after a power on so the PHY FW must be un-stalled.
+	 */
+	if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
+		status = hw->phy.ops.read_reg(hw,
+					IXGBE_MDIO_GLOBAL_RES_PR_10,
+					IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+					&reg);
+		if (status)
+			return status;
 
-	status = hw->phy.ops.write_reg(hw,
-				       IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
-				       IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-				       reg);
-	if (status)
-		return status;
+		reg &= ~IXGBE_MDIO_POWER_UP_STALL;
 
-	/* Un-stall the PHY FW */
-	status = hw->phy.ops.read_reg(hw,
-				      IXGBE_MDIO_GLOBAL_RES_PR_10,
-				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-				      &reg);
-	if (status)
-		return status;
+		status = hw->phy.ops.write_reg(hw,
+					IXGBE_MDIO_GLOBAL_RES_PR_10,
+					IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+					reg);
+		if (status)
+			return status;
+	}
 
-	reg &= ~IXGBE_MDIO_POWER_UP_STALL;
-
-	status = hw->phy.ops.write_reg(hw,
-				       IXGBE_MDIO_GLOBAL_RES_PR_10,
-				       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-				       reg);
 	return status;
 }
 
@@ -1254,6 +1606,7 @@
 	s32 status;
 	u32 ctrl = 0;
 	u32 i;
+	u32 hlreg0;
 	bool link_up = false;
 
 	/* Call adapter stop to disable Tx/Rx and clear interrupts */
@@ -1338,6 +1691,15 @@
 	hw->mac.num_rar_entries = 128;
 	hw->mac.ops.init_rx_addrs(hw);
 
+	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
+		hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+		hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
+		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+	}
+
+	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
+		ixgbe_setup_mux_ctl(hw);
+
 	return status;
 }
 
@@ -1490,6 +1852,10 @@
 	.read_i2c_sff8472	= &ixgbe_read_i2c_sff8472_generic, \
 	.read_i2c_eeprom	= &ixgbe_read_i2c_eeprom_generic, \
 	.write_i2c_eeprom	= &ixgbe_write_i2c_eeprom_generic, \
+	.read_reg		= &ixgbe_read_phy_reg_generic, \
+	.write_reg		= &ixgbe_write_phy_reg_generic, \
+	.setup_link		= &ixgbe_setup_phy_link_generic, \
+	.set_phy_power		= &ixgbe_set_copper_phy_power, \
 	.check_overtemp		= &ixgbe_tn_check_overtemp, \
 	.get_firmware_version	= &ixgbe_get_phy_firmware_version_generic,
 
@@ -1497,9 +1863,6 @@
 	X550_COMMON_PHY
 	.init			= NULL,
 	.identify		= &ixgbe_identify_phy_generic,
-	.read_reg		= &ixgbe_read_phy_reg_generic,
-	.write_reg		= &ixgbe_write_phy_reg_generic,
-	.setup_link		= &ixgbe_setup_phy_link_generic,
 	.read_i2c_combined	= &ixgbe_read_i2c_combined_generic,
 	.write_i2c_combined	= &ixgbe_write_i2c_combined_generic,
 };
@@ -1508,9 +1871,14 @@
 	X550_COMMON_PHY
 	.init			= &ixgbe_init_phy_ops_X550em,
 	.identify		= &ixgbe_identify_phy_x550em,
-	.read_reg		= NULL, /* defined later */
-	.write_reg		= NULL, /* defined later */
-	.setup_link		= NULL, /* defined later */
+};
+
+static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
+	IXGBE_MVALS_INIT(X550)
+};
+
+static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
+	IXGBE_MVALS_INIT(X550EM_x)
 };
 
 struct ixgbe_info ixgbe_X550_info = {
@@ -1520,6 +1888,7 @@
 	.eeprom_ops		= &eeprom_ops_X550,
 	.phy_ops		= &phy_ops_X550,
 	.mbx_ops		= &mbx_ops_generic,
+	.mvals			= ixgbe_mvals_X550,
 };
 
 struct ixgbe_info ixgbe_X550EM_x_info = {
@@ -1529,4 +1898,5 @@
 	.eeprom_ops		= &eeprom_ops_X550EM_x,
 	.phy_ops		= &phy_ops_X550EM_x,
 	.mbx_ops		= &mbx_ops_generic,
+	.mvals			= ixgbe_mvals_X550EM_x,
 };
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index d323a69..80af9ff 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 1c75829..d52639b 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -3125,9 +3125,8 @@
 
 	mib_counters_clear(mp);
 
-	init_timer(&mp->mib_counters_timer);
-	mp->mib_counters_timer.data = (unsigned long)mp;
-	mp->mib_counters_timer.function = mib_counters_timer_wrapper;
+	setup_timer(&mp->mib_counters_timer, mib_counters_timer_wrapper,
+		    (unsigned long)mp);
 	mp->mib_counters_timer.expires = jiffies + 30 * HZ;
 
 	spin_lock_init(&mp->mib_counters_lock);
@@ -3136,9 +3135,7 @@
 
 	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
 
-	init_timer(&mp->rx_oom);
-	mp->rx_oom.data = (unsigned long)mp;
-	mp->rx_oom.function = oom_timer_wrapper;
+	setup_timer(&mp->rx_oom, oom_timer_wrapper, (unsigned long)mp);
 
 
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
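The two mv643xx_eth hunks are a mechanical cleanup: setup_timer() packs the three-step open-coded initialization into a single call. The equivalence on the timer API of this era:

	/* Before: open-coded initialization */
	init_timer(&mp->rx_oom);
	mp->rx_oom.data = (unsigned long)mp;
	mp->rx_oom.function = oom_timer_wrapper;

	/* After: the equivalent one-liner */
	setup_timer(&mp->rx_oom, oom_timer_wrapper, (unsigned long)mp);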
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index ce5f7f9..5bdf782 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1013,6 +1013,12 @@
 		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
 		val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
 		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
+	} else {
+		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+		val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
+		       MVNETA_GMAC_AN_SPEED_EN |
+		       MVNETA_GMAC_AN_DUPLEX_EN);
+		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
 	}
 
 	mvneta_set_ucast_table(pp, -1);
@@ -1359,7 +1365,7 @@
 static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
 {
 	if (likely(pp->frag_size <= PAGE_SIZE))
-		put_page(virt_to_head_page(data));
+		skb_free_frag(data);
 	else
 		kfree(data);
 }
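This ties into the page frag allocator rework from the merge summary: a buffer carved from the per-CPU page-frag cache should be returned through skb_free_frag(), which handles the underlying compound-page refcounting, rather than by dropping a page reference by hand. A minimal sketch of the intended pairing (mvneta's allocation side is not shown in this hunk):

	/* Allocate an rx buffer from the page-frag cache ... */
	void *data = netdev_alloc_frag(frag_size);

	/* ... and release it through the matching helper. */
	if (data)
		skb_free_frag(data);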
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index 8cf7563..52a6665 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 529ef05..8204013 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -49,6 +49,7 @@
 #include "mlx4.h"
 #include "fw.h"
 #include "fw_qos.h"
+#include "mlx4_stats.h"
 
 #define CMD_POLL_TOKEN 0xffff
 #define INBOX_MASK	0xffffffffffffff00ULL
@@ -882,7 +883,7 @@
 {
 	struct ib_smp *smp = inbox->buf;
 	u32 index;
-	u8 port;
+	u8 port, slave_port;
 	u8 opcode_modifier;
 	u16 *table;
 	int err;
@@ -894,7 +895,8 @@
 	__be32 slave_cap_mask;
 	__be64 slave_node_guid;
 
-	port = vhcr->in_modifier;
+	slave_port = vhcr->in_modifier;
+	port = mlx4_slave_convert_port(dev, slave, slave_port);
 
 	/* network-view bit is for driver use only, and should not be passed to FW */
 	opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
@@ -930,8 +932,9 @@
 			if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
 				/*get the slave specific caps:*/
 				/*do the command */
+				smp->attr_mod = cpu_to_be32(port);
 				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
-					    vhcr->in_modifier, opcode_modifier,
+					    port, opcode_modifier,
 					    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
 				/* modify the response for slaves */
 				if (!err && slave != mlx4_master_func_num(dev)) {
@@ -975,7 +978,7 @@
 			}
 			if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
 				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
-					     vhcr->in_modifier, opcode_modifier,
+					     port, opcode_modifier,
 					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
 				if (!err) {
 					slave_node_guid =  mlx4_get_slave_node_guid(dev, slave);
@@ -2915,7 +2918,7 @@
 	port = mlx4_slaves_closest_port(dev, slave, port);
 	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
 	s_info->mac = mac;
-	mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
+	mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
 		  vf, port, s_info->mac);
 	return 0;
 }
@@ -3164,6 +3167,92 @@
 }
 EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
 
+int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
+			   struct mlx4_counter *counter_stats, int reset)
+{
+	struct mlx4_cmd_mailbox *mailbox = NULL;
+	struct mlx4_counter *tmp_counter;
+	int err;
+	u32 if_stat_in_mod;
+
+	if (!counter_stats)
+		return -EINVAL;
+
+	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
+		return 0;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	memset(mailbox->buf, 0, sizeof(struct mlx4_counter));
+	if_stat_in_mod = counter_index;
+	if (reset)
+		if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET;
+	err = mlx4_cmd_box(dev, 0, mailbox->dma,
+			   if_stat_in_mod, 0,
+			   MLX4_CMD_QUERY_IF_STAT,
+			   MLX4_CMD_TIME_CLASS_C,
+			   MLX4_CMD_NATIVE);
+	if (err) {
+		mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
+			 __func__, counter_index);
+		goto if_stat_out;
+	}
+	tmp_counter = (struct mlx4_counter *)mailbox->buf;
+	counter_stats->counter_mode = tmp_counter->counter_mode;
+	if (counter_stats->counter_mode == 0) {
+		counter_stats->rx_frames =
+			cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
+				    be64_to_cpu(tmp_counter->rx_frames));
+		counter_stats->tx_frames =
+			cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
+				    be64_to_cpu(tmp_counter->tx_frames));
+		counter_stats->rx_bytes =
+			cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
+				    be64_to_cpu(tmp_counter->rx_bytes));
+		counter_stats->tx_bytes =
+			cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
+				    be64_to_cpu(tmp_counter->tx_bytes));
+	}
+
+if_stat_out:
+	mlx4_free_cmd_mailbox(dev, mailbox);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_counter_stats);
+
+int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
+		      struct ifla_vf_stats *vf_stats)
+{
+	struct mlx4_counter tmp_vf_stats;
+	int slave;
+	int err = 0;
+
+	if (!vf_stats)
+		return -EINVAL;
+
+	if (!mlx4_is_master(dev))
+		return -EPROTONOSUPPORT;
+
+	slave = mlx4_get_slave_indx(dev, vf_idx);
+	if (slave < 0)
+		return -EINVAL;
+
+	port = mlx4_slaves_closest_port(dev, slave, port);
+	err = mlx4_calc_vf_counters(dev, slave, port, &tmp_vf_stats);
+	if (!err && tmp_vf_stats.counter_mode == 0) {
+		vf_stats->rx_packets = be64_to_cpu(tmp_vf_stats.rx_frames);
+		vf_stats->tx_packets = be64_to_cpu(tmp_vf_stats.tx_frames);
+		vf_stats->rx_bytes = be64_to_cpu(tmp_vf_stats.rx_bytes);
+		vf_stats->tx_bytes = be64_to_cpu(tmp_vf_stats.tx_bytes);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_vf_stats);
+
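Note the endianness convention in mlx4_get_counter_stats() above: accumulated counters stay in big-endian, as the firmware reports them, so every addition round-trips through be64_to_cpu()/cpu_to_be64(). The pattern, reduced to a hypothetical helper:

	/* Hypothetical helper illustrating the accumulate used above;
	 * both arguments hold firmware-order (big-endian) values. */
	static inline void mlx4_be64_add(__be64 *acc, __be64 val)
	{
		*acc = cpu_to_be64(be64_to_cpu(*acc) + be64_to_cpu(val));
	}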
 int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -3197,6 +3286,12 @@
 				 int enabled)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
+			&priv->dev, slave);
+	int min_port = find_first_bit(actv_ports.ports,
+				      priv->dev.caps.num_ports) + 1;
+	int max_port = min_port - 1 +
+		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
 
 	if (slave == mlx4_master_func_num(dev))
 		return 0;
@@ -3206,6 +3301,11 @@
 	    enabled < 0 || enabled > 1)
 		return -EINVAL;
 
+	if (min_port == max_port && dev->caps.num_ports > 1) {
+		mlx4_info(dev, "SMI access disallowed for single ported VFs\n");
+		return -EPROTONOSUPPORT;
+	}
+
 	priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
 	return 0;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index e71f313..3348e64 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -292,7 +292,7 @@
 	u64 mtt_addr;
 	int err;
 
-	if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
+	if (vector >= dev->caps.num_comp_vectors)
 		return -EINVAL;
 
 	cq->vector = vector;
@@ -319,7 +319,7 @@
 		cq_context->flags  |= cpu_to_be32(1 << 19);
 
 	cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
-	cq_context->comp_eqn	    = priv->eq_table.eq[vector].eqn;
+	cq_context->comp_eqn	    = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
 	cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
 
 	mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -339,11 +339,11 @@
 	init_completion(&cq->free);
 	cq->comp = mlx4_add_cq_to_tasklet;
 	cq->tasklet_ctx.priv =
-		&priv->eq_table.eq[cq->vector].tasklet_ctx;
+		&priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx;
 	INIT_LIST_HEAD(&cq->tasklet_ctx.list);
 
 
-	cq->irq = priv->eq_table.eq[cq->vector].irq;
+	cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
 	return 0;
 
 err_radix:
@@ -368,7 +368,10 @@
 	if (err)
 		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
 
-	synchronize_irq(priv->eq_table.eq[cq->vector].irq);
+	synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
+	if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
+	    priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
+		synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
 
 	spin_lock_irq(&cq_table->lock);
 	radix_tree_delete(&cq_table->tree, cq->cqn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 22da4d0..63769df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -66,6 +66,7 @@
 
 	cq->ring = ring;
 	cq->is_tx = mode;
+	cq->vector = mdev->dev->caps.num_comp_vectors;
 
 	/* Allocate HW buffers on provided NUMA node.
 	 * dev->numa_node is used in mtt range allocation flow.
@@ -101,12 +102,7 @@
 	int err = 0;
 	char name[25];
 	int timestamp_en = 0;
-	struct cpu_rmap *rmap =
-#ifdef CONFIG_RFS_ACCEL
-		priv->dev->rx_cpu_rmap;
-#else
-		NULL;
-#endif
+	bool assigned_eq = false;
 
 	cq->dev = mdev->pndev[priv->port];
 	cq->mcq.set_ci_db  = cq->wqres.db.db;
@@ -116,23 +112,19 @@
 	memset(cq->buf, 0, cq->buf_size);
 
 	if (cq->is_tx == RX) {
-		if (mdev->dev->caps.comp_pool) {
-			if (!cq->vector) {
-				sprintf(name, "%s-%d", priv->dev->name,
-					cq->ring);
-				/* Set IRQ for specific name (per ring) */
-				if (mlx4_assign_eq(mdev->dev, name, rmap,
-						   &cq->vector)) {
-					cq->vector = (cq->ring + 1 + priv->port)
-					    % mdev->dev->caps.num_comp_vectors;
-					mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
-						  name);
-				}
+		if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
+					     cq->vector)) {
+			cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
 
+			err = mlx4_assign_eq(mdev->dev, priv->port,
+					     &cq->vector);
+			if (err) {
+				mlx4_err(mdev, "Failed assigning an EQ to port %d\n",
+					 priv->port);
+				goto free_eq;
 			}
-		} else {
-			cq->vector = (cq->ring + 1 + priv->port) %
-				mdev->dev->caps.num_comp_vectors;
+
+			assigned_eq = true;
 		}
 
 		cq->irq_desc =
@@ -159,7 +151,7 @@
 			    &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
 			    cq->vector, 0, timestamp_en);
 	if (err)
-		return err;
+		goto free_eq;
 
 	cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
 	cq->mcq.event = mlx4_en_cq_event;
@@ -168,13 +160,6 @@
 		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
 			       NAPI_POLL_WEIGHT);
 	} else {
-		struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
-
-		err = irq_set_affinity_hint(cq->mcq.irq,
-					    ring->affinity_mask);
-		if (err)
-			mlx4_warn(mdev, "Failed setting affinity hint\n");
-
 		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
 		napi_hash_add(&cq->napi);
 	}
@@ -182,6 +167,12 @@
 	napi_enable(&cq->napi);
 
 	return 0;
+
+free_eq:
+	if (assigned_eq)
+		mlx4_release_eq(mdev->dev, cq->vector);
+	cq->vector = mdev->dev->caps.num_comp_vectors;
+	return err;
 }
 
 void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
@@ -191,9 +182,9 @@
 
 	mlx4_en_unmap_buffer(&cq->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
-	if (priv->mdev->dev->caps.comp_pool && cq->vector) {
+	if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
+	    cq->is_tx == RX)
 		mlx4_release_eq(priv->mdev->dev, cq->vector);
-	}
 	cq->vector = 0;
 	cq->buf_size = 0;
 	cq->buf = NULL;
@@ -207,7 +198,6 @@
 	if (!cq->is_tx) {
 		napi_hash_del(&cq->napi);
 		synchronize_rcu();
-		irq_set_affinity_hint(cq->mcq.irq, NULL);
 	}
 	netif_napi_del(&cq->napi);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index a2ddf3d..99ba1c5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -119,6 +119,12 @@
 	"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
 	"rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
 
+	/* pf statistics */
+	"pf_rx_packets",
+	"pf_rx_bytes",
+	"pf_tx_packets",
+	"pf_tx_bytes",
+
 	/* priority flow control statistics rx */
 	"rx_pause_prio_0", "rx_pause_duration_prio_0",
 	"rx_pause_transition_prio_0",
@@ -368,6 +374,11 @@
 		if (bitmap_iterator_test(&it))
 			data[index++] = ((unsigned long *)&priv->port_stats)[i];
 
+	for (i = 0; i < NUM_PF_STATS; i++, bitmap_iterator_inc(&it))
+		if (bitmap_iterator_test(&it))
+			data[index++] =
+				((unsigned long *)&priv->pf_stats)[i];
+
 	for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX;
 	     i++, bitmap_iterator_inc(&it))
 		if (bitmap_iterator_test(&it))
@@ -448,6 +459,12 @@
 				strcpy(data + (index++) * ETH_GSTRING_LEN,
 				       main_strings[strings]);
 
+		for (i = 0; i < NUM_PF_STATS; i++, strings++,
+		     bitmap_iterator_inc(&it))
+			if (bitmap_iterator_test(&it))
+				strcpy(data + (index++) * ETH_GSTRING_LEN,
+				       main_strings[strings]);
+
 		for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
 		     bitmap_iterator_inc(&it))
 			if (bitmap_iterator_test(&it))
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index cf467a9..77179d7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1597,6 +1597,9 @@
 	}
 	mdev->mac_removed[priv->port] = 0;
 
+	priv->counter_index =
+			mlx4_get_default_counter_index(mdev->dev, priv->port);
+
 	err = mlx4_en_config_rss_steer(priv);
 	if (err) {
 		en_err(priv, "Failed configuring rss steering\n");
@@ -1755,6 +1758,7 @@
 
 	/* Set port as not active */
 	priv->port_up = false;
+	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
 
 	/* Promiscuous mode */
 	if (mdev->dev->caps.steering_mode ==
@@ -1891,6 +1895,7 @@
 	       sizeof(priv->rx_priority_flowstats));
 	memset(&priv->tx_priority_flowstats, 0,
 	       sizeof(priv->tx_priority_flowstats));
+	memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
 		priv->tx_ring[i]->bytes = 0;
@@ -1954,7 +1959,6 @@
 	int i;
 
 #ifdef CONFIG_RFS_ACCEL
-	free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
 	priv->dev->rx_cpu_rmap = NULL;
 #endif
 
@@ -2012,11 +2016,7 @@
 	}
 
 #ifdef CONFIG_RFS_ACCEL
-	if (priv->mdev->dev->caps.comp_pool) {
-		priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
-		if (!priv->dev->rx_cpu_rmap)
-			goto err;
-	}
+	priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
 #endif
 
 	return 0;
@@ -2292,6 +2292,15 @@
 	return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
 }
 
+static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
+				struct ifla_vf_stats *vf_stats)
+{
+	struct mlx4_en_priv *en_priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = en_priv->mdev;
+
+	return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
+}
+
 #define PORT_ID_BYTE_LEN 8
 static int mlx4_en_get_phys_port_id(struct net_device *dev,
 				    struct netdev_phys_item_id *ppid)
@@ -2489,6 +2498,7 @@
 	.ndo_set_vf_rate	= mlx4_en_set_vf_rate,
 	.ndo_set_vf_spoofchk	= mlx4_en_set_vf_spoofchk,
 	.ndo_set_vf_link_state	= mlx4_en_set_vf_link_state,
+	.ndo_get_vf_stats       = mlx4_en_get_vf_stats,
 	.ndo_get_vf_config	= mlx4_en_get_vf_config,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= mlx4_en_netpoll,
@@ -2686,7 +2696,7 @@
 				     u8 rx_ppp, u8 rx_pause,
 				     u8 tx_ppp, u8 tx_pause)
 {
-	int last_i = NUM_MAIN_STATS + NUM_PORT_STATS;
+	int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;
 
 	if (!mlx4_is_slave(dev) &&
 	    (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
@@ -2748,6 +2758,11 @@
 	bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
 	last_i += NUM_PORT_STATS;
 
+	if (mlx4_is_master(dev))
+		bitmap_set(stats_bitmap->bitmap, last_i,
+			   NUM_PF_STATS);
+	last_i += NUM_PF_STATS;
+
 	mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
 					rx_ppp, rx_pause,
 					tx_ppp, tx_pause);
@@ -2783,6 +2798,7 @@
 
 	priv = netdev_priv(dev);
 	memset(priv, 0, sizeof(struct mlx4_en_priv));
+	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
 	spin_lock_init(&priv->stats_lock);
 	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
 	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 0a56f01..ee99e67 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -149,6 +149,7 @@
 
 int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 {
+	struct mlx4_counter tmp_counter_stats;
 	struct mlx4_en_stat_out_mbox *mlx4_en_stats;
 	struct mlx4_en_stat_out_flow_control_mbox *flowstats;
 	struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
@@ -156,7 +157,7 @@
 	struct mlx4_cmd_mailbox *mailbox;
 	u64 in_mod = reset << 8 | port;
 	int err;
-	int i;
+	int i, counter_index;
 
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
@@ -202,6 +203,20 @@
 		priv->port_stats.tso_packets       += ring->tso_packets;
 		priv->port_stats.xmit_more         += ring->xmit_more;
 	}
+	if (mlx4_is_master(mdev->dev)) {
+		stats->rx_packets = en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
+						   &mlx4_en_stats->RTOT_prio_1,
+						   NUM_PRIORITIES);
+		stats->tx_packets = en_stats_adder(&mlx4_en_stats->TTOT_prio_0,
+						   &mlx4_en_stats->TTOT_prio_1,
+						   NUM_PRIORITIES);
+		stats->rx_bytes = en_stats_adder(&mlx4_en_stats->ROCT_prio_0,
+						 &mlx4_en_stats->ROCT_prio_1,
+						 NUM_PRIORITIES);
+		stats->tx_bytes = en_stats_adder(&mlx4_en_stats->TOCT_prio_0,
+						 &mlx4_en_stats->TOCT_prio_1,
+						 NUM_PRIORITIES);
+	}
 
 	/* net device stats */
 	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
@@ -296,6 +311,11 @@
 
 	spin_unlock_bh(&priv->stats_lock);
 
+	memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
+	counter_index = mlx4_get_default_counter_index(mdev->dev, port);
+	err = mlx4_get_counter_stats(mdev->dev, counter_index,
+				     &tmp_counter_stats, reset);
+
 	/* 0xffs indicates invalid value */
 	memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
 
@@ -314,6 +334,13 @@
 
 	spin_lock_bh(&priv->stats_lock);
 
+	if (tmp_counter_stats.counter_mode == 0) {
+		priv->pf_stats.rx_bytes   = be64_to_cpu(tmp_counter_stats.rx_bytes);
+		priv->pf_stats.tx_bytes   = be64_to_cpu(tmp_counter_stats.tx_bytes);
+		priv->pf_stats.rx_packets = be64_to_cpu(tmp_counter_stats.rx_frames);
+		priv->pf_stats.tx_packets = be64_to_cpu(tmp_counter_stats.tx_frames);
+	}
+
 	for (i = 0; i < MLX4_NUM_PRIORITIES; i++)	{
 		priv->rx_priority_flowstats[i].rx_pause =
 			be64_to_cpu(flowstats[i].rx_pause);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 34f2fdf..e482fa1b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -66,7 +66,7 @@
 		context->pri_path.sched_queue |= user_prio << 3;
 		context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
 	}
-	context->pri_path.counter_index = 0xff;
+	context->pri_path.counter_index = priv->counter_index;
 	context->cqn_send = cpu_to_be32(cqn);
 	context->cqn_recv = cpu_to_be32(cqn);
 	context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 2a77a6b..35f726c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -337,15 +337,10 @@
 	struct mlx4_dev *dev = mdev->dev;
 
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
-		if (!dev->caps.comp_pool)
-			num_of_eqs = max_t(int, MIN_RX_RINGS,
-					   min_t(int,
-						 dev->caps.num_comp_vectors,
-						 DEF_RX_RINGS));
-		else
-			num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
-					   dev->caps.comp_pool/
-					   dev->caps.num_ports) - 1;
+		num_of_eqs = max_t(int, MIN_RX_RINGS,
+				   min_t(int,
+					 mlx4_get_eqs_per_port(mdev->dev, i),
+					 DEF_RX_RINGS));
 
 		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
 			min_t(int, num_of_eqs,
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 2619c9f..aae13ad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -145,7 +145,7 @@
 	struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
 	struct mlx4_eqe *eqe;
 	u8 slave;
-	int i;
+	int i, phys_port, slave_port;
 
 	for (eqe = next_slave_event_eqe(slave_eq); eqe;
 	      eqe = next_slave_event_eqe(slave_eq)) {
@@ -154,9 +154,20 @@
 		/* All active slaves need to receive the event */
 		if (slave == ALL_SLAVES) {
 			for (i = 0; i <= dev->persist->num_vfs; i++) {
+				phys_port = 0;
+				if (eqe->type == MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT &&
+				    eqe->subtype == MLX4_DEV_PMC_SUBTYPE_PORT_INFO) {
+					phys_port  = eqe->event.port_mgmt_change.port;
+					slave_port = mlx4_phys_to_slave_port(dev, i, phys_port);
+					if (slave_port < 0) /* VF doesn't have this port */
+						continue;
+					eqe->event.port_mgmt_change.port = slave_port;
+				}
 				if (mlx4_GEN_EQE(dev, i, eqe))
 					mlx4_warn(dev, "Failed to generate event for slave %d\n",
 						  i);
+				if (phys_port)
+					eqe->event.port_mgmt_change.port = phys_port;
 			}
 		} else {
 			if (mlx4_GEN_EQE(dev, slave, eqe))
@@ -210,6 +221,22 @@
 	slave_event(dev, slave, eqe);
 }
 
+#if defined(CONFIG_SMP)
+static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
+{
+	int hint_err;
+	struct mlx4_dev *dev = &priv->dev;
+	struct mlx4_eq *eq = &priv->eq_table.eq[vec];
+
+	if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+		return;
+
+	hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
+	if (hint_err)
+		mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
+}
+#endif
+
 int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
 {
 	struct mlx4_eqe eqe;
@@ -224,7 +251,7 @@
 
 	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
 	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
-	eqe.event.port_mgmt_change.port = port;
+	eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);
 
 	return mlx4_GEN_EQE(dev, slave, &eqe);
 }
@@ -241,7 +268,7 @@
 
 	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
 	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
-	eqe.event.port_mgmt_change.port = port;
+	eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);
 
 	return mlx4_GEN_EQE(dev, slave, &eqe);
 }
@@ -251,6 +278,7 @@
 				   u8 port_subtype_change)
 {
 	struct mlx4_eqe eqe;
+	u8 slave_port = mlx4_phys_to_slave_port(dev, slave, port);
 
 	/* don't send if we don't have that slave */
 	if (dev->persist->num_vfs < slave)
@@ -259,7 +287,7 @@
 
 	eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
 	eqe.subtype = port_subtype_change;
-	eqe.event.port_change.port = cpu_to_be32(port << 28);
+	eqe.event.port_change.port = cpu_to_be32(slave_port << 28);
 
 	mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
 		 port_subtype_change, slave, port);
@@ -589,6 +617,10 @@
 						if (SLAVE_PORT_GEN_EVENT_DOWN ==  gen_event) {
 							if (i == mlx4_master_func_num(dev))
 								continue;
+							eqe->event.port_change.port =
+								cpu_to_be32(
+								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
+								| (mlx4_phys_to_slave_port(dev, i, port) << 28));
 							mlx4_slave_event(dev, i, eqe);
 						}
 					}
@@ -879,8 +911,8 @@
 	 * we need to map, take the difference of highest index and
 	 * the lowest index we'll use and add 1.
 	 */
-	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
-		 dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
+	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
+		dev->caps.reserved_eqs / 4 + 1;
 }
 
 static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
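
The hunk above only removes the comp_pool term; the underlying
arithmetic is unchanged. Four EQ doorbells share one UAR page, so the
function counts the 4-EQ pages spanning the lowest and highest EQ index
in use. A worked standalone example with sample capability values:

	#include <stdio.h>

	static int num_eq_uars(int num_comp_vectors, int reserved_eqs)
	{
		/* EQs reserved_eqs .. reserved_eqs + num_comp_vectors are in
		 * use; count the 4-EQ UAR pages needed to cover that range.
		 */
		return (num_comp_vectors + 1 + reserved_eqs) / 4 -
			reserved_eqs / 4 + 1;
	}

	int main(void)
	{
		/* e.g. 16 reserved EQs, 15 completion vectors + 1 async EQ:
		 * (15 + 1 + 16)/4 - 16/4 + 1 = 8 - 4 + 1 = 5
		 */
		printf("%d UAR pages\n", num_eq_uars(15, 16));
		return 0;
	}
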
@@ -1069,32 +1101,21 @@
 static void mlx4_free_irqs(struct mlx4_dev *dev)
 {
 	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
-	struct mlx4_priv *priv = mlx4_priv(dev);
-	int	i, vec;
+	int	i;
 
 	if (eq_table->have_irq)
 		free_irq(dev->persist->pdev->irq, dev);
 
 	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
 		if (eq_table->eq[i].have_irq) {
+			free_cpumask_var(eq_table->eq[i].affinity_mask);
+#if defined(CONFIG_SMP)
+			irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
+#endif
 			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
 			eq_table->eq[i].have_irq = 0;
 		}
 
-	for (i = 0; i < dev->caps.comp_pool; i++) {
-		/*
-		 * Freeing the assigned irq's
-		 * all bits should be 0, but we need to validate
-		 */
-		if (priv->msix_ctl.pool_bm & 1ULL << i) {
-			/* NO need protecting*/
-			vec = dev->caps.num_comp_vectors + 1 + i;
-			free_irq(priv->eq_table.eq[vec].irq,
-				 &priv->eq_table.eq[vec]);
-		}
-	}
-
-
 	kfree(eq_table->irq_names);
 }
 
@@ -1175,76 +1196,73 @@
 	}
 
 	priv->eq_table.irq_names =
-		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
-					     dev->caps.comp_pool),
+		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
 			GFP_KERNEL);
 	if (!priv->eq_table.irq_names) {
 		err = -ENOMEM;
-		goto err_out_bitmap;
+		goto err_out_clr_int;
 	}
 
-	for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
-		err = mlx4_create_eq(dev, dev->caps.num_cqs -
-					  dev->caps.reserved_cqs +
-					  MLX4_NUM_SPARE_EQE,
-				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
-				     &priv->eq_table.eq[i]);
-		if (err) {
-			--i;
-			goto err_out_unmap;
+	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
+		if (i == MLX4_EQ_ASYNC) {
+			err = mlx4_create_eq(dev,
+					     MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
+					     0, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
+		} else {
+			struct mlx4_eq	*eq = &priv->eq_table.eq[i];
+#ifdef CONFIG_RFS_ACCEL
+			int port = find_first_bit(eq->actv_ports.ports,
+						  dev->caps.num_ports) + 1;
+
+			if (port <= dev->caps.num_ports) {
+				struct mlx4_port_info *info =
+					&mlx4_priv(dev)->port[port];
+
+				if (!info->rmap) {
+					info->rmap = alloc_irq_cpu_rmap(
+						mlx4_get_eqs_per_port(dev, port));
+					if (!info->rmap) {
+						mlx4_warn(dev, "Failed to allocate cpu rmap\n");
+						err = -ENOMEM;
+						goto err_out_unmap;
+					}
+				}
+
+				err = irq_cpu_rmap_add(
+					info->rmap, eq->irq);
+				if (err)
+					mlx4_warn(dev, "Failed adding irq rmap\n");
+			}
+#endif
+			err = mlx4_create_eq(dev, dev->caps.num_cqs -
+						  dev->caps.reserved_cqs +
+						  MLX4_NUM_SPARE_EQE,
+					     (dev->flags & MLX4_FLAG_MSI_X) ?
+					     i + 1 - !!(i > MLX4_EQ_ASYNC) : 0,
+					     eq);
 		}
-	}
-
-	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
-			     (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
-			     &priv->eq_table.eq[dev->caps.num_comp_vectors]);
-	if (err)
-		goto err_out_comp;
-
-	/*if additional completion vectors poolsize is 0 this loop will not run*/
-	for (i = dev->caps.num_comp_vectors + 1;
-	      i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
-
-		err = mlx4_create_eq(dev, dev->caps.num_cqs -
-					  dev->caps.reserved_cqs +
-					  MLX4_NUM_SPARE_EQE,
-				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
-				     &priv->eq_table.eq[i]);
-		if (err) {
-			--i;
+		if (err)
 			goto err_out_unmap;
-		}
 	}
 
-
 	if (dev->flags & MLX4_FLAG_MSI_X) {
 		const char *eq_name;
 
-		for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
-			if (i < dev->caps.num_comp_vectors) {
-				snprintf(priv->eq_table.irq_names +
-					 i * MLX4_IRQNAME_SIZE,
-					 MLX4_IRQNAME_SIZE,
-					 "mlx4-comp-%d@pci:%s", i,
-					 pci_name(dev->persist->pdev));
-			} else {
-				snprintf(priv->eq_table.irq_names +
-					 i * MLX4_IRQNAME_SIZE,
-					 MLX4_IRQNAME_SIZE,
-					 "mlx4-async@pci:%s",
-					 pci_name(dev->persist->pdev));
-			}
+		snprintf(priv->eq_table.irq_names +
+			 MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE,
+			 MLX4_IRQNAME_SIZE,
+			 "mlx4-async@pci:%s",
+			 pci_name(dev->persist->pdev));
+		eq_name = priv->eq_table.irq_names +
+			MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE;
 
-			eq_name = priv->eq_table.irq_names +
-				  i * MLX4_IRQNAME_SIZE;
-			err = request_irq(priv->eq_table.eq[i].irq,
-					  mlx4_msi_x_interrupt, 0, eq_name,
-					  priv->eq_table.eq + i);
-			if (err)
-				goto err_out_async;
+		err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq,
+				  mlx4_msi_x_interrupt, 0, eq_name,
+				  priv->eq_table.eq + MLX4_EQ_ASYNC);
+		if (err)
+			goto err_out_unmap;
 
-			priv->eq_table.eq[i].have_irq = 1;
-		}
+		priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1;
 	} else {
 		snprintf(priv->eq_table.irq_names,
 			 MLX4_IRQNAME_SIZE,
@@ -1253,36 +1271,38 @@
 		err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
 				  IRQF_SHARED, priv->eq_table.irq_names, dev);
 		if (err)
-			goto err_out_async;
+			goto err_out_unmap;
 
 		priv->eq_table.have_irq = 1;
 	}
 
 	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
-			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
 	if (err)
 		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
-			   priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);
+			   priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
 
-	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
-		eq_set_ci(&priv->eq_table.eq[i], 1);
+	/* arm ASYNC eq */
+	eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1);
 
 	return 0;
 
-err_out_async:
-	mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);
-
-err_out_comp:
-	i = dev->caps.num_comp_vectors - 1;
-
 err_out_unmap:
-	while (i >= 0) {
-		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
-		--i;
+	while (i >= 0)
+		mlx4_free_eq(dev, &priv->eq_table.eq[i--]);
+#ifdef CONFIG_RFS_ACCEL
+	for (i = 1; i <= dev->caps.num_ports; i++) {
+		if (mlx4_priv(dev)->port[i].rmap) {
+			free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
+			mlx4_priv(dev)->port[i].rmap = NULL;
+		}
 	}
+#endif
+	mlx4_free_irqs(dev);
+
+err_out_clr_int:
 	if (!mlx4_is_slave(dev))
 		mlx4_unmap_clr_int(dev);
-	mlx4_free_irqs(dev);
 
 err_out_bitmap:
 	mlx4_unmap_uar(dev);
@@ -1300,11 +1320,19 @@
 	int i;
 
 	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
-		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
 
+#ifdef CONFIG_RFS_ACCEL
+	for (i = 1; i <= dev->caps.num_ports; i++) {
+		if (mlx4_priv(dev)->port[i].rmap) {
+			free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
+			mlx4_priv(dev)->port[i].rmap = NULL;
+		}
+	}
+#endif
 	mlx4_free_irqs(dev);
 
-	for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
+	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
 		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
 
 	if (!mlx4_is_slave(dev))
@@ -1355,87 +1383,169 @@
 
 	/* Return to default */
 	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
-		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
 	return err;
 }
 EXPORT_SYMBOL(mlx4_test_interrupts);
 
-int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
-		   int *vector)
+bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
 {
-
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	int vec = 0, err = 0, i;
+
+	vector = MLX4_CQ_TO_EQ_VECTOR(vector);
+	if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) ||
+	    (vector == MLX4_EQ_ASYNC))
+		return false;
+
+	return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports);
+}
+EXPORT_SYMBOL(mlx4_is_eq_vector_valid);
+
+u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	unsigned int i;
+	unsigned int sum = 0;
+
+	for (i = 0; i < dev->caps.num_comp_vectors + 1; i++)
+		sum += !!test_bit(port - 1,
+				  priv->eq_table.eq[i].actv_ports.ports);
+
+	return sum;
+}
+EXPORT_SYMBOL(mlx4_get_eqs_per_port);
+
+int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	vector = MLX4_CQ_TO_EQ_VECTOR(vector);
+	if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1))
+		return -EINVAL;
+
+	return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports,
+				dev->caps.num_ports) > 1);
+}
+EXPORT_SYMBOL(mlx4_is_eq_shared);
+
+struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port)
+{
+	return mlx4_priv(dev)->port[port].rmap;
+}
+EXPORT_SYMBOL(mlx4_get_cpu_rmap);
+
+int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int err = 0, i = 0;
+	u32 min_ref_count_val = (u32)-1;
+	int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector);
+	int *prequested_vector = NULL;
+
 
 	mutex_lock(&priv->msix_ctl.pool_lock);
-	for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
-		if (~priv->msix_ctl.pool_bm & 1ULL << i) {
-			priv->msix_ctl.pool_bm |= 1ULL << i;
-			vec = dev->caps.num_comp_vectors + 1 + i;
-			snprintf(priv->eq_table.irq_names +
-					vec * MLX4_IRQNAME_SIZE,
-					MLX4_IRQNAME_SIZE, "%s", name);
-#ifdef CONFIG_RFS_ACCEL
-			if (rmap) {
-				err = irq_cpu_rmap_add(rmap,
-						       priv->eq_table.eq[vec].irq);
-				if (err)
-					mlx4_warn(dev, "Failed adding irq rmap\n");
-			}
-#endif
-			err = request_irq(priv->eq_table.eq[vec].irq,
-					  mlx4_msi_x_interrupt, 0,
-					  &priv->eq_table.irq_names[vec<<5],
-					  priv->eq_table.eq + vec);
-			if (err) {
-				/*zero out bit by fliping it*/
-				priv->msix_ctl.pool_bm ^= 1 << i;
-				vec = 0;
-				continue;
-				/*we dont want to break here*/
-			}
+	if (requested_vector < (dev->caps.num_comp_vectors + 1) &&
+	    (requested_vector >= 0) &&
+	    (requested_vector != MLX4_EQ_ASYNC)) {
+		if (test_bit(port - 1,
+			     priv->eq_table.eq[requested_vector].actv_ports.ports)) {
+			prequested_vector = &requested_vector;
+		} else {
+			struct mlx4_eq *eq;
 
-			eq_set_ci(&priv->eq_table.eq[vec], 1);
+			for (i = 1; i < port;
+			     requested_vector += mlx4_get_eqs_per_port(dev, i++))
+				;
+
+			eq = &priv->eq_table.eq[requested_vector];
+			if (requested_vector < dev->caps.num_comp_vectors + 1 &&
+			    test_bit(port - 1, eq->actv_ports.ports)) {
+				prequested_vector = &requested_vector;
+			}
 		}
 	}
+
+	if  (!prequested_vector) {
+		requested_vector = -1;
+		for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1;
+		     i++) {
+			struct mlx4_eq *eq = &priv->eq_table.eq[i];
+
+			if (min_ref_count_val > eq->ref_count &&
+			    test_bit(port - 1, eq->actv_ports.ports)) {
+				min_ref_count_val = eq->ref_count;
+				requested_vector = i;
+			}
+		}
+
+		if (requested_vector < 0) {
+			err = -ENOSPC;
+			goto err_unlock;
+		}
+
+		prequested_vector = &requested_vector;
+	}
+
+	if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) &&
+	    dev->flags & MLX4_FLAG_MSI_X) {
+		set_bit(*prequested_vector, priv->msix_ctl.pool_bm);
+		snprintf(priv->eq_table.irq_names +
+			 *prequested_vector * MLX4_IRQNAME_SIZE,
+			 MLX4_IRQNAME_SIZE, "mlx4-%d@%s",
+			 *prequested_vector, dev_name(&dev->persist->pdev->dev));
+
+		err = request_irq(priv->eq_table.eq[*prequested_vector].irq,
+				  mlx4_msi_x_interrupt, 0,
+				  &priv->eq_table.irq_names[*prequested_vector << 5],
+				  priv->eq_table.eq + *prequested_vector);
+
+		if (err) {
+			clear_bit(*prequested_vector, priv->msix_ctl.pool_bm);
+			*prequested_vector = -1;
+		} else {
+#if defined(CONFIG_SMP)
+			mlx4_set_eq_affinity_hint(priv, *prequested_vector);
+#endif
+			eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1);
+			priv->eq_table.eq[*prequested_vector].have_irq = 1;
+		}
+	}
+
+	if (!err && *prequested_vector >= 0)
+		priv->eq_table.eq[*prequested_vector].ref_count++;
+
+err_unlock:
 	mutex_unlock(&priv->msix_ctl.pool_lock);
 
-	if (vec) {
-		*vector = vec;
-	} else {
+	if (!err && *prequested_vector >= 0)
+		*vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector);
+	else
 		*vector = 0;
-		err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
-	}
+
 	return err;
 }
 EXPORT_SYMBOL(mlx4_assign_eq);
 
-int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
-	return priv->eq_table.eq[vec].irq;
+	return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq;
 }
 EXPORT_SYMBOL(mlx4_eq_get_irq);
 
 void mlx4_release_eq(struct mlx4_dev *dev, int vec)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	/*bm index*/
-	int i = vec - dev->caps.num_comp_vectors - 1;
+	int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec);
 
-	if (likely(i >= 0)) {
-		/*sanity check , making sure were not trying to free irq's
-		  Belonging to a legacy EQ*/
-		mutex_lock(&priv->msix_ctl.pool_lock);
-		if (priv->msix_ctl.pool_bm & 1ULL << i) {
-			free_irq(priv->eq_table.eq[vec].irq,
-				 &priv->eq_table.eq[vec]);
-			priv->msix_ctl.pool_bm &= ~(1ULL << i);
-		}
-		mutex_unlock(&priv->msix_ctl.pool_lock);
-	}
+	mutex_lock(&priv->msix_ctl.pool_lock);
+	priv->eq_table.eq[eq_vec].ref_count--;
 
+	/* Once an EQ has been allocated, we don't release it: it might
+	 * still be bound to a cpu_rmap.
+	 */
+	mutex_unlock(&priv->msix_ctl.pool_lock);
 }
 EXPORT_SYMBOL(mlx4_release_eq);
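
When the caller's requested vector is unusable, mlx4_assign_eq() above
falls back to the least-referenced EQ that serves the port. A
standalone sketch of just that selection rule, with simplified types
and a made-up EQ table:

	#include <stdio.h>

	struct eq {
		unsigned int ref_count;
		unsigned long actv_ports;	/* bit (port - 1) set if served */
	};

	static int pick_least_loaded(const struct eq *eqs, int n, int port)
	{
		unsigned int min_ref = (unsigned int)-1;
		int best = -1, i;

		for (i = 0; i < n; i++) {
			if ((eqs[i].actv_ports & (1UL << (port - 1))) &&
			    eqs[i].ref_count < min_ref) {
				min_ref = eqs[i].ref_count;
				best = i;
			}
		}
		return best;	/* -1 mirrors the -ENOSPC case */
	}

	int main(void)
	{
		const struct eq eqs[] = {
			{ 3, 0x1 }, { 1, 0x1 }, { 2, 0x2 }, { 0, 0x2 },
		};

		printf("port 1 -> eq %d, port 2 -> eq %d\n",
		       pick_least_loaded(eqs, 4, 1),
		       pick_least_loaded(eqs, 4, 2));
		/* prints: port 1 -> eq 1, port 2 -> eq 3 */
		return 0;
	}
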
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 70de39c..12fbfcb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -479,7 +479,15 @@
 		}
 	}
 
-	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
+	if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
+	    (port_type_array[0] == MLX4_PORT_TYPE_IB) &&
+	    (port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
+		mlx4_warn(dev,
+			  "Granular QoS per VF not supported with IB/Eth configuration\n");
+		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
+	}
+
+	dev->caps.max_counters = dev_cap->max_counters;
 
 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
@@ -2212,20 +2220,73 @@
 static int mlx4_init_counters_table(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	int nent;
+	int nent_pow2;
 
 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
 		return -ENOENT;
 
-	nent = dev->caps.max_counters;
-	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
+	if (!dev->caps.max_counters)
+		return -ENOSPC;
+
+	nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
+	/* reserve last counter index for sink counter */
+	return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
+				nent_pow2 - 1, 0,
+				nent_pow2 - dev->caps.max_counters + 1);
 }
 
 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
 {
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+		return;
+
+	if (!dev->caps.max_counters)
+		return;
+
 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
 }
 
+static void mlx4_cleanup_default_counters(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int port;
+
+	for (port = 0; port < dev->caps.num_ports; port++)
+		if (priv->def_counter[port] != -1)
+			mlx4_counter_free(dev, priv->def_counter[port]);
+}
+
+static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int port, err = 0;
+	u32 idx;
+
+	for (port = 0; port < dev->caps.num_ports; port++)
+		priv->def_counter[port] = -1;
+
+	for (port = 0; port < dev->caps.num_ports; port++) {
+		err = mlx4_counter_alloc(dev, &idx);
+
+		if (!err || err == -ENOSPC) {
+			/* -ENOSPC: fall back to the sink counter index */
+			priv->def_counter[port] = idx;
+			err = 0;
+		} else if (err == -ENOENT) {
+			err = 0;
+			continue;
+		} else {
+			mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
+				 __func__, port + 1, err);
+			mlx4_cleanup_default_counters(dev);
+			return err;
+		}
+
+		mlx4_dbg(dev, "%s: default counter index %d for port %d\n",
+			 __func__, priv->def_counter[port], port + 1);
+	}
+
+	return err;
+}
+
 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2234,8 +2295,10 @@
 		return -ENOENT;
 
 	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
-	if (*idx == -1)
-		return -ENOMEM;
+	if (*idx == -1) {
+		*idx = MLX4_SINK_COUNTER_INDEX(dev);
+		return -ENOSPC;
+	}
 
 	return 0;
 }
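
Taken together, the two hunks above size the bitmap up to a power of
two and reserve its top entries so the last real counter index is never
allocated; that index is what the -ENOSPC path hands back. A worked
sketch, assuming MLX4_SINK_COUNTER_INDEX(dev) resolves to
dev->caps.max_counters - 1 (its definition is outside this diff):

	#include <stdio.h>

	static unsigned int roundup_pow_of_two(unsigned int x)
	{
		unsigned int r = 1;

		while (r < x)
			r <<= 1;
		return r;
	}

	int main(void)
	{
		unsigned int max_counters = 10;	/* sample device capability */
		unsigned int nent_pow2 = roundup_pow_of_two(max_counters);
		unsigned int reserved_top = nent_pow2 - max_counters + 1;

		printf("bitmap %u, reserved top %u, allocatable 0..%u, sink %u\n",
		       nent_pow2, reserved_top, nent_pow2 - reserved_top - 1,
		       max_counters - 1);
		/* prints: bitmap 16, reserved top 7, allocatable 0..8, sink 9 */
		return 0;
	}
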
@@ -2258,8 +2321,35 @@
 }
 EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
 
+static int __mlx4_clear_if_stat(struct mlx4_dev *dev,
+				u8 counter_index)
+{
+	struct mlx4_cmd_mailbox *if_stat_mailbox;
+	int err;
+	u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET;
+
+	if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(if_stat_mailbox))
+		return PTR_ERR(if_stat_mailbox);
+
+	err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0,
+			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
+			   MLX4_CMD_NATIVE);
+
+	mlx4_free_cmd_mailbox(dev, if_stat_mailbox);
+	return err;
+}
+
 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
 {
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+		return;
+
+	if (idx == MLX4_SINK_COUNTER_INDEX(dev))
+		return;
+
+	__mlx4_clear_if_stat(dev, idx);
+
 	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
 	return;
 }
@@ -2279,6 +2369,14 @@
 }
 EXPORT_SYMBOL_GPL(mlx4_counter_free);
 
+int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	return priv->def_counter[port - 1];
+}
+EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index);
+
 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2383,11 +2481,11 @@
 	if (err) {
 		if (dev->flags & MLX4_FLAG_MSI_X) {
 			mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
-				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+				  priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
 			mlx4_warn(dev, "Trying again without MSI-X\n");
 		} else {
 			mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
-				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+				 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
 			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
 		}
 
@@ -2414,10 +2512,18 @@
 		goto err_srq_table_free;
 	}
 
-	err = mlx4_init_counters_table(dev);
-	if (err && err != -ENOENT) {
-		mlx4_err(dev, "Failed to initialize counters table, aborting\n");
-		goto err_qp_table_free;
+	if (!mlx4_is_slave(dev)) {
+		err = mlx4_init_counters_table(dev);
+		if (err && err != -ENOENT) {
+			mlx4_err(dev, "Failed to initialize counters table, aborting\n");
+			goto err_qp_table_free;
+		}
+	}
+
+	err = mlx4_allocate_default_counters(dev);
+	if (err) {
+		mlx4_err(dev, "Failed to allocate default counters, aborting\n");
+		goto err_counters_table_free;
 	}
 
 	if (!mlx4_is_slave(dev)) {
@@ -2451,15 +2557,19 @@
 			if (err) {
 				mlx4_err(dev, "Failed to set port %d, aborting\n",
 					 port);
-				goto err_counters_table_free;
+				goto err_default_counters_free;
 			}
 		}
 	}
 
 	return 0;
 
+err_default_counters_free:
+	mlx4_cleanup_default_counters(dev);
+
 err_counters_table_free:
-	mlx4_cleanup_counters_table(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_cleanup_counters_table(dev);
 
 err_qp_table_free:
 	mlx4_cleanup_qp_table(dev);
@@ -2500,14 +2610,45 @@
 	return err;
 }
 
+static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
+{
+	int requested_cpu = 0;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_eq *eq;
+	int off = 0;
+	int i;
+
+	if (eqn > dev->caps.num_comp_vectors)
+		return -EINVAL;
+
+	for (i = 1; i < port; i++)
+		off += mlx4_get_eqs_per_port(dev, i);
+
+	requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
+
+	/* Meaning EQs are shared, and this call comes from the second port */
+	if (requested_cpu < 0)
+		return 0;
+
+	eq = &priv->eq_table.eq[eqn];
+
+	if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	cpumask_set_cpu(requested_cpu, eq->affinity_mask);
+
+	return 0;
+}
+
 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct msix_entry *entries;
 	int i;
+	int port = 0;
 
 	if (msi_x) {
-		int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;
+		int nreq = dev->caps.num_ports * num_online_cpus() + 1;
 
 		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
 			     nreq);
@@ -2522,20 +2663,55 @@
 		nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
 					     nreq);
 
-		if (nreq < 0) {
+		if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
 			kfree(entries);
 			goto no_msi;
-		} else if (nreq < MSIX_LEGACY_SZ +
-			   dev->caps.num_ports * MIN_MSIX_P_PORT) {
-			/*Working in legacy mode , all EQ's shared*/
-			dev->caps.comp_pool           = 0;
-			dev->caps.num_comp_vectors = nreq - 1;
-		} else {
-			dev->caps.comp_pool           = nreq - MSIX_LEGACY_SZ;
-			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
 		}
-		for (i = 0; i < nreq; ++i)
-			priv->eq_table.eq[i].irq = entries[i].vector;
+		/* 1 is reserved for events (asynchronous EQ) */
+		dev->caps.num_comp_vectors = nreq - 1;
+
+		priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
+		bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
+			    dev->caps.num_ports);
+
+		for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
+			if (i == MLX4_EQ_ASYNC)
+				continue;
+
+			priv->eq_table.eq[i].irq =
+				entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
+
+			if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
+				bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+					    dev->caps.num_ports);
+				/* We don't set an affinity hint when
+				 * there aren't enough EQs
+				 */
+			} else {
+				set_bit(port,
+					priv->eq_table.eq[i].actv_ports.ports);
+				if (mlx4_init_affinity_hint(dev, port + 1, i))
+					mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
+						  i);
+			}
+			/* We divide the EQs evenly between the ports.
+			 * (dev->caps.num_comp_vectors / dev->caps.num_ports)
+			 * is the number of EQs per port
+			 * (i.e. eqs_per_port). Ideally, we would simply
+			 * write (i + 1) % eqs_per_port == 0. However,
+			 * since there's an asynchronous EQ, we have to
+			 * skip over it by comparing this condition to
+			 * !!((i + 1) > MLX4_EQ_ASYNC).
+			 */
+			if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
+			    ((i + 1) %
+			     (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
+			    !!((i + 1) > MLX4_EQ_ASYNC))
+				/* If dev->caps.num_comp_vectors < dev->caps.num_ports,
+				 * everything is shared anyway.
+				 */
+				port++;
+		}
 
 		dev->flags |= MLX4_FLAG_MSI_X;
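
To see the comment above in action, here is a standalone walk of the
same loop with the driver structures stripped away, using 8 completion
vectors and 2 ports as sample values:

	#include <stdio.h>

	#define MLX4_EQ_ASYNC 0

	int main(void)
	{
		int num_comp_vectors = 8, num_ports = 2;
		int port = 0, i;

		for (i = 0; i < num_comp_vectors + 1; i++) {
			if (i == MLX4_EQ_ASYNC)
				continue;
			printf("EQ %d -> port %d\n", i, port + 1);
			if (num_comp_vectors > num_ports &&
			    (i + 1) % (num_comp_vectors / num_ports) ==
			    !!((i + 1) > MLX4_EQ_ASYNC))
				port++;
		}
		/* EQs 1-4 land on port 1, EQs 5-8 on port 2 */
		return 0;
	}
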
 
@@ -2545,10 +2721,15 @@
 
 no_msi:
 	dev->caps.num_comp_vectors = 1;
-	dev->caps.comp_pool	   = 0;
 
-	for (i = 0; i < 2; ++i)
+	BUG_ON(MLX4_EQ_ASYNC >= 2);
+	for (i = 0; i < 2; ++i) {
 		priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
+		if (i != MLX4_EQ_ASYNC) {
+			bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+				    dev->caps.num_ports);
+		}
+	}
 }
 
 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
@@ -2613,6 +2794,10 @@
 	device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
 	device_remove_file(&info->dev->persist->pdev->dev,
 			   &info->port_mtu_attr);
+#ifdef CONFIG_RFS_ACCEL
+	free_irq_cpu_rmap(info->rmap);
+	info->rmap = NULL;
+#endif
 }
 
 static int mlx4_init_steering(struct mlx4_dev *dev)
@@ -2768,6 +2953,7 @@
 free_mem:
 	dev->persist->num_vfs = 0;
 	kfree(dev->dev_vfs);
+	dev->dev_vfs = NULL;
 	return dev_flags & ~MLX4_FLAG_MASTER;
 }
 
@@ -2919,6 +3105,7 @@
 								  existing_vfs,
 								  reset_flow);
 
+				mlx4_close_fw(dev);
 				mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
 				dev->flags = dev_flags;
 				if (!SRIOV_VALID_STATE(dev->flags)) {
@@ -3007,18 +3194,6 @@
 	/* In master functions, the communication channel must be initialized
 	 * after obtaining its address from fw */
 	if (mlx4_is_master(dev)) {
-		int ib_ports = 0;
-
-		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
-			ib_ports++;
-
-		if (ib_ports &&
-		    (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
-			mlx4_err(dev,
-				 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
-			err = -EINVAL;
-			goto err_close;
-		}
 		if (dev->caps.num_ports < 2 &&
 		    num_vfs_argc > 1) {
 			err = -EINVAL;
@@ -3055,7 +3230,7 @@
 	if (err)
 		goto err_master_mfunc;
 
-	priv->msix_ctl.pool_bm = 0;
+	bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
 	mutex_init(&priv->msix_ctl.pool_lock);
 
 	mlx4_enable_msi_x(dev);
@@ -3077,7 +3252,6 @@
 	    !mlx4_is_mfunc(dev)) {
 		dev->flags &= ~MLX4_FLAG_MSI_X;
 		dev->caps.num_comp_vectors = 1;
-		dev->caps.comp_pool	   = 0;
 		pci_disable_msix(pdev);
 		err = mlx4_setup_hca(dev);
 	}
@@ -3128,7 +3302,9 @@
 	for (--port; port >= 1; --port)
 		mlx4_cleanup_port_info(&priv->port[port]);
 
-	mlx4_cleanup_counters_table(dev);
+	mlx4_cleanup_default_counters(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_cleanup_counters_table(dev);
 	mlx4_cleanup_qp_table(dev);
 	mlx4_cleanup_srq_table(dev);
 	mlx4_cleanup_cq_table(dev);
@@ -3426,7 +3602,9 @@
 		mlx4_free_resource_tracker(dev,
 					   RES_TR_FREE_SLAVES_ONLY);
 
-	mlx4_cleanup_counters_table(dev);
+	mlx4_cleanup_default_counters(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_cleanup_counters_table(dev);
 	mlx4_cleanup_qp_table(dev);
 	mlx4_cleanup_srq_table(dev);
 	mlx4_cleanup_cq_table(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 502d3dd..a092c5c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -65,6 +65,8 @@
 
 #define INIT_HCA_TPT_MW_ENABLE          (1 << 7)
 
+#define MLX4_QUERY_IF_STAT_RESET	BIT(31)
+
 enum {
 	MLX4_HCR_BASE		= 0x80680,
 	MLX4_HCR_SIZE		= 0x0001c,
@@ -287,6 +289,12 @@
 #define MLX4_CQE_SIZE_MASK_STRIDE	0x3
 #define MLX4_EQE_SIZE_MASK_STRIDE	0x30
 
+#define MLX4_EQ_ASYNC			0
+#define MLX4_EQ_TO_CQ_VECTOR(vector)	((vector) - \
+					 !!((int)(vector) >= MLX4_EQ_ASYNC))
+#define MLX4_CQ_TO_EQ_VECTOR(vector)	((vector) + \
+					 !!((int)(vector) >= MLX4_EQ_ASYNC))
+
 /*
  * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
  */
@@ -391,6 +399,9 @@
 	struct mlx4_buf_list   *page_list;
 	struct mlx4_mtt		mtt;
 	struct mlx4_eq_tasklet	tasklet_ctx;
+	struct mlx4_active_ports actv_ports;
+	u32			ref_count;
+	cpumask_var_t		affinity_mask;
 };
 
 struct mlx4_slave_eqe {
@@ -808,6 +819,7 @@
 	struct mlx4_vlan_table	vlan_table;
 	struct mlx4_roce_gid_table gid_table;
 	int			base_qpn;
+	struct cpu_rmap		*rmap;
 };
 
 struct mlx4_sense {
@@ -818,7 +830,7 @@
 };
 
 struct mlx4_msix_ctl {
-	u64		pool_bm;
+	DECLARE_BITMAP(pool_bm, MAX_MSIX);
 	struct mutex	pool_lock;
 };
 
@@ -864,6 +876,7 @@
 	struct mlx4_qp_table	qp_table;
 	struct mlx4_mcg_table	mcg_table;
 	struct mlx4_bitmap	counters_bitmap;
+	int			def_counter[MLX4_MAX_PORTS];
 
 	struct mlx4_catas_err	catas_err;
 
@@ -997,6 +1010,8 @@
 		     int start_index, int npages, u64 *page_list);
 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
+int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
+			  struct mlx4_counter *data);
 int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
 void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
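
The vector macros added above simply shift completion-vector numbers
past the async EQ, which sits at table index 0. A standalone round-trip
demo of the mapping:

	#include <stdio.h>

	#define MLX4_EQ_ASYNC			0
	#define MLX4_EQ_TO_CQ_VECTOR(vector)	((vector) - \
						 !!((int)(vector) >= MLX4_EQ_ASYNC))
	#define MLX4_CQ_TO_EQ_VECTOR(vector)	((vector) + \
						 !!((int)(vector) >= MLX4_EQ_ASYNC))

	int main(void)
	{
		int cq_vec;

		for (cq_vec = 0; cq_vec < 3; cq_vec++)
			printf("cq %d -> eq %d -> cq %d\n", cq_vec,
			       MLX4_CQ_TO_EQ_VECTOR(cq_vec),
			       MLX4_EQ_TO_CQ_VECTOR(MLX4_CQ_TO_EQ_VECTOR(cq_vec)));
		/* prints: cq 0 -> eq 1 -> cq 0, cq 1 -> eq 2 -> cq 1, ... */
		return 0;
	}
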
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d021f07..d5f9adb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -338,7 +338,7 @@
 	struct napi_struct	napi;
 	int size;
 	int buf_size;
-	unsigned vector;
+	int vector;
 	enum cq_type is_tx;
 	u16 moder_time;
 	u16 moder_cnt;
@@ -566,6 +566,7 @@
 #endif
 	struct mlx4_en_perf_stats pstats;
 	struct mlx4_en_pkt_stats pkstats;
+	struct mlx4_en_counter_stats pf_stats;
 	struct mlx4_en_flow_stats_rx rx_priority_flowstats[MLX4_NUM_PRIORITIES];
 	struct mlx4_en_flow_stats_tx tx_priority_flowstats[MLX4_NUM_PRIORITIES];
 	struct mlx4_en_flow_stats_rx rx_flowstats;
@@ -582,6 +583,7 @@
 	int base_tx_qpn;
 	struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
 	struct hwtstamp_config hwtstamp_config;
+	u32 counter_index;
 
 #ifdef CONFIG_MLX4_EN_DCB
 	struct ieee_ets ets;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 0055583..7fd466c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -23,6 +23,14 @@
 #define NUM_PKT_STATS		43
 };
 
+struct mlx4_en_counter_stats {
+	unsigned long rx_packets;
+	unsigned long rx_bytes;
+	unsigned long tx_packets;
+	unsigned long tx_bytes;
+#define NUM_PF_STATS      4
+};
+
 struct mlx4_en_port_stats {
 	unsigned long tso_packets;
 	unsigned long xmit_more;
@@ -99,7 +107,7 @@
 };
 
 #define NUM_ALL_STATS	(NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \
-			 NUM_FLOW_STATS + NUM_PERF_STATS)
+			 NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS)
 
 #define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \
 				  sizeof(((struct net_device_stats *)0)->n))
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 2bf437aaf..bae8b22 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -82,7 +82,6 @@
 
 	u64 total_size = 0;
 	struct mlx4_resource *profile;
-	struct mlx4_resource tmp;
 	struct sysinfo si;
 	int i, j;
 
@@ -149,11 +148,8 @@
 	 */
 	for (i = MLX4_RES_NUM; i > 0; --i)
 		for (j = 1; j < i; ++j) {
-			if (profile[j].size > profile[j - 1].size) {
-				tmp	       = profile[j];
-				profile[j]     = profile[j - 1];
-				profile[j - 1] = tmp;
-			}
+			if (profile[j].size > profile[j - 1].size)
+				swap(profile[j], profile[j - 1]);
 		}
 
 	for (i = 0; i < MLX4_RES_NUM; ++i) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index b75214a..2026863 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -749,7 +749,7 @@
 
 	{
 		int sort[MLX4_NUM_QP_REGION];
-		int i, j, tmp;
+		int i, j;
 		int last_base = dev->caps.num_qps;
 
 		for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
@@ -758,11 +758,8 @@
 		for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) {
 			for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) {
 				if (dev->caps.reserved_qps_cnt[sort[j]] >
-				    dev->caps.reserved_qps_cnt[sort[j - 1]]) {
-					tmp             = sort[j];
-					sort[j]         = sort[j - 1];
-					sort[j - 1]     = tmp;
-				}
+				    dev->caps.reserved_qps_cnt[sort[j - 1]])
+					swap(sort[j], sort[j - 1]);
 			}
 		}
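
This hunk and the profile.c one above both replace an open-coded
three-line exchange with the kernel's swap() macro; the descending
bubble sort itself is untouched. A standalone sketch of the resulting
loop (swap() is redefined here since linux/kernel.h is unavailable;
typeof is a GNU C extension, as in the kernel):

	#include <stdio.h>

	#define swap(a, b) \
		do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

	int main(void)
	{
		int size[] = { 3, 9, 1, 7 };
		int n = 4, i, j;

		/* sort descending, largest first */
		for (i = n; i > 0; --i)
			for (j = 1; j < i; ++j)
				if (size[j] > size[j - 1])
					swap(size[j], size[j - 1]);

		for (i = 0; i < n; i++)
			printf("%d ", size[i]);	/* 9 7 3 1 */
		printf("\n");
		return 0;
	}
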
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index bafe218..731423c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -46,8 +46,11 @@
 
 #include "mlx4.h"
 #include "fw.h"
+#include "mlx4_stats.h"
 
 #define MLX4_MAC_VALID		(1ull << 63)
+#define MLX4_PF_COUNTERS_PER_PORT	2
+#define MLX4_VF_COUNTERS_PER_PORT	1
 
 struct mac_res {
 	struct list_head list;
@@ -459,11 +462,21 @@
 	dev->quotas.mpt =
 		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
 }
+
+static int get_max_guaranteed_vfs_counter(struct mlx4_dev *dev)
+{
+	/* exclude the sink counter */
+	return (dev->caps.max_counters - 1 -
+		(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
+		/ MLX4_MAX_PORTS;
+}
+
 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int i, j;
 	int t;
+	int max_vfs_guarantee_counter = get_max_guaranteed_vfs_counter(dev);
 
 	priv->mfunc.master.res_tracker.slave_list =
 		kzalloc(dev->num_slaves * sizeof(struct slave_list),
@@ -499,6 +512,9 @@
 			res_alloc->allocated = kzalloc((dev->persist->
 							num_vfs + 1) *
 						       sizeof(int), GFP_KERNEL);
+		/* exclude the sink counter from the free pool */
+		if (i == RES_COUNTER)
+			res_alloc->res_free = dev->caps.max_counters - 1;
 
 		if (!res_alloc->quota || !res_alloc->guaranteed ||
 		    !res_alloc->allocated)
@@ -577,9 +593,17 @@
 				break;
 			case RES_COUNTER:
 				res_alloc->quota[t] = dev->caps.max_counters;
-				res_alloc->guaranteed[t] = 0;
 				if (t == mlx4_master_func_num(dev))
-					res_alloc->res_free = res_alloc->quota[t];
+					res_alloc->guaranteed[t] =
+						MLX4_PF_COUNTERS_PER_PORT *
+						MLX4_MAX_PORTS;
+				else if (t <= max_vfs_guarantee_counter)
+					res_alloc->guaranteed[t] =
+						MLX4_VF_COUNTERS_PER_PORT *
+						MLX4_MAX_PORTS;
+				else
+					res_alloc->guaranteed[t] = 0;
+				res_alloc->res_free -= res_alloc->guaranteed[t];
 				break;
 			default:
 				break;
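
Worked numbers for the guarantee split above, with a made-up
max_counters of 64: the sink counter and the PF's two-per-port
guarantee come off the top, and the remainder bounds how many VFs can
each be guaranteed one counter per port.

	#include <stdio.h>

	#define MLX4_MAX_PORTS			2
	#define MLX4_PF_COUNTERS_PER_PORT	2
	#define MLX4_VF_COUNTERS_PER_PORT	1

	int main(void)
	{
		int max_counters = 64;	/* sample device capability */
		int max_guaranteed_vfs = (max_counters - 1 -
					  MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS) /
					 MLX4_MAX_PORTS;

		printf("PF guaranteed %d; first %d VFs guaranteed %d each\n",
		       MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS,
		       max_guaranteed_vfs,
		       MLX4_VF_COUNTERS_PER_PORT * MLX4_MAX_PORTS);
		/* prints: PF guaranteed 4; first 29 VFs guaranteed 2 each */
		return 0;
	}
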
@@ -700,6 +724,9 @@
 	}
 }
 
+static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
+			  u8 slave, int port);
+
 static int update_vport_qp_param(struct mlx4_dev *dev,
 				 struct mlx4_cmd_mailbox *inbox,
 				 u8 slave, u32 qpn)
@@ -715,6 +742,10 @@
 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;
 
+	err = handle_counter(dev, qpc, slave, port);
+	if (err)
+		goto out;
+
 	if (MLX4_VGT != vp_oper->state.default_vlan) {
 		/* the reserved QPs (special, proxy, tunnel)
 		 * do not operate over vlans
@@ -859,6 +890,83 @@
 	spin_unlock_irq(mlx4_tlock(dev));
 }
 
+static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+			     u64 in_param, u64 *out_param, int port);
+
+static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
+				   int counter_index)
+{
+	struct res_common *r;
+	struct res_counter *counter;
+	int ret = 0;
+
+	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
+		return ret;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = find_res(dev, counter_index, RES_COUNTER);
+	if (!r || r->owner != slave) {
+		ret = -EINVAL;
+	} else {
+		counter = container_of(r, struct res_counter, com);
+		if (!counter->port)
+			counter->port = port;
+	}
+
+	spin_unlock_irq(mlx4_tlock(dev));
+	return ret;
+}
+
+static int handle_nonexistent_counter(struct mlx4_dev *dev,
+				     struct mlx4_qp_context *qpc, u8 slave,
+				     int port)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_common *tmp;
+	struct res_counter *counter;
+	u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
+	int err = 0;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	list_for_each_entry(tmp,
+			    &tracker->slave_list[slave].res_list[RES_COUNTER],
+			    list) {
+		counter = container_of(tmp, struct res_counter, com);
+		if (port == counter->port) {
+			qpc->pri_path.counter_index  = counter->com.res_id;
+			spin_unlock_irq(mlx4_tlock(dev));
+			return 0;
+		}
+	}
+	spin_unlock_irq(mlx4_tlock(dev));
+
+	/* No existing counter, need to allocate a new counter */
+	err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
+				port);
+	if (err == -ENOENT) {
+		err = 0;
+	} else if (err && err != -ENOSPC) {
+		mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
+			 __func__, slave, err);
+	} else {
+		qpc->pri_path.counter_index = counter_idx;
+		mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
+			 __func__, slave, qpc->pri_path.counter_index);
+		err = 0;
+	}
+
+	return err;
+}
+
+static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
+			  u8 slave, int port)
+{
+	if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
+		return handle_existing_counter(dev, slave, port,
+					       qpc->pri_path.counter_index);
+
+	return handle_nonexistent_counter(dev, qpc, slave, port);
+}
+
 static struct res_common *alloc_qp_tr(int id)
 {
 	struct res_qp *ret;
@@ -952,7 +1060,7 @@
 	return &ret->com;
 }
 
-static struct res_common *alloc_counter_tr(int id)
+static struct res_common *alloc_counter_tr(int id, int port)
 {
 	struct res_counter *ret;
 
@@ -962,6 +1070,7 @@
 
 	ret->com.res_id = id;
 	ret->com.state = RES_COUNTER_ALLOCATED;
+	ret->port = port;
 
 	return &ret->com;
 }
@@ -1022,7 +1131,7 @@
 		pr_err("implementation missing\n");
 		return NULL;
 	case RES_COUNTER:
-		ret = alloc_counter_tr(id);
+		ret = alloc_counter_tr(id, extra);
 		break;
 	case RES_XRCD:
 		ret = alloc_xrcdn_tr(id);
@@ -1039,6 +1148,53 @@
 	return ret;
 }
 
+int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
+			  struct mlx4_counter *data)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_common *tmp;
+	struct res_counter *counter;
+	int *counters_arr;
+	int i = 0, err = 0;
+
+	memset(data, 0, sizeof(*data));
+
+	counters_arr = kmalloc_array(dev->caps.max_counters,
+				     sizeof(*counters_arr), GFP_KERNEL);
+	if (!counters_arr)
+		return -ENOMEM;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	list_for_each_entry(tmp,
+			    &tracker->slave_list[slave].res_list[RES_COUNTER],
+			    list) {
+		counter = container_of(tmp, struct res_counter, com);
+		if (counter->port == port) {
+			counters_arr[i] = (int)tmp->res_id;
+			i++;
+		}
+	}
+	spin_unlock_irq(mlx4_tlock(dev));
+	counters_arr[i] = -1;
+
+	i = 0;
+
+	while (counters_arr[i] != -1) {
+		err = mlx4_get_counter_stats(dev, counters_arr[i], data,
+					     0);
+		if (err) {
+			memset(data, 0, sizeof(*data));
+			goto table_changed;
+		}
+		i++;
+	}
+
+table_changed:
+	kfree(counters_arr);
+	return 0;
+}
+
 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
 			 enum mlx4_resource type, int extra)
 {
@@ -2001,7 +2157,7 @@
 }
 
 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
-			     u64 in_param, u64 *out_param)
+			     u64 in_param, u64 *out_param, int port)
 {
 	u32 index;
 	int err;
@@ -2019,7 +2175,7 @@
 		return err;
 	}
 
-	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
+	err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
 	if (err) {
 		__mlx4_counter_free(dev, index);
 		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
@@ -2101,7 +2257,7 @@
 
 	case RES_COUNTER:
 		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
-					vhcr->in_param, &vhcr->out_param);
+					vhcr->in_param, &vhcr->out_param, 0);
 		break;
 
 	case RES_XRCD:
@@ -2335,6 +2491,9 @@
 		return -EINVAL;
 
 	index = get_param_l(&in_param);
+	if (index == MLX4_SINK_COUNTER_INDEX(dev))
+		return 0;
+
 	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
 	if (err)
 		return err;
@@ -2703,6 +2862,10 @@
 	context->qkey = cpu_to_be32(qkey);
 }
 
+static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
+				 struct mlx4_qp_context *qpc,
+				 struct mlx4_cmd_mailbox *inbox);
+
 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
 			     struct mlx4_vhcr *vhcr,
 			     struct mlx4_cmd_mailbox *inbox,
@@ -2725,6 +2888,10 @@
 	struct res_srq *srq;
 	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
 
+	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
+	if (err)
+		return err;
+
 	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
 	if (err)
 		return err;
@@ -3526,8 +3693,8 @@
 	pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
 			  ((port & 1) << 6);
 
-	if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
-	    mlx4_is_eth(dev, port + 1)) {
+	if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
+	    qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
 		qpc->pri_path.sched_queue = pri_sched_queue;
 	}
 
@@ -3965,6 +4132,22 @@
 	return 0;
 }
 
+static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+					 struct _rule_hw *eth_header)
+{
+	if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
+	    is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
+		struct mlx4_net_trans_rule_hw_eth *eth =
+			(struct mlx4_net_trans_rule_hw_eth *)eth_header;
+		struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
+		bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
+			next_rule->rsvd == 0;
+
+		if (last_rule)
+			ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
+	}
+}
+
 /*
  * In case of missing eth header, append eth header with a MAC address
  * assigned to the VF.
@@ -4117,6 +4300,12 @@
 	rule_header = (struct _rule_hw *)(ctrl + 1);
 	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
 
+	if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
+		handle_eth_header_mcast_prio(ctrl, rule_header);
+
+	if (slave == dev->caps.function)
+		goto execute;
+
 	switch (header_id) {
 	case MLX4_NET_TRANS_RULE_ID_ETH:
 		if (validate_eth_header_mac(slave, rule_header, rlist)) {
@@ -4143,6 +4332,7 @@
 		goto err_put;
 	}
 
+execute:
 	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
 			   vhcr->in_modifier, 0,
 			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 8ff57e8..158c88c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -3,6 +3,18 @@
 #
 
 config MLX5_CORE
-	tristate
+	tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
 	depends on PCI
 	default n
+	---help---
+	  Core driver for low-level functionality of the ConnectX-4 and
+	  Connect-IB cards by Mellanox Technologies.
+
+config MLX5_CORE_EN
+	bool "Mellanox Technologies ConnectX-4 Ethernet support"
+	depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
+	default n
+	---help---
+	  Ethernet support in Mellanox Technologies ConnectX-4 NIC.
+	  Ethernet and InfiniBand support in ConnectX-4 are currently mutually
+	  exclusive.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 105780b..26a68b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -2,4 +2,7 @@
 
 mlx5_core-y :=	main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
 		health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o   \
-		mad.o
+		mad.o transobj.o vport.o
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o \
+		en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \
+		en_txrx.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index ac0f7bf..0715b49 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -42,95 +42,36 @@
 #include "mlx5_core.h"
 
 /* Handling for queue buffers -- we allocate a bunch of memory and
- * register it in a memory region at HCA virtual address 0.  If the
- * requested size is > max_direct, we split the allocation into
- * multiple pages, so we don't require too much contiguous memory.
+ * register it in a memory region at HCA virtual address 0.
  */
 
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
-		   struct mlx5_buf *buf)
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
 {
 	dma_addr_t t;
 
 	buf->size = size;
-	if (size <= max_direct) {
-		buf->nbufs        = 1;
-		buf->npages       = 1;
-		buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
-		buf->direct.buf   = dma_zalloc_coherent(&dev->pdev->dev,
-							size, &t, GFP_KERNEL);
-		if (!buf->direct.buf)
-			return -ENOMEM;
+	buf->npages       = 1;
+	buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
+	buf->direct.buf   = dma_zalloc_coherent(&dev->pdev->dev,
+						size, &t, GFP_KERNEL);
+	if (!buf->direct.buf)
+		return -ENOMEM;
 
-		buf->direct.map = t;
+	buf->direct.map = t;
 
-		while (t & ((1 << buf->page_shift) - 1)) {
-			--buf->page_shift;
-			buf->npages *= 2;
-		}
-	} else {
-		int i;
-
-		buf->direct.buf  = NULL;
-		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-		buf->npages      = buf->nbufs;
-		buf->page_shift  = PAGE_SHIFT;
-		buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
-					   GFP_KERNEL);
-		if (!buf->page_list)
-			return -ENOMEM;
-
-		for (i = 0; i < buf->nbufs; i++) {
-			buf->page_list[i].buf =
-				dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE,
-						    &t, GFP_KERNEL);
-			if (!buf->page_list[i].buf)
-				goto err_free;
-
-			buf->page_list[i].map = t;
-		}
-
-		if (BITS_PER_LONG == 64) {
-			struct page **pages;
-			pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL);
-			if (!pages)
-				goto err_free;
-			for (i = 0; i < buf->nbufs; i++)
-				pages[i] = virt_to_page(buf->page_list[i].buf);
-			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
-			kfree(pages);
-			if (!buf->direct.buf)
-				goto err_free;
-		}
+	while (t & ((1 << buf->page_shift) - 1)) {
+		--buf->page_shift;
+		buf->npages *= 2;
 	}
 
 	return 0;
-
-err_free:
-	mlx5_buf_free(dev, buf);
-
-	return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
 
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
 {
-	int i;
-
-	if (buf->nbufs == 1)
-		dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
-				  buf->direct.map);
-	else {
-		if (BITS_PER_LONG == 64)
-			vunmap(buf->direct.buf);
-
-		for (i = 0; i < buf->nbufs; i++)
-			if (buf->page_list[i].buf)
-				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-						  buf->page_list[i].buf,
-						  buf->page_list[i].map);
-		kfree(buf->page_list);
-	}
+	dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
+			  buf->direct.map);
 }
 EXPORT_SYMBOL_GPL(mlx5_buf_free);
 
@@ -230,10 +171,7 @@
 	int i;
 
 	for (i = 0; i < buf->npages; i++) {
-		if (buf->nbufs == 1)
-			addr = buf->direct.map + (i << buf->page_shift);
-		else
-			addr = buf->page_list[i].map;
+		addr = buf->direct.map + (i << buf->page_shift);
 
 		pas[i] = cpu_to_be64(addr);
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e3273fa..75ff58d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -75,25 +75,6 @@
 	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
 };
 
-enum {
-	MLX5_CMD_STAT_OK			= 0x0,
-	MLX5_CMD_STAT_INT_ERR			= 0x1,
-	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
-	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
-	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
-	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
-	MLX5_CMD_STAT_RES_BUSY			= 0x6,
-	MLX5_CMD_STAT_LIM_ERR			= 0x8,
-	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
-	MLX5_CMD_STAT_IX_ERR			= 0xa,
-	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
-	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
-	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
-	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
-	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
-	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
-};
-
 static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
 					   struct mlx5_cmd_msg *in,
 					   struct mlx5_cmd_msg *out,
@@ -390,8 +371,17 @@
 	case MLX5_CMD_OP_ARM_RQ:
 		return "ARM_RQ";
 
-	case MLX5_CMD_OP_RESIZE_SRQ:
-		return "RESIZE_SRQ";
+	case MLX5_CMD_OP_CREATE_XRC_SRQ:
+		return "CREATE_XRC_SRQ";
+
+	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+		return "DESTROY_XRC_SRQ";
+
+	case MLX5_CMD_OP_QUERY_XRC_SRQ:
+		return "QUERY_XRC_SRQ";
+
+	case MLX5_CMD_OP_ARM_XRC_SRQ:
+		return "ARM_XRC_SRQ";
 
 	case MLX5_CMD_OP_ALLOC_PD:
 		return "ALLOC_PD";
@@ -408,8 +398,8 @@
 	case MLX5_CMD_OP_ATTACH_TO_MCG:
 		return "ATTACH_TO_MCG";
 
-	case MLX5_CMD_OP_DETACH_FROM_MCG:
-		return "DETACH_FROM_MCG";
+	case MLX5_CMD_OP_DETTACH_FROM_MCG:
+		return "DETTACH_FROM_MCG";
 
 	case MLX5_CMD_OP_ALLOC_XRCD:
 		return "ALLOC_XRCD";
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index eb0cf81..04ab7e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -219,6 +219,24 @@
 }
 EXPORT_SYMBOL(mlx5_core_modify_cq);
 
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+				   struct mlx5_core_cq *cq,
+				   u16 cq_period,
+				   u16 cq_max_count)
+{
+	struct mlx5_modify_cq_mbox_in in;
+
+	memset(&in, 0, sizeof(in));
+
+	in.cqn              = cpu_to_be32(cq->cqn);
+	in.ctx.cq_period    = cpu_to_be16(cq_period);
+	in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
+	in.field_select     = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
+					  MLX5_CQ_MODIFY_COUNT);
+
+	return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
+}
+
 int mlx5_init_cq_table(struct mlx5_core_dev *dev)
 {
 	struct mlx5_cq_table *table = &dev->priv.cq_table;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
new file mode 100644
index 0000000..3d23bd6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -0,0 +1,521 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/cq.h>
+#include <linux/mlx5/vport.h>
+#include "wq.h"
+#include "transobj.h"
+#include "mlx5_core.h"
+
+#define MLX5E_MAX_NUM_TC	8
+
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x7
+#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x7
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd
+
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (16 * 1024)
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
+#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ         0x7
+
+#define MLX5E_TX_CQ_POLL_BUDGET        128
+#define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
+
+static const char vport_strings[][ETH_GSTRING_LEN] = {
+	/* vport statistics */
+	"rx_packets",
+	"rx_bytes",
+	"tx_packets",
+	"tx_bytes",
+	"rx_error_packets",
+	"rx_error_bytes",
+	"tx_error_packets",
+	"tx_error_bytes",
+	"rx_unicast_packets",
+	"rx_unicast_bytes",
+	"tx_unicast_packets",
+	"tx_unicast_bytes",
+	"rx_multicast_packets",
+	"rx_multicast_bytes",
+	"tx_multicast_packets",
+	"tx_multicast_bytes",
+	"rx_broadcast_packets",
+	"rx_broadcast_bytes",
+	"tx_broadcast_packets",
+	"tx_broadcast_bytes",
+
+	/* SW counters */
+	"tso_packets",
+	"tso_bytes",
+	"lro_packets",
+	"lro_bytes",
+	"rx_csum_good",
+	"rx_csum_none",
+	"tx_csum_offload",
+	"tx_queue_stopped",
+	"tx_queue_wake",
+	"tx_queue_dropped",
+	"rx_wqe_err",
+};
+
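+/* Field order must match vport_strings[]: ethtool reads this struct
+ * as a flat array of u64 counters.
+ */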
+struct mlx5e_vport_stats {
+	/* HW counters */
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 tx_packets;
+	u64 tx_bytes;
+	u64 rx_error_packets;
+	u64 rx_error_bytes;
+	u64 tx_error_packets;
+	u64 tx_error_bytes;
+	u64 rx_unicast_packets;
+	u64 rx_unicast_bytes;
+	u64 tx_unicast_packets;
+	u64 tx_unicast_bytes;
+	u64 rx_multicast_packets;
+	u64 rx_multicast_bytes;
+	u64 tx_multicast_packets;
+	u64 tx_multicast_bytes;
+	u64 rx_broadcast_packets;
+	u64 rx_broadcast_bytes;
+	u64 tx_broadcast_packets;
+	u64 tx_broadcast_bytes;
+
+	/* SW counters */
+	u64 tso_packets;
+	u64 tso_bytes;
+	u64 lro_packets;
+	u64 lro_bytes;
+	u64 rx_csum_good;
+	u64 rx_csum_none;
+	u64 tx_csum_offload;
+	u64 tx_queue_stopped;
+	u64 tx_queue_wake;
+	u64 tx_queue_dropped;
+	u64 rx_wqe_err;
+
+#define NUM_VPORT_COUNTERS     31
+};
+
+static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
+	"packets",
+	"csum_none",
+	"lro_packets",
+	"lro_bytes",
+	"wqe_err"
+};
+
+struct mlx5e_rq_stats {
+	u64 packets;
+	u64 csum_none;
+	u64 lro_packets;
+	u64 lro_bytes;
+	u64 wqe_err;
+#define NUM_RQ_STATS 5
+};
+
+static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
+	"packets",
+	"tso_packets",
+	"tso_bytes",
+	"csum_offload_none",
+	"stopped",
+	"wake",
+	"dropped",
+	"nop"
+};
+
+struct mlx5e_sq_stats {
+	u64 packets;
+	u64 tso_packets;
+	u64 tso_bytes;
+	u64 csum_offload_none;
+	u64 stopped;
+	u64 wake;
+	u64 dropped;
+	u64 nop;
+#define NUM_SQ_STATS 8
+};
+
+struct mlx5e_stats {
+	struct mlx5e_vport_stats   vport;
+};
+
+struct mlx5e_params {
+	u8  log_sq_size;
+	u8  log_rq_size;
+	u16 num_channels;
+	u8  default_vlan_prio;
+	u8  num_tc;
+	u16 rx_cq_moderation_usec;
+	u16 rx_cq_moderation_pkts;
+	u16 tx_cq_moderation_usec;
+	u16 tx_cq_moderation_pkts;
+	u16 min_rx_wqes;
+	u16 rx_hash_log_tbl_sz;
+	bool lro_en;
+	u32 lro_wqe_sz;
+};
+
+enum {
+	MLX5E_RQ_STATE_POST_WQES_ENABLE,
+};
+
+enum cq_flags {
+	MLX5E_CQ_HAS_CQES = 1,
+};
+
+struct mlx5e_cq {
+	/* data path - accessed per cqe */
+	struct mlx5_cqwq           wq;
+	unsigned long              flags;
+
+	/* data path - accessed per napi poll */
+	struct napi_struct        *napi;
+	struct mlx5_core_cq        mcq;
+	struct mlx5e_channel      *channel;
+
+	/* control */
+	struct mlx5_wq_ctrl        wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_rq {
+	/* data path */
+	struct mlx5_wq_ll      wq;
+	u32                    wqe_sz;
+	struct sk_buff       **skb;
+
+	struct device         *pdev;
+	struct net_device     *netdev;
+	struct mlx5e_rq_stats  stats;
+	struct mlx5e_cq        cq;
+
+	unsigned long          state;
+	int                    ix;
+
+	/* control */
+	struct mlx5_wq_ctrl    wq_ctrl;
+	u32                    rqn;
+	struct mlx5e_channel  *channel;
+} ____cacheline_aligned_in_smp;
+
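+/* per-packet TX metadata stashed in skb->cb */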
+struct mlx5e_tx_skb_cb {
+	u32 num_bytes;
+	u8  num_wqebbs;
+	u8  num_dma;
+};
+
+#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
+
+struct mlx5e_sq_dma {
+	dma_addr_t addr;
+	u32        size;
+};
+
+enum {
+	MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+};
+
+struct mlx5e_sq {
+	/* data path */
+
+	/* dirtied @completion */
+	u16                        cc;
+	u32                        dma_fifo_cc;
+
+	/* dirtied @xmit */
+	u16                        pc ____cacheline_aligned_in_smp;
+	u32                        dma_fifo_pc;
+	u32                        bf_offset;
+	struct mlx5e_sq_stats      stats;
+
+	struct mlx5e_cq            cq;
+
+	/* pointers to per packet info: write@xmit, read@completion */
+	struct sk_buff           **skb;
+	struct mlx5e_sq_dma       *dma_fifo;
+
+	/* read only */
+	struct mlx5_wq_cyc         wq;
+	u32                        dma_fifo_mask;
+	void __iomem              *uar_map;
+	struct netdev_queue       *txq;
+	u32                        sqn;
+	u32                        bf_buf_size;
+	u16                        max_inline;
+	u16                        edge;
+	struct device             *pdev;
+	__be32                     mkey_be;
+	unsigned long              state;
+
+	/* control path */
+	struct mlx5_wq_ctrl        wq_ctrl;
+	struct mlx5_uar            uar;
+	struct mlx5e_channel      *channel;
+	int                        tc;
+} ____cacheline_aligned_in_smp;
+
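+/* Room check on the cyclic SQ: free WQEBBs are (sz_m1 & (cc - pc)),
+ * except that pc == cc means the queue is completely empty.
+ */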
+static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
+{
+	return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
+		(sq->cc  == sq->pc));
+}
+
+enum channel_flags {
+	MLX5E_CHANNEL_NAPI_SCHED = 1,
+};
+
+struct mlx5e_channel {
+	/* data path */
+	struct mlx5e_rq            rq;
+	struct mlx5e_sq            sq[MLX5E_MAX_NUM_TC];
+	struct napi_struct         napi;
+	struct device             *pdev;
+	struct net_device         *netdev;
+	__be32                     mkey_be;
+	u8                         num_tc;
+	unsigned long              flags;
+	int                        tc_to_txq_map[MLX5E_MAX_NUM_TC];
+
+	/* control */
+	struct mlx5e_priv         *priv;
+	int                        ix;
+	int                        cpu;
+};
+
+enum mlx5e_traffic_types {
+	MLX5E_TT_IPV4_TCP = 0,
+	MLX5E_TT_IPV6_TCP = 1,
+	MLX5E_TT_IPV4_UDP = 2,
+	MLX5E_TT_IPV6_UDP = 3,
+	MLX5E_TT_IPV4     = 4,
+	MLX5E_TT_IPV6     = 5,
+	MLX5E_TT_ANY      = 6,
+	MLX5E_NUM_TT      = 7,
+};
+
+enum {
+	MLX5E_RQT_SPREADING  = 0,
+	MLX5E_RQT_DEFAULT_RQ = 1,
+	MLX5E_NUM_RQT        = 2,
+};
+
+struct mlx5e_eth_addr_info {
+	u8  addr[ETH_ALEN + 2];
+	u32 tt_vec;
+	u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */
+};
+
+#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
+
+struct mlx5e_eth_addr_db {
+	struct hlist_head          netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE];
+	struct hlist_head          netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE];
+	struct mlx5e_eth_addr_info broadcast;
+	struct mlx5e_eth_addr_info allmulti;
+	struct mlx5e_eth_addr_info promisc;
+	bool                       broadcast_enabled;
+	bool                       allmulti_enabled;
+	bool                       promisc_enabled;
+};
+
+enum {
+	MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+	MLX5E_STATE_OPENED,
+};
+
+struct mlx5e_vlan_db {
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	u32           active_vlans_ft_ix[VLAN_N_VID];
+	u32           untagged_rule_ft_ix;
+	u32           any_vlan_rule_ft_ix;
+	bool          filter_disabled;
+};
+
+struct mlx5e_flow_table {
+	void *vlan;
+	void *main;
+};
+
+struct mlx5e_priv {
+	/* priv data path fields - start */
+	int                        num_tc;
+	int                        default_vlan_prio;
+	struct mlx5e_sq            **txq_to_sq_map;
+	/* priv data path fields - end */
+
+	unsigned long              state;
+	struct mutex               state_lock; /* Protects Interface state */
+	struct mlx5_uar            cq_uar;
+	u32                        pdn;
+	u32                        tdn;
+	struct mlx5_core_mr        mr;
+
+	struct mlx5e_channel     **channel;
+	u32                        tisn[MLX5E_MAX_NUM_TC];
+	u32                        rqtn;
+	u32                        tirn[MLX5E_NUM_TT];
+
+	struct mlx5e_flow_table    ft;
+	struct mlx5e_eth_addr_db   eth_addr;
+	struct mlx5e_vlan_db       vlan;
+
+	struct mlx5e_params        params;
+	spinlock_t                 async_events_spinlock; /* sync hw events */
+	struct work_struct         update_carrier_work;
+	struct work_struct         set_rx_mode_work;
+	struct delayed_work        update_stats_work;
+
+	struct mlx5_core_dev      *mdev;
+	struct net_device         *netdev;
+	struct mlx5e_stats         stats;
+};
+
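+/* reserve 2 bytes before the packet so the IP header is 4-byte aligned */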
+#define MLX5E_NET_IP_ALIGN 2
+
+struct mlx5e_tx_wqe {
+	struct mlx5_wqe_ctrl_seg ctrl;
+	struct mlx5_wqe_eth_seg  eth;
+};
+
+struct mlx5e_rx_wqe {
+	struct mlx5_wqe_srq_next_seg  next;
+	struct mlx5_wqe_data_seg      data;
+};
+
+enum mlx5e_link_mode {
+	MLX5E_1000BASE_CX_SGMII	 = 0,
+	MLX5E_1000BASE_KX	 = 1,
+	MLX5E_10GBASE_CX4	 = 2,
+	MLX5E_10GBASE_KX4	 = 3,
+	MLX5E_10GBASE_KR	 = 4,
+	MLX5E_20GBASE_KR2	 = 5,
+	MLX5E_40GBASE_CR4	 = 6,
+	MLX5E_40GBASE_KR4	 = 7,
+	MLX5E_56GBASE_R4	 = 8,
+	MLX5E_10GBASE_CR	 = 12,
+	MLX5E_10GBASE_SR	 = 13,
+	MLX5E_10GBASE_ER	 = 14,
+	MLX5E_40GBASE_SR4	 = 15,
+	MLX5E_40GBASE_LR4	 = 16,
+	MLX5E_100GBASE_CR4	 = 20,
+	MLX5E_100GBASE_SR4	 = 21,
+	MLX5E_100GBASE_KR4	 = 22,
+	MLX5E_100GBASE_LR4	 = 23,
+	MLX5E_100BASE_TX	 = 24,
+	MLX5E_100BASE_T		 = 25,
+	MLX5E_10GBASE_T		 = 26,
+	MLX5E_25GBASE_CR	 = 27,
+	MLX5E_25GBASE_KR	 = 28,
+	MLX5E_25GBASE_SR	 = 29,
+	MLX5E_50GBASE_CR2	 = 30,
+	MLX5E_50GBASE_KR2	 = 31,
+	MLX5E_LINK_MODES_NUMBER,
+};
+
+#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+
+void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+		       void *accel_priv, select_queue_fallback_t fallback);
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
+
+void mlx5e_completion_event(struct mlx5_core_cq *mcq);
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
+int mlx5e_napi_poll(struct napi_struct *napi, int budget);
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq);
+bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
+
+void mlx5e_update_stats(struct mlx5e_priv *priv);
+
+int mlx5e_open_flow_table(struct mlx5e_priv *priv);
+void mlx5e_close_flow_table(struct mlx5e_priv *priv);
+void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_work(struct work_struct *work);
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+			  u16 vid);
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+			   u16 vid);
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
+int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
+void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
+
+int mlx5e_open_locked(struct net_device *netdev);
+int mlx5e_close_locked(struct net_device *netdev);
+int mlx5e_update_priv_params(struct mlx5e_priv *priv,
+			     struct mlx5e_params *new_params);
+
+static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
+				      struct mlx5e_tx_wqe *wqe)
+{
+	/* ensure wqe is visible to device before updating doorbell record */
+	dma_wmb();
+
+	*sq->wq.db = cpu_to_be32(sq->pc);
+
+	/* ensure doorbell record is visible to device before ringing the
+	 * doorbell
+	 */
+	wmb();
+
+	mlx5_write64((__be32 *)&wqe->ctrl,
+		     sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
+		     NULL);
+
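+	/* alternate between the two BlueFlame buffers */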
+	sq->bf_offset ^= sq->bf_buf_size;
+}
+
+static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
+{
+	struct mlx5_core_cq *mcq;
+
+	mcq = &cq->mcq;
+	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
+}
+
+extern const struct ethtool_ops mlx5e_ethtool_ops;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
new file mode 100644
index 0000000..3889384
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -0,0 +1,679 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
+static void mlx5e_get_drvinfo(struct net_device *dev,
+			      struct ethtool_drvinfo *drvinfo)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")",
+		sizeof(drvinfo->version));
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		 "%d.%d.%d",
+		 fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
+	strlcpy(drvinfo->bus_info, pci_name(mdev->pdev),
+		sizeof(drvinfo->bus_info));
+}
+
+static const struct {
+	u32 supported;
+	u32 advertised;
+	u32 speed;
+} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = {
+	[MLX5E_1000BASE_CX_SGMII] = {
+		.supported  = SUPPORTED_1000baseKX_Full,
+		.advertised = ADVERTISED_1000baseKX_Full,
+		.speed      = 1000,
+	},
+	[MLX5E_1000BASE_KX] = {
+		.supported  = SUPPORTED_1000baseKX_Full,
+		.advertised = ADVERTISED_1000baseKX_Full,
+		.speed      = 1000,
+	},
+	[MLX5E_10GBASE_CX4] = {
+		.supported  = SUPPORTED_10000baseKX4_Full,
+		.advertised = ADVERTISED_10000baseKX4_Full,
+		.speed      = 10000,
+	},
+	[MLX5E_10GBASE_KX4] = {
+		.supported  = SUPPORTED_10000baseKX4_Full,
+		.advertised = ADVERTISED_10000baseKX4_Full,
+		.speed      = 10000,
+	},
+	[MLX5E_10GBASE_KR] = {
+		.supported  = SUPPORTED_10000baseKR_Full,
+		.advertised = ADVERTISED_10000baseKR_Full,
+		.speed      = 10000,
+	},
+	[MLX5E_20GBASE_KR2] = {
+		.supported  = SUPPORTED_20000baseKR2_Full,
+		.advertised = ADVERTISED_20000baseKR2_Full,
+		.speed      = 20000,
+	},
+	[MLX5E_40GBASE_CR4] = {
+		.supported  = SUPPORTED_40000baseCR4_Full,
+		.advertised = ADVERTISED_40000baseCR4_Full,
+		.speed      = 40000,
+	},
+	[MLX5E_40GBASE_KR4] = {
+		.supported  = SUPPORTED_40000baseKR4_Full,
+		.advertised = ADVERTISED_40000baseKR4_Full,
+		.speed      = 40000,
+	},
+	[MLX5E_56GBASE_R4] = {
+		.supported  = SUPPORTED_56000baseKR4_Full,
+		.advertised = ADVERTISED_56000baseKR4_Full,
+		.speed      = 56000,
+	},
+	[MLX5E_10GBASE_CR] = {
+		.supported  = SUPPORTED_10000baseKR_Full,
+		.advertised = ADVERTISED_10000baseKR_Full,
+		.speed      = 10000,
+	},
+	[MLX5E_10GBASE_SR] = {
+		.supported  = SUPPORTED_10000baseKR_Full,
+		.advertised = ADVERTISED_10000baseKR_Full,
+		.speed      = 10000,
+	},
+	[MLX5E_10GBASE_ER] = {
+		.supported  = SUPPORTED_10000baseKR_Full,
+		.advertised = ADVERTISED_10000baseKR_Full,
+		.speed      = 10000,
+	},
+	[MLX5E_40GBASE_SR4] = {
+		.supported  = SUPPORTED_40000baseSR4_Full,
+		.advertised = ADVERTISED_40000baseSR4_Full,
+		.speed      = 40000,
+	},
+	[MLX5E_40GBASE_LR4] = {
+		.supported  = SUPPORTED_40000baseLR4_Full,
+		.advertised = ADVERTISED_40000baseLR4_Full,
+		.speed      = 40000,
+	},
+	[MLX5E_100GBASE_CR4] = {
+		.speed      = 100000,
+	},
+	[MLX5E_100GBASE_SR4] = {
+		.speed      = 100000,
+	},
+	[MLX5E_100GBASE_KR4] = {
+		.speed      = 100000,
+	},
+	[MLX5E_100GBASE_LR4] = {
+		.speed      = 100000,
+	},
+	[MLX5E_100BASE_TX]   = {
+		.speed      = 100,
+	},
+	[MLX5E_100BASE_T]    = {
+		.supported  = SUPPORTED_100baseT_Full,
+		.advertised = ADVERTISED_100baseT_Full,
+		.speed      = 100,
+	},
+	[MLX5E_10GBASE_T]    = {
+		.supported  = SUPPORTED_10000baseT_Full,
+		.advertised = ADVERTISED_10000baseT_Full,
+		.speed      = 10000,
+	},
+	[MLX5E_25GBASE_CR]   = {
+		.speed      = 25000,
+	},
+	[MLX5E_25GBASE_KR]   = {
+		.speed      = 25000,
+	},
+	[MLX5E_25GBASE_SR]   = {
+		.speed      = 25000,
+	},
+	[MLX5E_50GBASE_CR2]  = {
+		.speed      = 50000,
+	},
+	[MLX5E_50GBASE_KR2]  = {
+		.speed      = 50000,
+	},
+};
+
+static int mlx5e_get_sset_count(struct net_device *dev, int sset)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	switch (sset) {
+	case ETH_SS_STATS:
+		return NUM_VPORT_COUNTERS +
+		       priv->params.num_channels * NUM_RQ_STATS +
+		       priv->params.num_channels * priv->num_tc *
+						   NUM_SQ_STATS;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void mlx5e_get_strings(struct net_device *dev,
+			      uint32_t stringset, uint8_t *data)
+{
+	int i, j, tc, idx = 0;
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	switch (stringset) {
+	case ETH_SS_PRIV_FLAGS:
+		break;
+
+	case ETH_SS_TEST:
+		break;
+
+	case ETH_SS_STATS:
+		/* VPORT counters */
+		for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+			strcpy(data + (idx++) * ETH_GSTRING_LEN,
+			       vport_strings[i]);
+
+		/* per channel counters */
+		for (i = 0; i < priv->params.num_channels; i++)
+			for (j = 0; j < NUM_RQ_STATS; j++)
+				sprintf(data + (idx++) * ETH_GSTRING_LEN,
+					"rx%d_%s", i, rq_stats_strings[j]);
+
+		for (i = 0; i < priv->params.num_channels; i++)
+			for (tc = 0; tc < priv->num_tc; tc++)
+				for (j = 0; j < NUM_SQ_STATS; j++)
+					sprintf(data +
+						(idx++) * ETH_GSTRING_LEN,
+						"tx%d_%d_%s", i, tc,
+						sq_stats_strings[j]);
+		break;
+	}
+}
+
+static void mlx5e_get_ethtool_stats(struct net_device *dev,
+				    struct ethtool_stats *stats, u64 *data)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	int i, j, tc, idx = 0;
+
+	if (!data)
+		return;
+
+	mutex_lock(&priv->state_lock);
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+		mlx5e_update_stats(priv);
+	mutex_unlock(&priv->state_lock);
+
+	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+		data[idx++] = ((u64 *)&priv->stats.vport)[i];
+
+	/* per channel counters */
+	for (i = 0; i < priv->params.num_channels; i++)
+		for (j = 0; j < NUM_RQ_STATS; j++)
+			data[idx++] = !test_bit(MLX5E_STATE_OPENED,
+						&priv->state) ? 0 :
+				       ((u64 *)&priv->channel[i]->rq.stats)[j];
+
+	for (i = 0; i < priv->params.num_channels; i++)
+		for (tc = 0; tc < priv->num_tc; tc++)
+			for (j = 0; j < NUM_SQ_STATS; j++)
+				data[idx++] = !test_bit(MLX5E_STATE_OPENED,
+							&priv->state) ? 0 :
+				((u64 *)&priv->channel[i]->sq[tc].stats)[j];
+}
+
+static void mlx5e_get_ringparam(struct net_device *dev,
+				struct ethtool_ringparam *param)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+	param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
+	param->rx_pending     = 1 << priv->params.log_rq_size;
+	param->tx_pending     = 1 << priv->params.log_sq_size;
+}
+
+static int mlx5e_set_ringparam(struct net_device *dev,
+			       struct ethtool_ringparam *param)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5e_params new_params;
+	u16 min_rx_wqes;
+	u8 log_rq_size;
+	u8 log_sq_size;
+	int err = 0;
+
+	if (param->rx_jumbo_pending) {
+		netdev_info(dev, "%s: rx_jumbo_pending not supported\n",
+			    __func__);
+		return -EINVAL;
+	}
+	if (param->rx_mini_pending) {
+		netdev_info(dev, "%s: rx_mini_pending not supported\n",
+			    __func__);
+		return -EINVAL;
+	}
+	if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
+		netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
+			    __func__, param->rx_pending,
+			    1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+		return -EINVAL;
+	}
+	if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) {
+		netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
+			    __func__, param->rx_pending,
+			    1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE);
+		return -EINVAL;
+	}
+	if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
+		netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
+			    __func__, param->tx_pending,
+			    1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
+		return -EINVAL;
+	}
+	if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
+		netdev_info(dev, "%s: tx_pending (%d) > max (%d)\n",
+			    __func__, param->tx_pending,
+			    1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
+		return -EINVAL;
+	}
+
+	log_rq_size = order_base_2(param->rx_pending);
+	log_sq_size = order_base_2(param->tx_pending);
+	min_rx_wqes = min_t(u16, param->rx_pending - 1,
+			    MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
+
+	if (log_rq_size == priv->params.log_rq_size &&
+	    log_sq_size == priv->params.log_sq_size &&
+	    min_rx_wqes == priv->params.min_rx_wqes)
+		return 0;
+
+	mutex_lock(&priv->state_lock);
+	new_params = priv->params;
+	new_params.log_rq_size = log_rq_size;
+	new_params.log_sq_size = log_sq_size;
+	new_params.min_rx_wqes = min_rx_wqes;
+	err = mlx5e_update_priv_params(priv, &new_params);
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+static void mlx5e_get_channels(struct net_device *dev,
+			       struct ethtool_channels *ch)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+
+	ch->max_combined   = ncv;
+	ch->combined_count = priv->params.num_channels;
+}
+
+static int mlx5e_set_channels(struct net_device *dev,
+			      struct ethtool_channels *ch)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+	unsigned int count = ch->combined_count;
+	struct mlx5e_params new_params;
+	int err = 0;
+
+	if (!count) {
+		netdev_info(dev, "%s: combined_count=0 not supported\n",
+			    __func__);
+		return -EINVAL;
+	}
+	if (ch->rx_count || ch->tx_count) {
+		netdev_info(dev, "%s: separate rx/tx count not supported\n",
+			    __func__);
+		return -EINVAL;
+	}
+	if (count > ncv) {
+		netdev_info(dev, "%s: count (%d) > max (%d)\n",
+			    __func__, count, ncv);
+		return -EINVAL;
+	}
+
+	if (priv->params.num_channels == count)
+		return 0;
+
+	mutex_lock(&priv->state_lock);
+	new_params = priv->params;
+	new_params.num_channels = count;
+	err = mlx5e_update_priv_params(priv, &new_params);
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+static int mlx5e_get_coalesce(struct net_device *netdev,
+			      struct ethtool_coalesce *coal)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation_usec;
+	coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
+	coal->tx_coalesce_usecs       = priv->params.tx_cq_moderation_usec;
+	coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts;
+
+	return 0;
+}
+
+static int mlx5e_set_coalesce(struct net_device *netdev,
+			      struct ethtool_coalesce *coal)
+{
+	struct mlx5e_priv *priv    = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5e_channel *c;
+	int tc;
+	int i;
+
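+	/* cache the new values, then apply them to each channel's CQs */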
+	priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
+	priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
+	priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
+	priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
+
+	for (i = 0; i < priv->params.num_channels; ++i) {
+		c = priv->channel[i];
+
+		for (tc = 0; tc < c->num_tc; tc++) {
+			mlx5_core_modify_cq_moderation(mdev,
+						&c->sq[tc].cq.mcq,
+						coal->tx_coalesce_usecs,
+						coal->tx_max_coalesced_frames);
+		}
+
+		mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
+					       coal->rx_coalesce_usecs,
+					       coal->rx_max_coalesced_frames);
+	}
+
+	return 0;
+}
+
+static u32 ptys2ethtool_supported_link(u32 eth_proto_cap)
+{
+	int i;
+	u32 supported_modes = 0;
+
+	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+		if (eth_proto_cap & MLX5E_PROT_MASK(i))
+			supported_modes |= ptys2ethtool_table[i].supported;
+	}
+	return supported_modes;
+}
+
+static u32 ptys2ethtool_adver_link(u32 eth_proto_cap)
+{
+	int i;
+	u32 advertising_modes = 0;
+
+	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+		if (eth_proto_cap & MLX5E_PROT_MASK(i))
+			advertising_modes |= ptys2ethtool_table[i].advertised;
+	}
+	return advertising_modes;
+}
+
+static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
+{
+	if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+			   | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+			   | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+			   | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+			   | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+			   | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+		return SUPPORTED_FIBRE;
+	}
+
+	if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
+			   | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+			   | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+			   | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+			   | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
+		return SUPPORTED_Backplane;
+	}
+	return 0;
+}
+
+static void get_speed_duplex(struct net_device *netdev,
+			     u32 eth_proto_oper,
+			     struct ethtool_cmd *cmd)
+{
+	int i;
+	u32 speed = SPEED_UNKNOWN;
+	u8 duplex = DUPLEX_UNKNOWN;
+
+	if (!netif_carrier_ok(netdev))
+		goto out;
+
+	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+		if (eth_proto_oper & MLX5E_PROT_MASK(i)) {
+			speed = ptys2ethtool_table[i].speed;
+			duplex = DUPLEX_FULL;
+			break;
+		}
+	}
+out:
+	ethtool_cmd_speed_set(cmd, speed);
+	cmd->duplex = duplex;
+}
+
+static void get_supported(u32 eth_proto_cap, u32 *supported)
+{
+	*supported |= ptys2ethtool_supported_port(eth_proto_cap);
+	*supported |= ptys2ethtool_supported_link(eth_proto_cap);
+	*supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+}
+
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
+			    u8 rx_pause, u32 *advertising)
+{
+	*advertising |= ptys2ethtool_adver_link(eth_proto_cap);
+	*advertising |= tx_pause ? ADVERTISED_Pause : 0;
+	*advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0;
+}
+
+static u8 get_connector_port(u32 eth_proto)
+{
+	if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+			 | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+			 | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+			 | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+		return PORT_FIBRE;
+	}
+
+	if (eth_proto & (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+			 | MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+			 | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) {
+		return PORT_DA;
+	}
+
+	if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+			 | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+			 | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+			 | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) {
+		return PORT_NONE;
+	}
+
+	return PORT_OTHER;
+}
+
+static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising)
+{
+	*lp_advertising = ptys2ethtool_adver_link(eth_proto_lp);
+}
+
+static int mlx5e_get_settings(struct net_device *netdev,
+			      struct ethtool_cmd *cmd)
+{
+	struct mlx5e_priv *priv    = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+	u32 eth_proto_cap;
+	u32 eth_proto_admin;
+	u32 eth_proto_lp;
+	u32 eth_proto_oper;
+	int err;
+
+	err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
+
+	if (err) {
+		netdev_err(netdev, "%s: query port ptys failed: %d\n",
+			   __func__, err);
+		goto err_query_ptys;
+	}
+
+	eth_proto_cap   = MLX5_GET(ptys_reg, out, eth_proto_capability);
+	eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+	eth_proto_oper  = MLX5_GET(ptys_reg, out, eth_proto_oper);
+	eth_proto_lp    = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
+
+	cmd->supported   = 0;
+	cmd->advertising = 0;
+
+	get_supported(eth_proto_cap, &cmd->supported);
+	get_advertising(eth_proto_admin, 0, 0, &cmd->advertising);
+	get_speed_duplex(netdev, eth_proto_oper, cmd);
+
+	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+
+	cmd->port = get_connector_port(eth_proto_oper);
+	get_lp_advertising(eth_proto_lp, &cmd->lp_advertising);
+
+	cmd->transceiver = XCVR_INTERNAL;
+
+err_query_ptys:
+	return err;
+}
+
+static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes)
+{
+	u32 i, ptys_modes = 0;
+
+	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+		if (ptys2ethtool_table[i].advertised & link_modes)
+			ptys_modes |= MLX5E_PROT_MASK(i);
+	}
+
+	return ptys_modes;
+}
+
+static u32 mlx5e_ethtool2ptys_speed_link(u32 speed)
+{
+	u32 i, speed_links = 0;
+
+	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+		if (ptys2ethtool_table[i].speed == speed)
+			speed_links |= MLX5E_PROT_MASK(i);
+	}
+
+	return speed_links;
+}
+
+static int mlx5e_set_settings(struct net_device *netdev,
+			      struct ethtool_cmd *cmd)
+{
+	struct mlx5e_priv *priv    = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 link_modes;
+	u32 speed;
+	u32 eth_proto_cap, eth_proto_admin;
+	u8 port_status;
+	int err;
+
+	speed = ethtool_cmd_speed(cmd);
+
+	link_modes = cmd->autoneg == AUTONEG_ENABLE ?
+		mlx5e_ethtool2ptys_adver_link(cmd->advertising) :
+		mlx5e_ethtool2ptys_speed_link(speed);
+
+	err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
+	if (err) {
+		netdev_err(netdev, "%s: query port eth proto cap failed: %d\n",
+			   __func__, err);
+		goto out;
+	}
+
+	link_modes = link_modes & eth_proto_cap;
+	if (!link_modes) {
+		netdev_err(netdev, "%s: unsupported link mode(s) requested\n",
+			   __func__);
+		err = -EINVAL;
+		goto out;
+	}
+
+	err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
+	if (err) {
+		netdev_err(netdev, "%s: query port eth proto admin failed: %d\n",
+			   __func__, err);
+		goto out;
+	}
+
+	if (link_modes == eth_proto_admin)
+		goto out;
+
+	err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+	if (err) {
+		netdev_err(netdev, "%s: set port eth proto admin failed: %d\n",
+			   __func__, err);
+		goto out;
+	}
+
+	err = mlx5_query_port_status(mdev, &port_status);
+	if (err)
+		goto out;
+
+	if (port_status == MLX5_PORT_DOWN)
+		return 0;
+
+	err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
+	if (err)
+		goto out;
+	err = mlx5_set_port_status(mdev, MLX5_PORT_UP);
+out:
+	return err;
+}
+
+const struct ethtool_ops mlx5e_ethtool_ops = {
+	.get_drvinfo       = mlx5e_get_drvinfo,
+	.get_link          = ethtool_op_get_link,
+	.get_strings       = mlx5e_get_strings,
+	.get_sset_count    = mlx5e_get_sset_count,
+	.get_ethtool_stats = mlx5e_get_ethtool_stats,
+	.get_ringparam     = mlx5e_get_ringparam,
+	.set_ringparam     = mlx5e_set_ringparam,
+	.get_channels      = mlx5e_get_channels,
+	.set_channels      = mlx5e_set_channels,
+	.get_coalesce      = mlx5e_get_coalesce,
+	.set_coalesce      = mlx5e_set_coalesce,
+	.get_settings      = mlx5e_get_settings,
+	.set_settings      = mlx5e_set_settings,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
new file mode 100644
index 0000000..120db80
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -0,0 +1,860 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/mlx5/flow_table.h>
+#include "en.h"
+
+enum {
+	MLX5E_FULLMATCH = 0,
+	MLX5E_ALLMULTI  = 1,
+	MLX5E_PROMISC   = 2,
+};
+
+enum {
+	MLX5E_UC        = 0,
+	MLX5E_MC_IPV4   = 1,
+	MLX5E_MC_IPV6   = 2,
+	MLX5E_MC_OTHER  = 3,
+};
+
+enum {
+	MLX5E_ACTION_NONE = 0,
+	MLX5E_ACTION_ADD  = 1,
+	MLX5E_ACTION_DEL  = 2,
+};
+
+struct mlx5e_eth_addr_hash_node {
+	struct hlist_node          hlist;
+	u8                         action;
+	struct mlx5e_eth_addr_info ai;
+};
+
+static inline int mlx5e_hash_eth_addr(u8 *addr)
+{
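+	/* hash on the last MAC byte; the tables have 256 buckets */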
+	return addr[5];
+}
+
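+/* mark a new address for addition, or keep an existing entry alive */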
+static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
+{
+	struct mlx5e_eth_addr_hash_node *hn;
+	int ix = mlx5e_hash_eth_addr(addr);
+	int found = 0;
+
+	hlist_for_each_entry(hn, &hash[ix], hlist)
+		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
+			found = 1;
+			break;
+		}
+
+	if (found) {
+		hn->action = MLX5E_ACTION_NONE;
+		return;
+	}
+
+	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
+	if (!hn)
+		return;
+
+	ether_addr_copy(hn->ai.addr, addr);
+	hn->action = MLX5E_ACTION_ADD;
+
+	hlist_add_head(&hn->hlist, &hash[ix]);
+}
+
+static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
+{
+	hlist_del(&hn->hlist);
+	kfree(hn);
+}
+
+static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
+					       struct mlx5e_eth_addr_info *ai)
+{
+	void *ft = priv->ft.main;
+
+	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
+		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+
+	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
+		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+
+	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
+		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+
+	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
+		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+
+	if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
+		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
+
+	if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
+		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
+
+	if (ai->tt_vec & (1 << MLX5E_TT_ANY))
+		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
+}
+
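+/* classify by dmac: 01:00:5e:* (high bit of byte 3 clear) is IPv4
+ * multicast, 33:33:* is IPv6 multicast
+ */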
+static int mlx5e_get_eth_addr_type(u8 *addr)
+{
+	if (is_unicast_ether_addr(addr))
+		return MLX5E_UC;
+
+	if ((addr[0] == 0x01) &&
+	    (addr[1] == 0x00) &&
+	    (addr[2] == 0x5e) &&
+	   !(addr[3] &  0x80))
+		return MLX5E_MC_IPV4;
+
+	if ((addr[0] == 0x33) &&
+	    (addr[1] == 0x33))
+		return MLX5E_MC_IPV6;
+
+	return MLX5E_MC_OTHER;
+}
+
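+/* bitmap of traffic types whose TIRs should receive this address's flows */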
+static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
+{
+	int eth_addr_type;
+	u32 ret;
+
+	switch (type) {
+	case MLX5E_FULLMATCH:
+		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
+		switch (eth_addr_type) {
+		case MLX5E_UC:
+			ret =
+				(1 << MLX5E_TT_IPV4_TCP) |
+				(1 << MLX5E_TT_IPV6_TCP) |
+				(1 << MLX5E_TT_IPV4_UDP) |
+				(1 << MLX5E_TT_IPV6_UDP) |
+				(1 << MLX5E_TT_IPV4)     |
+				(1 << MLX5E_TT_IPV6)     |
+				(1 << MLX5E_TT_ANY)      |
+				0;
+			break;
+
+		case MLX5E_MC_IPV4:
+			ret =
+				(1 << MLX5E_TT_IPV4_UDP) |
+				(1 << MLX5E_TT_IPV4)     |
+				0;
+			break;
+
+		case MLX5E_MC_IPV6:
+			ret =
+				(1 << MLX5E_TT_IPV6_UDP) |
+				(1 << MLX5E_TT_IPV6)     |
+				0;
+			break;
+
+		case MLX5E_MC_OTHER:
+			ret =
+				(1 << MLX5E_TT_ANY)      |
+				0;
+			break;
+		}
+
+		break;
+
+	case MLX5E_ALLMULTI:
+		ret =
+			(1 << MLX5E_TT_IPV4_UDP) |
+			(1 << MLX5E_TT_IPV6_UDP) |
+			(1 << MLX5E_TT_IPV4)     |
+			(1 << MLX5E_TT_IPV6)     |
+			(1 << MLX5E_TT_ANY)      |
+			0;
+		break;
+
+	default: /* MLX5E_PROMISC */
+		ret =
+			(1 << MLX5E_TT_IPV4_TCP) |
+			(1 << MLX5E_TT_IPV6_TCP) |
+			(1 << MLX5E_TT_IPV4_UDP) |
+			(1 << MLX5E_TT_IPV6_UDP) |
+			(1 << MLX5E_TT_IPV4)     |
+			(1 << MLX5E_TT_IPV6)     |
+			(1 << MLX5E_TT_ANY)      |
+			0;
+		break;
+	}
+
+	return ret;
+}
+
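+/* Install one flow-table entry per traffic type set in the tt_vec,
+ * reusing the same flow context and progressively refining the match
+ * criteria (dmac, then ethertype, then ip_protocol).
+ */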
+static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+				     struct mlx5e_eth_addr_info *ai, int type,
+				     void *flow_context, void *match_criteria)
+{
+	u8 match_criteria_enable = 0;
+	void *match_value;
+	void *dest;
+	u8   *dmac;
+	u8   *match_criteria_dmac;
+	void *ft   = priv->ft.main;
+	u32  *tirn = priv->tirn;
+	u32  tt_vec;
+	int  err;
+
+	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+	dmac = MLX5_ADDR_OF(fte_match_param, match_value,
+			    outer_headers.dmac_47_16);
+	match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+					   outer_headers.dmac_47_16);
+	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
+
+	MLX5_SET(flow_context, flow_context, action,
+		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
+	MLX5_SET(dest_format_struct, dest, destination_type,
+		 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
+
+	switch (type) {
+	case MLX5E_FULLMATCH:
+		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+		memset(match_criteria_dmac, 0xff, ETH_ALEN);
+		ether_addr_copy(dmac, ai->addr);
+		break;
+
+	case MLX5E_ALLMULTI:
+		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+		match_criteria_dmac[0] = 0x01;
+		dmac[0] = 0x01;
+		break;
+
+	case MLX5E_PROMISC:
+		break;
+	}
+
+	tt_vec = mlx5e_get_tt_vec(ai, type);
+
+	if (tt_vec & (1 << MLX5E_TT_ANY)) {
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_ANY]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						&ai->ft_ix[MLX5E_TT_ANY]);
+		if (err) {
+			mlx5e_del_eth_addr_from_flow_table(priv, ai);
+			return err;
+		}
+		ai->tt_vec |= (1 << MLX5E_TT_ANY);
+	}
+
+	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+			 outer_headers.ethertype);
+
+	if (tt_vec & (1 << MLX5E_TT_IPV4)) {
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+			 ETH_P_IP);
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_IPV4]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						&ai->ft_ix[MLX5E_TT_IPV4]);
+		if (err) {
+			mlx5e_del_eth_addr_from_flow_table(priv, ai);
+			return err;
+		}
+		ai->tt_vec |= (1 << MLX5E_TT_IPV4);
+	}
+
+	if (tt_vec & (1 << MLX5E_TT_IPV6)) {
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+			 ETH_P_IPV6);
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_IPV6]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						&ai->ft_ix[MLX5E_TT_IPV6]);
+		if (err) {
+			mlx5e_del_eth_addr_from_flow_table(priv, ai);
+			return err;
+		}
+		ai->tt_vec |= (1 << MLX5E_TT_IPV6);
+	}
+
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+			 outer_headers.ip_protocol);
+	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+		 IPPROTO_UDP);
+
+	if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+			 ETH_P_IP);
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_IPV4_UDP]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						&ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+		if (err) {
+			mlx5e_del_eth_addr_from_flow_table(priv, ai);
+			return err;
+		}
+		ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
+	}
+
+	if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+			 ETH_P_IPV6);
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_IPV6_UDP]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						&ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+		if (err) {
+			mlx5e_del_eth_addr_from_flow_table(priv, ai);
+			return err;
+		}
+		ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
+	}
+
+	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+		 IPPROTO_TCP);
+
+	if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+			 ETH_P_IP);
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_IPV4_TCP]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						&ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+		if (err) {
+			mlx5e_del_eth_addr_from_flow_table(priv, ai);
+			return err;
+		}
+		ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
+	}
+
+	if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+			 ETH_P_IPV6);
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_IPV6_TCP]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						&ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+		if (err) {
+			mlx5e_del_eth_addr_from_flow_table(priv, ai);
+			return err;
+		}
+		ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
+	}
+
+	return 0;
+}
+
+static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+				   struct mlx5e_eth_addr_info *ai, int type)
+{
+	u32 *flow_context;
+	u32 *match_criteria;
+	int err;
+
+	flow_context   = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+				      MLX5_ST_SZ_BYTES(dest_format_struct));
+	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+	if (!flow_context || !match_criteria) {
+		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+		err = -ENOMEM;
+		goto add_eth_addr_rule_out;
+	}
+
+	err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context,
+					match_criteria);
+	if (err)
+		netdev_err(priv->netdev, "%s: failed\n", __func__);
+
+add_eth_addr_rule_out:
+	kvfree(match_criteria);
+	kvfree(flow_context);
+	return err;
+}
+
+enum mlx5e_vlan_rule_type {
+	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
+	MLX5E_VLAN_RULE_TYPE_ANY_VID,
+	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+};
+
+static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+	u8 match_criteria_enable = 0;
+	u32 *flow_context;
+	void *match_value;
+	void *dest;
+	u32 *match_criteria;
+	u32 *ft_ix;
+	int err;
+
+	flow_context   = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+				      MLX5_ST_SZ_BYTES(dest_format_struct));
+	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+	if (!flow_context || !match_criteria) {
+		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+		err = -ENOMEM;
+		goto add_vlan_rule_out;
+	}
+	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
+
+	MLX5_SET(flow_context, flow_context, action,
+		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
+	MLX5_SET(dest_format_struct, dest, destination_type,
+		 MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
+	MLX5_SET(dest_format_struct, dest, destination_id,
+		 mlx5_get_flow_table_id(priv->ft.main));
+
+	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+			 outer_headers.vlan_tag);
+
+	switch (rule_type) {
+	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+		ft_ix = &priv->vlan.untagged_rule_ft_ix;
+		break;
+	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+		ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
+		MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
+			 1);
+		break;
+	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
+		ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
+		MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
+			 1);
+		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+				 outer_headers.first_vid);
+		MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
+			 vid);
+		break;
+	}
+
+	err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
+					match_criteria, flow_context, ft_ix);
+	if (err)
+		netdev_err(priv->netdev, "%s: failed\n", __func__);
+
+add_vlan_rule_out:
+	kvfree(match_criteria);
+	kvfree(flow_context);
+	return err;
+}
+
+static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
+				enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+	switch (rule_type) {
+	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+		mlx5_del_flow_table_entry(priv->ft.vlan,
+					  priv->vlan.untagged_rule_ft_ix);
+		break;
+	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+		mlx5_del_flow_table_entry(priv->ft.vlan,
+					  priv->vlan.any_vlan_rule_ft_ix);
+		break;
+	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
+		mlx5_del_flow_table_entry(priv->ft.vlan,
+					  priv->vlan.active_vlans_ft_ix[vid]);
+		break;
+	}
+}
+
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
+{
+	WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+	if (priv->vlan.filter_disabled) {
+		priv->vlan.filter_disabled = false;
+		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+					    0);
+	}
+}
+
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
+{
+	WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+	if (!priv->vlan.filter_disabled) {
+		priv->vlan.filter_disabled = true;
+		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+					    0);
+	}
+}
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+			  u16 vid)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	int err = 0;
+
+	mutex_lock(&priv->state_lock);
+
+	set_bit(vid, priv->vlan.active_vlans);
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+					  vid);
+
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+			   u16 vid)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	mutex_lock(&priv->state_lock);
+
+	clear_bit(vid, priv->vlan.active_vlans);
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+
+	mutex_unlock(&priv->state_lock);
+
+	return 0;
+}
+
+int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
+{
+	u16 vid;
+	int err;
+
+	for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
+		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+					  vid);
+		if (err)
+			return err;
+	}
+
+	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+	if (err)
+		return err;
+
+	if (priv->vlan.filter_disabled) {
+		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+					  0);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
+{
+	u16 vid;
+
+	if (priv->vlan.filter_disabled)
+		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+
+	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+
+	for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
+		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+}
+
+#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
+	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
+		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
+
+static void mlx5e_execute_action(struct mlx5e_priv *priv,
+				 struct mlx5e_eth_addr_hash_node *hn)
+{
+	switch (hn->action) {
+	case MLX5E_ACTION_ADD:
+		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+		hn->action = MLX5E_ACTION_NONE;
+		break;
+
+	case MLX5E_ACTION_DEL:
+		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
+		mlx5e_del_eth_addr_from_hash(hn);
+		break;
+	}
+}
+
+static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
+{
+	struct net_device *netdev = priv->netdev;
+	struct netdev_hw_addr *ha;
+
+	netif_addr_lock_bh(netdev);
+
+	mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
+				   priv->netdev->dev_addr);
+
+	netdev_for_each_uc_addr(ha, netdev)
+		mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);
+
+	netdev_for_each_mc_addr(ha, netdev)
+		mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);
+
+	netif_addr_unlock_bh(netdev);
+}
+
+static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
+{
+	struct mlx5e_eth_addr_hash_node *hn;
+	struct hlist_node *tmp;
+	int i;
+
+	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+		mlx5e_execute_action(priv, hn);
+
+	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+		mlx5e_execute_action(priv, hn);
+}
+
+static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
+{
+	struct mlx5e_eth_addr_hash_node *hn;
+	struct hlist_node *tmp;
+	int i;
+
+	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+		hn->action = MLX5E_ACTION_DEL;
+	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+		hn->action = MLX5E_ACTION_DEL;
+
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+		mlx5e_sync_netdev_addr(priv);
+
+	mlx5e_apply_netdev_addr(priv);
+}
+
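+/* Reconcile the promisc/allmulti/broadcast flow rules and the unicast/
+ * multicast address hash tables with the current netdev state.  Rules
+ * being enabled are added before rules being disabled are removed.
+ */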
+void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
+{
+	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+	struct net_device *ndev = priv->netdev;
+
+	bool rx_mode_enable    = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
+	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
+	bool broadcast_enabled = rx_mode_enable;
+
+	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
+	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
+	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
+	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
+	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
+	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;
+
+	if (enable_promisc)
+		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
+	if (enable_allmulti)
+		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+	if (enable_broadcast)
+		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+
+	mlx5e_handle_netdev_addr(priv);
+
+	if (disable_broadcast)
+		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
+	if (disable_allmulti)
+		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
+	if (disable_promisc)
+		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+
+	ea->promisc_enabled   = promisc_enabled;
+	ea->allmulti_enabled  = allmulti_enabled;
+	ea->broadcast_enabled = broadcast_enabled;
+}
+
+void mlx5e_set_rx_mode_work(struct work_struct *work)
+{
+	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+					       set_rx_mode_work);
+
+	mutex_lock(&priv->state_lock);
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+		mlx5e_set_rx_mode_core(priv);
+	mutex_unlock(&priv->state_lock);
+}
+
+void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
+{
+	ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
+}
+
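+/* The main flow table is built from nine match groups, ordered from
+ * wildcard matches (ethertype/IP protocol only) through exact-DMAC
+ * unicast matches to matches on the DMAC multicast bit.  log_sz is the
+ * log2 of the number of entries a group can hold.
+ */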
+static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+{
+	struct mlx5_flow_table_group *g;
+	u8 *dmac;
+
+	g = kcalloc(9, sizeof(*g), GFP_KERNEL);
+	if (!g)
+		return -ENOMEM;
+
+	g[0].log_sz = 2;
+	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+			 outer_headers.ethertype);
+	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+			 outer_headers.ip_protocol);
+
+	g[1].log_sz = 1;
+	g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
+			 outer_headers.ethertype);
+
+	g[2].log_sz = 0;
+
+	g[3].log_sz = 14;
+	g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
+			    outer_headers.dmac_47_16);
+	memset(dmac, 0xff, ETH_ALEN);
+	MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
+			 outer_headers.ethertype);
+	MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
+			 outer_headers.ip_protocol);
+
+	g[4].log_sz = 13;
+	g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
+			    outer_headers.dmac_47_16);
+	memset(dmac, 0xff, ETH_ALEN);
+	MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
+			 outer_headers.ethertype);
+
+	g[5].log_sz = 11;
+	g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
+			    outer_headers.dmac_47_16);
+	memset(dmac, 0xff, ETH_ALEN);
+
+	g[6].log_sz = 2;
+	g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
+			    outer_headers.dmac_47_16);
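+	/* match only the multicast bit of the DMAC */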
+	dmac[0] = 0x01;
+	MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
+			 outer_headers.ethertype);
+	MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
+			 outer_headers.ip_protocol);
+
+	g[7].log_sz = 1;
+	g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
+			    outer_headers.dmac_47_16);
+	dmac[0] = 0x01;
+	MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
+			 outer_headers.ethertype);
+
+	g[8].log_sz = 0;
+	g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
+			    outer_headers.dmac_47_16);
+	dmac[0] = 0x01;
+
+	priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
+					       MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+					       9, g);
+	kfree(g);
+
+	return priv->ft.main ? 0 : -ENOMEM;
+}
+
+static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
+{
+	mlx5_destroy_flow_table(priv->ft.main);
+}
+
+static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+{
+	struct mlx5_flow_table_group *g;
+
+	g = kcalloc(2, sizeof(*g), GFP_KERNEL);
+	if (!g)
+		return -ENOMEM;
+
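+	/* one rule per possible VLAN ID (4K entries) */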
+	g[0].log_sz = 12;
+	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+			 outer_headers.vlan_tag);
+	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+			 outer_headers.first_vid);
+
+	/* untagged + any vlan id */
+	g[1].log_sz = 1;
+	g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
+			 outer_headers.vlan_tag);
+
+	priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
+					       MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+					       2, g);
+
+	kfree(g);
+	return priv->ft.vlan ? 0 : -ENOMEM;
+}
+
+static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
+{
+	mlx5_destroy_flow_table(priv->ft.vlan);
+}
+
+int mlx5e_open_flow_table(struct mlx5e_priv *priv)
+{
+	int err;
+
+	err = mlx5e_create_main_flow_table(priv);
+	if (err)
+		return err;
+
+	err = mlx5e_create_vlan_flow_table(priv);
+	if (err)
+		goto err_destroy_main_flow_table;
+
+	return 0;
+
+err_destroy_main_flow_table:
+	mlx5e_destroy_main_flow_table(priv);
+
+	return err;
+}
+
+void mlx5e_close_flow_table(struct mlx5e_priv *priv)
+{
+	mlx5e_destroy_vlan_flow_table(priv);
+	mlx5e_destroy_main_flow_table(priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
new file mode 100644
index 0000000..40206da
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -0,0 +1,1915 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/flow_table.h>
+#include "en.h"
+
+struct mlx5e_rq_param {
+	u32                        rqc[MLX5_ST_SZ_DW(rqc)];
+	struct mlx5_wq_param       wq;
+};
+
+struct mlx5e_sq_param {
+	u32                        sqc[MLX5_ST_SZ_DW(sqc)];
+	struct mlx5_wq_param       wq;
+};
+
+struct mlx5e_cq_param {
+	u32                        cqc[MLX5_ST_SZ_DW(cqc)];
+	struct mlx5_wq_param       wq;
+	u16                        eq_ix;
+};
+
+struct mlx5e_channel_param {
+	struct mlx5e_rq_param      rq;
+	struct mlx5e_sq_param      sq;
+	struct mlx5e_cq_param      rx_cq;
+	struct mlx5e_cq_param      tx_cq;
+};
+
+static void mlx5e_update_carrier(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u8 port_state;
+
+	port_state = mlx5_query_vport_state(mdev,
+		MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);
+
+	if (port_state == VPORT_STATE_UP)
+		netif_carrier_on(priv->netdev);
+	else
+		netif_carrier_off(priv->netdev);
+}
+
+static void mlx5e_update_carrier_work(struct work_struct *work)
+{
+	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+					       update_carrier_work);
+
+	mutex_lock(&priv->state_lock);
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+		mlx5e_update_carrier(priv);
+	mutex_unlock(&priv->state_lock);
+}
+
+void mlx5e_update_stats(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5e_vport_stats *s = &priv->stats.vport;
+	struct mlx5e_rq_stats *rq_stats;
+	struct mlx5e_sq_stats *sq_stats;
+	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+	u32 *out;
+	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+	u64 tx_offload_none;
+	int i, j;
+
+	out = mlx5_vzalloc(outlen);
+	if (!out)
+		return;
+
+	/* Collect the SW counters first, then the HW counters, for consistency */
+	s->tso_packets		= 0;
+	s->tso_bytes		= 0;
+	s->tx_queue_stopped	= 0;
+	s->tx_queue_wake	= 0;
+	s->tx_queue_dropped	= 0;
+	tx_offload_none		= 0;
+	s->lro_packets		= 0;
+	s->lro_bytes		= 0;
+	s->rx_csum_none		= 0;
+	s->rx_wqe_err		= 0;
+	for (i = 0; i < priv->params.num_channels; i++) {
+		rq_stats = &priv->channel[i]->rq.stats;
+
+		s->lro_packets	+= rq_stats->lro_packets;
+		s->lro_bytes	+= rq_stats->lro_bytes;
+		s->rx_csum_none	+= rq_stats->csum_none;
+		s->rx_wqe_err   += rq_stats->wqe_err;
+
+		for (j = 0; j < priv->num_tc; j++) {
+			sq_stats = &priv->channel[i]->sq[j].stats;
+
+			s->tso_packets		+= sq_stats->tso_packets;
+			s->tso_bytes		+= sq_stats->tso_bytes;
+			s->tx_queue_stopped	+= sq_stats->stopped;
+			s->tx_queue_wake	+= sq_stats->wake;
+			s->tx_queue_dropped	+= sq_stats->dropped;
+			tx_offload_none		+= sq_stats->csum_offload_none;
+		}
+	}
+
+	/* HW counters */
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(query_vport_counter_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
+	MLX5_SET(query_vport_counter_in, in, other_vport, 0);
+
+	memset(out, 0, outlen);
+
+	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
+		goto free_out;
+
+#define MLX5_GET_CTR(p, x) \
+	MLX5_GET64(query_vport_counter_out, p, x)
+
+	s->rx_error_packets     =
+		MLX5_GET_CTR(out, received_errors.packets);
+	s->rx_error_bytes       =
+		MLX5_GET_CTR(out, received_errors.octets);
+	s->tx_error_packets     =
+		MLX5_GET_CTR(out, transmit_errors.packets);
+	s->tx_error_bytes       =
+		MLX5_GET_CTR(out, transmit_errors.octets);
+
+	s->rx_unicast_packets   =
+		MLX5_GET_CTR(out, received_eth_unicast.packets);
+	s->rx_unicast_bytes     =
+		MLX5_GET_CTR(out, received_eth_unicast.octets);
+	s->tx_unicast_packets   =
+		MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
+	s->tx_unicast_bytes     =
+		MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
+
+	s->rx_multicast_packets =
+		MLX5_GET_CTR(out, received_eth_multicast.packets);
+	s->rx_multicast_bytes   =
+		MLX5_GET_CTR(out, received_eth_multicast.octets);
+	s->tx_multicast_packets =
+		MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
+	s->tx_multicast_bytes   =
+		MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
+
+	s->rx_broadcast_packets =
+		MLX5_GET_CTR(out, received_eth_broadcast.packets);
+	s->rx_broadcast_bytes   =
+		MLX5_GET_CTR(out, received_eth_broadcast.octets);
+	s->tx_broadcast_packets =
+		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
+	s->tx_broadcast_bytes   =
+		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+
+	s->rx_packets =
+		s->rx_unicast_packets +
+		s->rx_multicast_packets +
+		s->rx_broadcast_packets;
+	s->rx_bytes =
+		s->rx_unicast_bytes +
+		s->rx_multicast_bytes +
+		s->rx_broadcast_bytes;
+	s->tx_packets =
+		s->tx_unicast_packets +
+		s->tx_multicast_packets +
+		s->tx_broadcast_packets;
+	s->tx_bytes =
+		s->tx_unicast_bytes +
+		s->tx_multicast_bytes +
+		s->tx_broadcast_bytes;
+
+	/* Update calculated offload counters */
+	s->tx_csum_offload = s->tx_packets - tx_offload_none;
+	s->rx_csum_good    = s->rx_packets - s->rx_csum_none;
+
+free_out:
+	kvfree(out);
+}
+
+static void mlx5e_update_stats_work(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
+					       update_stats_work);
+
+	mutex_lock(&priv->state_lock);
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		mlx5e_update_stats(priv);
+		schedule_delayed_work(dwork,
+				      msecs_to_jiffies(
+					      MLX5E_UPDATE_STATS_INTERVAL));
+	}
+	mutex_unlock(&priv->state_lock);
+}
+
+static void __mlx5e_async_event(struct mlx5e_priv *priv,
+				enum mlx5_dev_event event)
+{
+	switch (event) {
+	case MLX5_DEV_EVENT_PORT_UP:
+	case MLX5_DEV_EVENT_PORT_DOWN:
+		schedule_work(&priv->update_carrier_work);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
+			      enum mlx5_dev_event event, unsigned long param)
+{
+	struct mlx5e_priv *priv = vpriv;
+
+	spin_lock(&priv->async_events_spinlock);
+	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+		__mlx5e_async_event(priv, event);
+	spin_unlock(&priv->async_events_spinlock);
+}
+
+static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
+{
+	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+}
+
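+/* Clearing the bit under the lock stops new events from being handled
+ * and waits for a handler that is already running to finish.
+ */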
+static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
+{
+	spin_lock_irq(&priv->async_events_spinlock);
+	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+	spin_unlock_irq(&priv->async_events_spinlock);
+}
+
+#define MLX5E_HW2SW_MTU(hwmtu) ((hwmtu) - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+#define MLX5E_SW2HW_MTU(swmtu) ((swmtu) + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+
+static int mlx5e_create_rq(struct mlx5e_channel *c,
+			   struct mlx5e_rq_param *param,
+			   struct mlx5e_rq *rq)
+{
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	void *rqc = param->rqc;
+	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+	int wq_sz;
+	int err;
+	int i;
+
+	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+				&rq->wq_ctrl);
+	if (err)
+		return err;
+
+	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
+
+	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+	rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
+			       cpu_to_node(c->cpu));
+	if (!rq->skb) {
+		err = -ENOMEM;
+		goto err_rq_wq_destroy;
+	}
+
+	rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
+					     MLX5E_SW2HW_MTU(priv->netdev->mtu);
+	rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);
+
+	for (i = 0; i < wq_sz; i++) {
+		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
+		u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
+
+		wqe->data.lkey       = c->mkey_be;
+		wqe->data.byte_count =
+			cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
+	}
+
+	rq->pdev    = c->pdev;
+	rq->netdev  = c->netdev;
+	rq->channel = c;
+	rq->ix      = c->ix;
+
+	return 0;
+
+err_rq_wq_destroy:
+	mlx5_wq_destroy(&rq->wq_ctrl);
+
+	return err;
+}
+
+static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
+{
+	kfree(rq->skb);
+	mlx5_wq_destroy(&rq->wq_ctrl);
+}
+
+static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
+{
+	struct mlx5e_channel *c = rq->channel;
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	void *in;
+	void *rqc;
+	void *wq;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
+		sizeof(u64) * rq->wq_ctrl.buf.npages;
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+	wq  = MLX5_ADDR_OF(rqc, rqc, wq);
+
+	memcpy(rqc, param->rqc, sizeof(param->rqc));
+
+	MLX5_SET(rqc,  rqc, cqn,		c->rq.cq.mcq.cqn);
+	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
+	MLX5_SET(rqc,  rqc, flush_in_error_en,	1);
+	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
+						PAGE_SHIFT);
+	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);
+
+	mlx5_fill_page_array(&rq->wq_ctrl.buf,
+			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
+
+	kvfree(in);
+
+	return err;
+}
+
+static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
+{
+	struct mlx5e_channel *c = rq->channel;
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	void *in;
+	void *rqc;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+	MLX5_SET(rqc, rqc, state, next_state);
+
+	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+
+	kvfree(in);
+
+	return err;
+}
+
+static void mlx5e_disable_rq(struct mlx5e_rq *rq)
+{
+	struct mlx5e_channel *c = rq->channel;
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	mlx5_core_destroy_rq(mdev, rq->rqn);
+}
+
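+/* Poll for up to ~20 seconds until the RQ has been refilled with at
+ * least min_rx_wqes receive WQEs.
+ */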
+static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
+{
+	struct mlx5e_channel *c = rq->channel;
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_wq_ll *wq = &rq->wq;
+	int i;
+
+	for (i = 0; i < 1000; i++) {
+		if (wq->cur_sz >= priv->params.min_rx_wqes)
+			return 0;
+
+		msleep(20);
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int mlx5e_open_rq(struct mlx5e_channel *c,
+			 struct mlx5e_rq_param *param,
+			 struct mlx5e_rq *rq)
+{
+	int err;
+
+	err = mlx5e_create_rq(c, param, rq);
+	if (err)
+		return err;
+
+	err = mlx5e_enable_rq(rq, param);
+	if (err)
+		goto err_destroy_rq;
+
+	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+	if (err)
+		goto err_disable_rq;
+
+	set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+	mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */
+
+	return 0;
+
+err_disable_rq:
+	mlx5e_disable_rq(rq);
+err_destroy_rq:
+	mlx5e_destroy_rq(rq);
+
+	return err;
+}
+
+static void mlx5e_close_rq(struct mlx5e_rq *rq)
+{
+	clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
+
+	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+	while (!mlx5_wq_ll_is_empty(&rq->wq))
+		msleep(20);
+
+	/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
+	napi_synchronize(&rq->channel->napi);
+
+	mlx5e_disable_rq(rq);
+	mlx5e_destroy_rq(rq);
+}
+
+static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+{
+	kfree(sq->dma_fifo);
+	kfree(sq->skb);
+}
+
+static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+{
+	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+	sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
+	sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
+				    numa);
+
+	if (!sq->skb || !sq->dma_fifo) {
+		mlx5e_free_sq_db(sq);
+		return -ENOMEM;
+	}
+
+	sq->dma_fifo_mask = df_sz - 1;
+
+	return 0;
+}
+
+static int mlx5e_create_sq(struct mlx5e_channel *c,
+			   int tc,
+			   struct mlx5e_sq_param *param,
+			   struct mlx5e_sq *sq)
+{
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	void *sqc = param->sqc;
+	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
+	int txq_ix;
+	int err;
+
+	err = mlx5_alloc_map_uar(mdev, &sq->uar);
+	if (err)
+		return err;
+
+	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
+				 &sq->wq_ctrl);
+	if (err)
+		goto err_unmap_free_uar;
+
+	sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
+	sq->uar_map     = sq->uar.map;
+	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
+	if (err)
+		goto err_sq_wq_destroy;
+
+	txq_ix = c->ix + tc * priv->params.num_channels;
+	sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
+
+	sq->pdev    = c->pdev;
+	sq->mkey_be = c->mkey_be;
+	sq->channel = c;
+	sq->tc      = tc;
+	sq->edge    = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+	priv->txq_to_sq_map[txq_ix] = sq;
+
+	return 0;
+
+err_sq_wq_destroy:
+	mlx5_wq_destroy(&sq->wq_ctrl);
+
+err_unmap_free_uar:
+	mlx5_unmap_free_uar(mdev, &sq->uar);
+
+	return err;
+}
+
+static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
+{
+	struct mlx5e_channel *c = sq->channel;
+	struct mlx5e_priv *priv = c->priv;
+
+	mlx5e_free_sq_db(sq);
+	mlx5_wq_destroy(&sq->wq_ctrl);
+	mlx5_unmap_free_uar(priv->mdev, &sq->uar);
+}
+
+static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
+{
+	struct mlx5e_channel *c = sq->channel;
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	void *in;
+	void *sqc;
+	void *wq;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+		sizeof(u64) * sq->wq_ctrl.buf.npages;
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+	wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+	memcpy(sqc, param->sqc, sizeof(param->sqc));
+
+	MLX5_SET(sqc,  sqc, user_index,		sq->tc);
+	MLX5_SET(sqc,  sqc, tis_num_0,		priv->tisn[sq->tc]);
+	MLX5_SET(sqc,  sqc, cqn,		c->sq[sq->tc].cq.mcq.cqn);
+	MLX5_SET(sqc,  sqc, state,		MLX5_SQC_STATE_RST);
+	MLX5_SET(sqc,  sqc, tis_lst_sz,		1);
+	MLX5_SET(sqc,  sqc, flush_in_error_en,	1);
+
+	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
+	MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
+	MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
+					  PAGE_SHIFT);
+	MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);
+
+	mlx5_fill_page_array(&sq->wq_ctrl.buf,
+			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+	kvfree(in);
+
+	return err;
+}
+
+static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
+{
+	struct mlx5e_channel *c = sq->channel;
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	void *in;
+	void *sqc;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+
+	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
+	MLX5_SET(sqc, sqc, state, next_state);
+
+	err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
+
+	kvfree(in);
+
+	return err;
+}
+
+static void mlx5e_disable_sq(struct mlx5e_sq *sq)
+{
+	struct mlx5e_channel *c = sq->channel;
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	mlx5_core_destroy_sq(mdev, sq->sqn);
+}
+
+static int mlx5e_open_sq(struct mlx5e_channel *c,
+			 int tc,
+			 struct mlx5e_sq_param *param,
+			 struct mlx5e_sq *sq)
+{
+	int err;
+
+	err = mlx5e_create_sq(c, tc, param, sq);
+	if (err)
+		return err;
+
+	err = mlx5e_enable_sq(sq, param);
+	if (err)
+		goto err_destroy_sq;
+
+	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
+	if (err)
+		goto err_disable_sq;
+
+	set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+	netdev_tx_reset_queue(sq->txq);
+	netif_tx_start_queue(sq->txq);
+
+	return 0;
+
+err_disable_sq:
+	mlx5e_disable_sq(sq);
+err_destroy_sq:
+	mlx5e_destroy_sq(sq);
+
+	return err;
+}
+
+static inline void netif_tx_disable_queue(struct netdev_queue *txq)
+{
+	__netif_tx_lock_bh(txq);
+	netif_tx_stop_queue(txq);
+	__netif_tx_unlock_bh(txq);
+}
+
+static void mlx5e_close_sq(struct mlx5e_sq *sq)
+{
+	clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+	napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
+	netif_tx_disable_queue(sq->txq);
+
+	/* ensure hw is notified of all pending wqes */
+	if (mlx5e_sq_has_room_for(sq, 1))
+		mlx5e_send_nop(sq, true);
+
+	mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+	while (sq->cc != sq->pc) /* wait till sq is empty */
+		msleep(20);
+
+	/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
+	napi_synchronize(&sq->channel->napi);
+
+	mlx5e_disable_sq(sq);
+	mlx5e_destroy_sq(sq);
+}
+
+static int mlx5e_create_cq(struct mlx5e_channel *c,
+			   struct mlx5e_cq_param *param,
+			   struct mlx5e_cq *cq)
+{
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5_core_cq *mcq = &cq->mcq;
+	int eqn_not_used;
+	int irqn;
+	int err;
+	u32 i;
+
+	param->wq.numa = cpu_to_node(c->cpu);
+	param->eq_ix   = c->ix;
+
+	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
+			       &cq->wq_ctrl);
+	if (err)
+		return err;
+
+	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+
+	cq->napi        = &c->napi;
+
+	mcq->cqe_sz     = 64;
+	mcq->set_ci_db  = cq->wq_ctrl.db.db;
+	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
+	*mcq->set_ci_db = 0;
+	*mcq->arm_db    = 0;
+	mcq->vector     = param->eq_ix;
+	mcq->comp       = mlx5e_completion_event;
+	mcq->event      = mlx5e_cq_error_event;
+	mcq->irqn       = irqn;
+	mcq->uar        = &priv->cq_uar;
+
+	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
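+		/* mark CQEs as invalid and HW-owned so none is processed
+		 * before the HW has written it
+		 */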
+		cqe->op_own = 0xf1;
+	}
+
+	cq->channel = c;
+
+	return 0;
+}
+
+static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
+{
+	mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
+{
+	struct mlx5e_channel *c = cq->channel;
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5_core_cq *mcq = &cq->mcq;
+
+	void *in;
+	void *cqc;
+	int inlen;
+	int irqn_not_used;
+	int eqn;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+		sizeof(u64) * cq->wq_ctrl.buf.npages;
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+	memcpy(cqc, param->cqc, sizeof(param->cqc));
+
+	mlx5_fill_page_array(&cq->wq_ctrl.buf,
+			     (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+
+	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
+	MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
+	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+					    PAGE_SHIFT);
+	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
+
+	err = mlx5_core_create_cq(mdev, mcq, in, inlen);
+
+	kvfree(in);
+
+	if (err)
+		return err;
+
+	mlx5e_cq_arm(cq);
+
+	return 0;
+}
+
+static void mlx5e_disable_cq(struct mlx5e_cq *cq)
+{
+	struct mlx5e_channel *c = cq->channel;
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	mlx5_core_destroy_cq(mdev, &cq->mcq);
+}
+
+static int mlx5e_open_cq(struct mlx5e_channel *c,
+			 struct mlx5e_cq_param *param,
+			 struct mlx5e_cq *cq,
+			 u16 moderation_usecs,
+			 u16 moderation_frames)
+{
+	int err;
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	err = mlx5e_create_cq(c, param, cq);
+	if (err)
+		return err;
+
+	err = mlx5e_enable_cq(cq, param);
+	if (err)
+		goto err_destroy_cq;
+
+	err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
+					     moderation_usecs,
+					     moderation_frames);
+	if (err)
+		goto err_destroy_cq;
+
+	return 0;
+
+err_destroy_cq:
+	mlx5e_destroy_cq(cq);
+
+	return err;
+}
+
+static void mlx5e_close_cq(struct mlx5e_cq *cq)
+{
+	mlx5e_disable_cq(cq);
+	mlx5e_destroy_cq(cq);
+}
+
+static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
+{
+	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+}
+
+static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
+			     struct mlx5e_channel_param *cparam)
+{
+	struct mlx5e_priv *priv = c->priv;
+	int err;
+	int tc;
+
+	for (tc = 0; tc < c->num_tc; tc++) {
+		err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
+				    priv->params.tx_cq_moderation_usec,
+				    priv->params.tx_cq_moderation_pkts);
+		if (err)
+			goto err_close_tx_cqs;
+	}
+
+	return 0;
+
+err_close_tx_cqs:
+	for (tc--; tc >= 0; tc--)
+		mlx5e_close_cq(&c->sq[tc].cq);
+
+	return err;
+}
+
+static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
+{
+	int tc;
+
+	for (tc = 0; tc < c->num_tc; tc++)
+		mlx5e_close_cq(&c->sq[tc].cq);
+}
+
+static int mlx5e_open_sqs(struct mlx5e_channel *c,
+			  struct mlx5e_channel_param *cparam)
+{
+	int err;
+	int tc;
+
+	for (tc = 0; tc < c->num_tc; tc++) {
+		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
+		if (err)
+			goto err_close_sqs;
+	}
+
+	return 0;
+
+err_close_sqs:
+	for (tc--; tc >= 0; tc--)
+		mlx5e_close_sq(&c->sq[tc]);
+
+	return err;
+}
+
+static void mlx5e_close_sqs(struct mlx5e_channel *c)
+{
+	int tc;
+
+	for (tc = 0; tc < c->num_tc; tc++)
+		mlx5e_close_sq(&c->sq[tc]);
+}
+
+static void mlx5e_build_tc_to_txq_map(struct mlx5e_channel *c,
+				      int num_channels)
+{
+	int i;
+
+	for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
+		c->tc_to_txq_map[i] = c->ix + i * num_channels;
+}
+
+static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+			      struct mlx5e_channel_param *cparam,
+			      struct mlx5e_channel **cp)
+{
+	struct net_device *netdev = priv->netdev;
+	int cpu = mlx5e_get_cpu(priv, ix);
+	struct mlx5e_channel *c;
+	int err;
+
+	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
+	if (!c)
+		return -ENOMEM;
+
+	c->priv     = priv;
+	c->ix       = ix;
+	c->cpu      = cpu;
+	c->pdev     = &priv->mdev->pdev->dev;
+	c->netdev   = priv->netdev;
+	c->mkey_be  = cpu_to_be32(priv->mr.key);
+	c->num_tc   = priv->num_tc;
+
+	mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);
+
+	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
+
+	err = mlx5e_open_tx_cqs(c, cparam);
+	if (err)
+		goto err_napi_del;
+
+	err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
+			    priv->params.rx_cq_moderation_usec,
+			    priv->params.rx_cq_moderation_pkts);
+	if (err)
+		goto err_close_tx_cqs;
+
+	napi_enable(&c->napi);
+
+	err = mlx5e_open_sqs(c, cparam);
+	if (err)
+		goto err_disable_napi;
+
+	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
+	if (err)
+		goto err_close_sqs;
+
+	netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
+	*cp = c;
+
+	return 0;
+
+err_close_sqs:
+	mlx5e_close_sqs(c);
+
+err_disable_napi:
+	napi_disable(&c->napi);
+	mlx5e_close_cq(&c->rq.cq);
+
+err_close_tx_cqs:
+	mlx5e_close_tx_cqs(c);
+
+err_napi_del:
+	netif_napi_del(&c->napi);
+	kfree(c);
+
+	return err;
+}
+
+static void mlx5e_close_channel(struct mlx5e_channel *c)
+{
+	mlx5e_close_rq(&c->rq);
+	mlx5e_close_sqs(c);
+	napi_disable(&c->napi);
+	mlx5e_close_cq(&c->rq.cq);
+	mlx5e_close_tx_cqs(c);
+	netif_napi_del(&c->napi);
+	kfree(c);
+}
+
+static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
+				 struct mlx5e_rq_param *param)
+{
+	void *rqc = param->rqc;
+	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+	MLX5_SET(wq, wq, wq_type,          MLX5_WQ_TYPE_LINKED_LIST);
+	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+	MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
+	MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
+	MLX5_SET(wq, wq, pd,               priv->pdn);
+
+	param->wq.numa   = dev_to_node(&priv->mdev->pdev->dev);
+	param->wq.linear = 1;
+}
+
+static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
+				 struct mlx5e_sq_param *param)
+{
+	void *sqc = param->sqc;
+	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+	MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
+	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+	MLX5_SET(wq, wq, pd,            priv->pdn);
+
+	param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+}
+
+static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
+					struct mlx5e_cq_param *param)
+{
+	void *cqc = param->cqc;
+
+	MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
+}
+
+static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
+				    struct mlx5e_cq_param *param)
+{
+	void *cqc = param->cqc;
+
+	MLX5_SET(cqc, cqc, log_cq_size,  priv->params.log_rq_size);
+
+	mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
+				    struct mlx5e_cq_param *param)
+{
+	void *cqc = param->cqc;
+
+	MLX5_SET(cqc, cqc, log_cq_size,  priv->params.log_sq_size);
+
+	mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
+				      struct mlx5e_channel_param *cparam)
+{
+	memset(cparam, 0, sizeof(*cparam));
+
+	mlx5e_build_rq_param(priv, &cparam->rq);
+	mlx5e_build_sq_param(priv, &cparam->sq);
+	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
+	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
+}
+
+static int mlx5e_open_channels(struct mlx5e_priv *priv)
+{
+	struct mlx5e_channel_param cparam;
+	int err = -ENOMEM;
+	int i;
+	int j;
+
+	priv->channel = kcalloc(priv->params.num_channels,
+				sizeof(struct mlx5e_channel *), GFP_KERNEL);
+
+	priv->txq_to_sq_map = kcalloc(priv->params.num_channels * priv->num_tc,
+				      sizeof(struct mlx5e_sq *), GFP_KERNEL);
+
+	if (!priv->channel || !priv->txq_to_sq_map)
+		goto err_free_txq_to_sq_map;
+
+	mlx5e_build_channel_param(priv, &cparam);
+	for (i = 0; i < priv->params.num_channels; i++) {
+		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
+		if (err)
+			goto err_close_channels;
+	}
+
+	for (j = 0; j < priv->params.num_channels; j++) {
+		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
+		if (err)
+			goto err_close_channels;
+	}
+
+	return 0;
+
+err_close_channels:
+	for (i--; i >= 0; i--)
+		mlx5e_close_channel(priv->channel[i]);
+
+err_free_txq_to_sq_map:
+	kfree(priv->txq_to_sq_map);
+	kfree(priv->channel);
+
+	return err;
+}
+
+static void mlx5e_close_channels(struct mlx5e_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->params.num_channels; i++)
+		mlx5e_close_channel(priv->channel[i]);
+
+	kfree(priv->txq_to_sq_map);
+	kfree(priv->channel);
+}
+
+static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(tisc, tisc, prio,  tc);
+	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+
+	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+}
+
+static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
+{
+	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+}
+
+static int mlx5e_open_tises(struct mlx5e_priv *priv)
+{
+	int num_tc = priv->num_tc;
+	int err;
+	int tc;
+
+	for (tc = 0; tc < num_tc; tc++) {
+		err = mlx5e_open_tis(priv, tc);
+		if (err)
+			goto err_close_tises;
+	}
+
+	return 0;
+
+err_close_tises:
+	for (tc--; tc >= 0; tc--)
+		mlx5e_close_tis(priv, tc);
+
+	return err;
+}
+
+static void mlx5e_close_tises(struct mlx5e_priv *priv)
+{
+	int num_tc = priv->num_tc;
+	int tc;
+
+	for (tc = 0; tc < num_tc; tc++)
+		mlx5e_close_tis(priv, tc);
+}
+
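+/* Create the RSS indirection table (RQT); its entries are populated
+ * round-robin with the channels' RQ numbers.
+ */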
+static int mlx5e_open_rqt(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 *in;
+	u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+	void *rqtc;
+	int inlen;
+	int err;
+	int sz;
+	int i;
+
+	sz = 1 << priv->params.rx_hash_log_tbl_sz;
+
+	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
+	for (i = 0; i < sz; i++) {
+		int ix = i % priv->params.num_channels;
+
+		MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
+	}
+
+	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+	memset(out, 0, sizeof(out));
+	err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
+	if (!err)
+		priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+	kvfree(in);
+
+	return err;
+}
+
+static void mlx5e_close_rqt(struct mlx5e_priv *priv)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+	MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
+
+	mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
+				   sizeof(out));
+}
+
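+/* One TIR is created per traffic type: MLX5E_TT_ANY dispatches directly
+ * to channel 0's RQ, while the L3/L4 traffic types spread flows over
+ * the RQT using symmetric Toeplitz hashing.
+ */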
+static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+{
+	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
+#define ROUGH_MAX_L2_L3_HDR_SZ 256
+
+#define MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+			  MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_ALL    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+			  MLX5_HASH_FIELD_SEL_DST_IP   |\
+			  MLX5_HASH_FIELD_SEL_L4_SPORT |\
+			  MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+	if (priv->params.lro_en) {
+		MLX5_SET(tirc, tirc, lro_enable_mask,
+			 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+			 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+		MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+			 (priv->params.lro_wqe_sz -
+			  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+		MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+			 MLX5_CAP_ETH(priv->mdev,
+				      lro_timer_supported_periods[3]));
+	}
+
+	switch (tt) {
+	case MLX5E_TT_ANY:
+		MLX5_SET(tirc, tirc, disp_type,
+			 MLX5_TIRC_DISP_TYPE_DIRECT);
+		MLX5_SET(tirc, tirc, inline_rqn,
+			 priv->channel[0]->rq.rqn);
+		break;
+	default:
+		MLX5_SET(tirc, tirc, disp_type,
+			 MLX5_TIRC_DISP_TYPE_INDIRECT);
+		MLX5_SET(tirc, tirc, indirect_table,
+			 priv->rqtn);
+		MLX5_SET(tirc, tirc, rx_hash_fn,
+			 MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
+		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+		netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
+						 rx_hash_toeplitz_key),
+				    MLX5_FLD_SZ_BYTES(tirc,
+						      rx_hash_toeplitz_key));
+		break;
+	}
+
+	switch (tt) {
+	case MLX5E_TT_IPV4_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_ALL);
+		break;
+
+	case MLX5E_TT_IPV6_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_ALL);
+		break;
+
+	case MLX5E_TT_IPV4_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_ALL);
+		break;
+
+	case MLX5E_TT_IPV6_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_ALL);
+		break;
+
+	case MLX5E_TT_IPV4:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+
+	case MLX5E_TT_IPV6:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+	}
+}
+
+static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 *in;
+	void *tirc;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+	mlx5e_build_tir_ctx(priv, tirc, tt);
+
+	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+
+	kvfree(in);
+
+	return err;
+}
+
+static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
+{
+	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+}
+
+static int mlx5e_open_tirs(struct mlx5e_priv *priv)
+{
+	int err;
+	int i;
+
+	for (i = 0; i < MLX5E_NUM_TT; i++) {
+		err = mlx5e_open_tir(priv, i);
+		if (err)
+			goto err_close_tirs;
+	}
+
+	return 0;
+
+err_close_tirs:
+	for (i--; i >= 0; i--)
+		mlx5e_close_tir(priv, i);
+
+	return err;
+}
+
+static void mlx5e_close_tirs(struct mlx5e_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < MLX5E_NUM_TT; i++)
+		mlx5e_close_tir(priv, i);
+}
+
+static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int hw_mtu;
+	int err;
+
+	err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
+	if (err)
+		return err;
+
+	mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+
+	if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
+		netdev_warn(netdev, "%s: Port MTU %d is different from netdev mtu %d\n",
+			    __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
+
+	netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
+	return 0;
+}
+
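+/* Bring-up order: port MTU, TISes, channels (CQs/SQs/RQs), RQT, TIRs,
+ * flow tables and VLAN rules; errors unwind in reverse order.
+ */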
+int mlx5e_open_locked(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int num_txqs;
+	int err;
+
+	num_txqs = priv->params.num_channels * priv->params.num_tc;
+	netif_set_real_num_tx_queues(netdev, num_txqs);
+	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
+
+	err = mlx5e_set_dev_port_mtu(netdev);
+	if (err)
+		return err;
+
+	err = mlx5e_open_tises(priv);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
+			   __func__, err);
+		return err;
+	}
+
+	err = mlx5e_open_channels(priv);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
+			   __func__, err);
+		goto err_close_tises;
+	}
+
+	err = mlx5e_open_rqt(priv);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n",
+			   __func__, err);
+		goto err_close_channels;
+	}
+
+	err = mlx5e_open_tirs(priv);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5e_open_tirs failed, %d\n",
+			   __func__, err);
+		goto err_close_rqt;
+	}
+
+	err = mlx5e_open_flow_table(priv);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
+			   __func__, err);
+		goto err_close_tirs;
+	}
+
+	err = mlx5e_add_all_vlan_rules(priv);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
+			   __func__, err);
+		goto err_close_flow_table;
+	}
+
+	mlx5e_init_eth_addr(priv);
+
+	set_bit(MLX5E_STATE_OPENED, &priv->state);
+
+	mlx5e_update_carrier(priv);
+	mlx5e_set_rx_mode_core(priv);
+
+	schedule_delayed_work(&priv->update_stats_work, 0);
+	return 0;
+
+err_close_flow_table:
+	mlx5e_close_flow_table(priv);
+
+err_close_tirs:
+	mlx5e_close_tirs(priv);
+
+err_close_rqt:
+	mlx5e_close_rqt(priv);
+
+err_close_channels:
+	mlx5e_close_channels(priv);
+
+err_close_tises:
+	mlx5e_close_tises(priv);
+
+	return err;
+}
+
+static int mlx5e_open(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err;
+
+	mutex_lock(&priv->state_lock);
+	err = mlx5e_open_locked(netdev);
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+int mlx5e_close_locked(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	clear_bit(MLX5E_STATE_OPENED, &priv->state);
+
+	mlx5e_set_rx_mode_core(priv);
+	mlx5e_del_all_vlan_rules(priv);
+	netif_carrier_off(priv->netdev);
+	mlx5e_close_flow_table(priv);
+	mlx5e_close_tirs(priv);
+	mlx5e_close_rqt(priv);
+	mlx5e_close_channels(priv);
+	mlx5e_close_tises(priv);
+
+	return 0;
+}
+
+static int mlx5e_close(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err;
+
+	mutex_lock(&priv->state_lock);
+	err = mlx5e_close_locked(netdev);
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+int mlx5e_update_priv_params(struct mlx5e_priv *priv,
+			     struct mlx5e_params *new_params)
+{
+	int err = 0;
+	int was_opened;
+
+	WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (was_opened)
+		mlx5e_close_locked(priv->netdev);
+
+	priv->params = *new_params;
+
+	if (was_opened)
+		err = mlx5e_open_locked(priv->netdev);
+
+	return err;
+}
+
+static struct rtnl_link_stats64 *
+mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
+
+	stats->rx_packets = vstats->rx_packets;
+	stats->rx_bytes   = vstats->rx_bytes;
+	stats->tx_packets = vstats->tx_packets;
+	stats->tx_bytes   = vstats->tx_bytes;
+	stats->multicast  = vstats->rx_multicast_packets +
+			    vstats->tx_multicast_packets;
+	stats->tx_errors  = vstats->tx_error_packets;
+	stats->rx_errors  = vstats->rx_error_packets;
+	stats->tx_dropped = vstats->tx_queue_dropped;
+	stats->rx_crc_errors = 0;
+	stats->rx_length_errors = 0;
+
+	return stats;
+}
+
+static void mlx5e_set_rx_mode(struct net_device *dev)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	schedule_work(&priv->set_rx_mode_work);
+}
+
+static int mlx5e_set_mac(struct net_device *netdev, void *addr)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct sockaddr *saddr = addr;
+
+	if (!is_valid_ether_addr(saddr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	netif_addr_lock_bh(netdev);
+	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
+	netif_addr_unlock_bh(netdev);
+
+	schedule_work(&priv->set_rx_mode_work);
+
+	return 0;
+}
+
+static int mlx5e_set_features(struct net_device *netdev,
+			      netdev_features_t features)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	netdev_features_t changes = features ^ netdev->features;
+	struct mlx5e_params new_params;
+	bool update_params = false;
+
+	mutex_lock(&priv->state_lock);
+	new_params = priv->params;
+
+	if (changes & NETIF_F_LRO) {
+		new_params.lro_en = !!(features & NETIF_F_LRO);
+		update_params = true;
+	}
+
+	if (update_params)
+		mlx5e_update_priv_params(priv, &new_params);
+
+	if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
+		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+			mlx5e_enable_vlan_filter(priv);
+		else
+			mlx5e_disable_vlan_filter(priv);
+	}
+
+	mutex_unlock(&priv->state_lock);
+
+	return 0;
+}
+
+static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int max_mtu;
+	int err;
+
+	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+
+	if (new_mtu > max_mtu) {
+		netdev_err(netdev,
+			   "%s: bad MTU %d, max is %d\n",
+			   __func__, new_mtu, max_mtu);
+		return -EINVAL;
+	}
+
+	mutex_lock(&priv->state_lock);
+	netdev->mtu = new_mtu;
+	err = mlx5e_update_priv_params(priv, &priv->params);
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+static struct net_device_ops mlx5e_netdev_ops = {
+	.ndo_open                = mlx5e_open,
+	.ndo_stop                = mlx5e_close,
+	.ndo_start_xmit          = mlx5e_xmit,
+	.ndo_get_stats64         = mlx5e_get_stats,
+	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
+	.ndo_set_mac_address     = mlx5e_set_mac,
+	.ndo_vlan_rx_add_vid	 = mlx5e_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	 = mlx5e_vlan_rx_kill_vid,
+	.ndo_set_features        = mlx5e_set_features,
+	.ndo_change_mtu		 = mlx5e_change_mtu,
+};
+
+static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
+{
+	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+		return -ENOTSUPP;
+	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
+	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
+	    !MLX5_CAP_ETH(mdev, csum_cap) ||
+	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
+	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
+	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
+	    MLX5_CAP_FLOWTABLE(mdev,
+			       flow_table_properties_nic_receive.max_ft_level)
+			       < 3) {
+		mlx5_core_warn(mdev,
+			       "Not creating net device, some required device capabilities are missing\n");
+		return -ENOTSUPP;
+	}
+	return 0;
+}
+
+static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
+				    struct net_device *netdev,
+				    int num_comp_vectors)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	priv->params.log_sq_size           =
+		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+	priv->params.log_rq_size           =
+		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+	priv->params.rx_cq_moderation_usec =
+		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
+	priv->params.rx_cq_moderation_pkts =
+		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
+	priv->params.tx_cq_moderation_usec =
+		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+	priv->params.tx_cq_moderation_pkts =
+		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+	priv->params.min_rx_wqes           =
+		MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
+	priv->params.rx_hash_log_tbl_sz    =
+		(order_base_2(num_comp_vectors) >
+		 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
+		order_base_2(num_comp_vectors)           :
+		MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
+	priv->params.num_tc                = 1;
+	priv->params.default_vlan_prio     = 0;
+
+	/* LRO defaults to off: the 'false &&' overrides the capability check */
+	priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
+	priv->params.lro_wqe_sz            =
+		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
+	priv->mdev                         = mdev;
+	priv->netdev                       = netdev;
+	priv->params.num_channels          = num_comp_vectors;
+	priv->num_tc                       = priv->params.num_tc;
+	priv->default_vlan_prio            = priv->params.default_vlan_prio;
+
+	spin_lock_init(&priv->async_events_spinlock);
+	mutex_init(&priv->state_lock);
+
+	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
+	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+}
+
+static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr);
+}
+
+static void mlx5e_build_netdev(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
+
+	if (priv->num_tc > 1)
+		mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
+
+	netdev->netdev_ops        = &mlx5e_netdev_ops;
+	netdev->watchdog_timeo    = 15 * HZ;
+
+	netdev->ethtool_ops	  = &mlx5e_ethtool_ops;
+
+	netdev->vlan_features    |= NETIF_F_SG;
+	netdev->vlan_features    |= NETIF_F_IP_CSUM;
+	netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
+	netdev->vlan_features    |= NETIF_F_GRO;
+	netdev->vlan_features    |= NETIF_F_TSO;
+	netdev->vlan_features    |= NETIF_F_TSO6;
+	netdev->vlan_features    |= NETIF_F_RXCSUM;
+	netdev->vlan_features    |= NETIF_F_RXHASH;
+
+	if (!!MLX5_CAP_ETH(mdev, lro_cap))
+		netdev->vlan_features    |= NETIF_F_LRO;
+
+	netdev->hw_features       = netdev->vlan_features;
+	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
+	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+	netdev->features          = netdev->hw_features;
+	if (!priv->params.lro_en)
+		netdev->features  &= ~NETIF_F_LRO;
+
+	netdev->features         |= NETIF_F_HIGHDMA;
+
+	netdev->priv_flags       |= IFF_UNICAST_FLT;
+
+	mlx5e_set_netdev_dev_addr(netdev);
+}
+
+static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
+			     struct mlx5_core_mr *mr)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5_create_mkey_mbox_in *in;
+	int err;
+
+	in = mlx5_vzalloc(sizeof(*in));
+	if (!in)
+		return -ENOMEM;
+
+	in->seg.flags = MLX5_PERM_LOCAL_WRITE |
+			MLX5_PERM_LOCAL_READ  |
+			MLX5_ACCESS_MODE_PA;
+	in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
+	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+
+	err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
+				    NULL);
+
+	kvfree(in);
+
+	return err;
+}
+
+static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
+{
+	struct net_device *netdev;
+	struct mlx5e_priv *priv;
+	int ncv = mdev->priv.eq_table.num_comp_vectors;
+	int err;
+
+	if (mlx5e_check_required_hca_cap(mdev))
+		return NULL;
+
+	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv);
+	if (!netdev) {
+		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
+		return NULL;
+	}
+
+	mlx5e_build_netdev_priv(mdev, netdev, ncv);
+	mlx5e_build_netdev(netdev);
+
+	netif_carrier_off(netdev);
+
+	priv = netdev_priv(netdev);
+
+	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
+			   __func__, err);
+		goto err_free_netdev;
+	}
+
+	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
+			   __func__, err);
+		goto err_unmap_free_uar;
+	}
+
+	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n",
+			   __func__, err);
+		goto err_dealloc_pd;
+	}
+
+	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
+			   __func__, err);
+		goto err_dealloc_transport_domain;
+	}
+
+	err = register_netdev(netdev);
+	if (err) {
+		netdev_err(netdev, "%s: register_netdev failed, %d\n",
+			   __func__, err);
+		goto err_destroy_mkey;
+	}
+
+	mlx5e_enable_async_events(priv);
+
+	return priv;
+
+err_destroy_mkey:
+	mlx5_core_destroy_mkey(mdev, &priv->mr);
+
+err_dealloc_transport_domain:
+	mlx5_dealloc_transport_domain(mdev, priv->tdn);
+
+err_dealloc_pd:
+	mlx5_core_dealloc_pd(mdev, priv->pdn);
+
+err_unmap_free_uar:
+	mlx5_unmap_free_uar(mdev, &priv->cq_uar);
+
+err_free_netdev:
+	free_netdev(netdev);
+
+	return NULL;
+}
+
+static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
+{
+	struct mlx5e_priv *priv = vpriv;
+	struct net_device *netdev = priv->netdev;
+
+	unregister_netdev(netdev);
+	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
+	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
+	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
+	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
+	mlx5e_disable_async_events(priv);
+	flush_scheduled_work();
+	free_netdev(netdev);
+}
+
+static void *mlx5e_get_netdev(void *vpriv)
+{
+	struct mlx5e_priv *priv = vpriv;
+
+	return priv->netdev;
+}
+
+static struct mlx5_interface mlx5e_interface = {
+	.add       = mlx5e_create_netdev,
+	.remove    = mlx5e_destroy_netdev,
+	.event     = mlx5e_async_event,
+	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
+	.get_dev   = mlx5e_get_netdev,
+};
+
+void mlx5e_init(void)
+{
+	mlx5_register_interface(&mlx5e_interface);
+}
+
+void mlx5e_cleanup(void)
+{
+	mlx5_unregister_interface(&mlx5e_interface);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
new file mode 100644
index 0000000..9a93741
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include "en.h"
+
+static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
+				     struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+
+	skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
+	if (unlikely(!skb))
+		return -ENOMEM;
+
+	dma_addr = dma_map_single(rq->pdev,
+				  /* hw start padding */
+				  skb->data,
+				  /* hw end padding */
+				  rq->wqe_sz,
+				  DMA_FROM_DEVICE);
+
+	if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
+		goto err_free_skb;
+
+	skb_reserve(skb, MLX5E_NET_IP_ALIGN);
+
+	*((dma_addr_t *)skb->cb) = dma_addr;
+	wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);
+
+	rq->skb[ix] = skb;
+
+	return 0;
+
+err_free_skb:
+	dev_kfree_skb(skb);
+
+	return -ENOMEM;
+}
+
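+/* Refill the RQ: allocate and map one skb per free WQE and post it to
+ * hardware, stopping early if an allocation fails.
+ */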
+bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
+{
+	struct mlx5_wq_ll *wq = &rq->wq;
+
+	if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
+		return false;
+
+	while (!mlx5_wq_ll_is_full(wq)) {
+		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+
+		if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
+			break;
+
+		mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+	}
+
+	/* ensure wqes are visible to device before updating doorbell record */
+	dma_wmb();
+
+	mlx5_wq_ll_update_db_record(wq);
+
+	return !mlx5_wq_ll_is_full(wq);
+}
+
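+/* An LRO session coalesces several TCP segments into a single skb; rewrite
+ * the IP and TCP headers so the aggregate looks like one large packet.
+ */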
+static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
+{
+	struct ethhdr	*eth	= (struct ethhdr *)(skb->data);
+	struct iphdr	*ipv4	= (struct iphdr *)(skb->data + ETH_HLEN);
+	struct ipv6hdr	*ipv6	= (struct ipv6hdr *)(skb->data + ETH_HLEN);
+	struct tcphdr	*tcp;
+
+	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+	int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA  == l4_hdr_type) ||
+		       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
+
+	u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;
+
+	if (eth->h_proto == htons(ETH_P_IP)) {
+		tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+					sizeof(struct iphdr));
+		ipv6 = NULL;
+	} else {
+		tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+					sizeof(struct ipv6hdr));
+		ipv4 = NULL;
+	}
+
+	if (get_cqe_lro_tcppsh(cqe))
+		tcp->psh                = 1;
+
+	if (tcp_ack) {
+		tcp->ack                = 1;
+		tcp->ack_seq            = cqe->lro_ack_seq_num;
+		tcp->window             = cqe->lro_tcp_win;
+	}
+
+	if (ipv4) {
+		ipv4->ttl               = cqe->lro_min_ttl;
+		ipv4->tot_len           = cpu_to_be16(tot_len);
+		ipv4->check             = 0;
+		ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
+						       ipv4->ihl);
+	} else {
+		ipv6->hop_limit         = cqe->lro_min_ttl;
+		ipv6->payload_len       = cpu_to_be16(tot_len -
+						      sizeof(struct ipv6hdr));
+	}
+}
+
+static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
+				      struct sk_buff *skb)
+{
+	u8 cht = cqe->rss_hash_type;
+	int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
+		 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
+					    PKT_HASH_TYPE_NONE;
+	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
+}
+
+static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+				      struct mlx5e_rq *rq,
+				      struct sk_buff *skb)
+{
+	struct net_device *netdev = rq->netdev;
+	u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+	int lro_num_seg;
+
+	skb_put(skb, cqe_bcnt);
+
+	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
+	if (lro_num_seg > 1) {
+		mlx5e_lro_update_hdr(skb, cqe);
+		skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+		rq->stats.lro_packets++;
+		rq->stats.lro_bytes += cqe_bcnt;
+	}
+
+	if (likely(netdev->features & NETIF_F_RXCSUM) &&
+	    (cqe->hds_ip_ext & CQE_L2_OK) &&
+	    (cqe->hds_ip_ext & CQE_L3_OK) &&
+	    (cqe->hds_ip_ext & CQE_L4_OK)) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else {
+		skb->ip_summed = CHECKSUM_NONE;
+		rq->stats.csum_none++;
+	}
+
+	skb->protocol = eth_type_trans(skb, netdev);
+
+	skb_record_rx_queue(skb, rq->ix);
+
+	if (likely(netdev->features & NETIF_F_RXHASH))
+		mlx5e_skb_set_hash(cqe, skb);
+
+	if (cqe_has_vlan(cqe))
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+				       be16_to_cpu(cqe->vlan_info));
+}
+
+bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+{
+	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
+	int i;
+
+	/* avoid accessing cq (dma coherent memory) if not needed */
+	if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
+		return false;
+
+	for (i = 0; i < budget; i++) {
+		struct mlx5e_rx_wqe *wqe;
+		struct mlx5_cqe64 *cqe;
+		struct sk_buff *skb;
+		__be16 wqe_counter_be;
+		u16 wqe_counter;
+
+		cqe = mlx5e_get_cqe(cq);
+		if (!cqe)
+			break;
+
+		mlx5_cqwq_pop(&cq->wq);
+
+		wqe_counter_be = cqe->wqe_counter;
+		wqe_counter    = be16_to_cpu(wqe_counter_be);
+		wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+		skb            = rq->skb[wqe_counter];
+		prefetch(skb->data);
+		rq->skb[wqe_counter] = NULL;
+
+		dma_unmap_single(rq->pdev,
+				 *((dma_addr_t *)skb->cb),
+				 rq->wqe_sz,
+				 DMA_FROM_DEVICE);
+
+		if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+			rq->stats.wqe_err++;
+			dev_kfree_skb(skb);
+			goto wq_ll_pop;
+		}
+
+		mlx5e_build_rx_skb(cqe, rq, skb);
+		rq->stats.packets++;
+		napi_gro_receive(cq->napi, skb);
+
+wq_ll_pop:
+		mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+			       &wqe->next.next_wqe_index);
+	}
+
+	mlx5_cqwq_update_db_record(&cq->wq);
+
+	/* ensure cq space is freed before enabling more cqes */
+	wmb();
+
+	if (i == budget) {
+		set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+		return true;
+	}
+
+	return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
new file mode 100644
index 0000000..03f28f4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include "en.h"
+
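+/* Stop the txq once fewer WQEBBs remain than one maximum-size WQE plus the
+ * NOPs used to pad out the SQ edge, so wrap-around filling cannot overflow.
+ */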
+#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
+#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
+			    MLX5E_SQ_NOPS_ROOM)
+
+void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
+{
+	struct mlx5_wq_cyc                *wq  = &sq->wq;
+
+	u16 pi = sq->pc & wq->sz_m1;
+	struct mlx5e_tx_wqe              *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
+
+	struct mlx5_wqe_ctrl_seg         *cseg = &wqe->ctrl;
+
+	memset(cseg, 0, sizeof(*cseg));
+
+	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
+	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | 0x01);
+
+	sq->skb[pi] = NULL;
+	sq->pc++;
+
+	if (notify_hw) {
+		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+		mlx5e_tx_notify_hw(sq, wqe);
+	}
+}
+
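+/* The dma_fifo shadows the SQ: each mapped buffer is pushed at xmit time and
+ * popped at completion (or on error) to drive dma_unmap_single().
+ */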
+static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
+				      u32 *size)
+{
+	sq->dma_fifo_pc--;
+	*addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
+	*size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
+}
+
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+	dma_addr_t addr;
+	u32 size;
+	int i;
+
+	for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
+		mlx5e_dma_pop_last_pushed(sq, &addr, &size);
+		dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+	}
+}
+
+static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
+				  u32 size)
+{
+	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
+	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
+	sq->dma_fifo_pc++;
+}
+
+static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
+				 u32 *size)
+{
+	*addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
+	*size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
+}
+
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+		       void *accel_priv, select_queue_fallback_t fallback)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	int channel_ix = fallback(dev, skb);
+	int up = skb_vlan_tag_present(skb)        ?
+		 skb->vlan_tci >> VLAN_PRIO_SHIFT :
+		 priv->default_vlan_prio;
+	int tc = netdev_get_prio_tc_map(dev, up);
+
+	return priv->channel[channel_ix]->tc_to_txq_map[tc];
+}
+
+static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
+					    struct sk_buff *skb)
+{
+#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+	return MLX5E_MIN_INLINE;
+}
+
+static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+	struct mlx5_wq_cyc       *wq   = &sq->wq;
+
+	u16 pi = sq->pc & wq->sz_m1;
+	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
+
+	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
+	struct mlx5_wqe_data_seg *dseg;
+
+	u8  opcode = MLX5_OPCODE_SEND;
+	dma_addr_t dma_addr = 0;
+	u16 headlen;
+	u16 ds_cnt;
+	u16 ihs;
+	int i;
+
+	memset(wqe, 0, sizeof(*wqe));
+
+	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+		eseg->cs_flags	= MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+	else
+		sq->stats.csum_offload_none++;
+
+	if (skb_is_gso(skb)) {
+		u32 payload_len;
+
+		eseg->mss    = cpu_to_be16(skb_shinfo(skb)->gso_size);
+		opcode       = MLX5_OPCODE_LSO;
+		ihs          = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		payload_len  = skb->len - ihs;
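+		/* num_bytes counts bytes on the wire: headers are replicated
+		 * once for every segment after the first.
+		 */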
+		MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
+					(skb_shinfo(skb)->gso_segs - 1) * ihs;
+		sq->stats.tso_packets++;
+		sq->stats.tso_bytes += payload_len;
+	} else {
+		ihs = mlx5e_get_inline_hdr_size(sq, skb);
+		MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
+							ETH_ZLEN);
+	}
+
+	skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
+	skb_pull_inline(skb, ihs);
+
+	eseg->inline_hdr_sz = cpu_to_be16(ihs);
+
+	ds_cnt  = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+	ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
+			       MLX5_SEND_WQE_DS);
+	dseg    = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
+
+	MLX5E_TX_SKB_CB(skb)->num_dma = 0;
+
+	headlen = skb_headlen(skb);
+	if (headlen) {
+		dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
+					  DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+			goto dma_unmap_wqe_err;
+
+		dseg->addr       = cpu_to_be64(dma_addr);
+		dseg->lkey       = sq->mkey_be;
+		dseg->byte_count = cpu_to_be32(headlen);
+
+		mlx5e_dma_push(sq, dma_addr, headlen);
+		MLX5E_TX_SKB_CB(skb)->num_dma++;
+
+		dseg++;
+	}
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+		int fsz = skb_frag_size(frag);
+
+		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
+					    DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+			goto dma_unmap_wqe_err;
+
+		dseg->addr       = cpu_to_be64(dma_addr);
+		dseg->lkey       = sq->mkey_be;
+		dseg->byte_count = cpu_to_be32(fsz);
+
+		mlx5e_dma_push(sq, dma_addr, fsz);
+		MLX5E_TX_SKB_CB(skb)->num_dma++;
+
+		dseg++;
+	}
+
+	ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma;
+
+	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
+	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+
+	sq->skb[pi] = skb;
+
+	MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt,
+							MLX5_SEND_WQEBB_NUM_DS);
+	sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+
+	netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);
+
+	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
+		netif_tx_stop_queue(sq->txq);
+		sq->stats.stopped++;
+	}
+
+	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
+		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+		mlx5e_tx_notify_hw(sq, wqe);
+	}
+
+	/* fill sq edge with nops to avoid wqe wrap around */
+	while ((sq->pc & wq->sz_m1) > sq->edge)
+		mlx5e_send_nop(sq, false);
+
+	sq->stats.packets++;
+	return NETDEV_TX_OK;
+
+dma_unmap_wqe_err:
+	sq->stats.dropped++;
+	mlx5e_dma_unmap_wqe_err(sq, skb);
+
+	dev_kfree_skb_any(skb);
+
+	return NETDEV_TX_OK;
+}
+
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];
+
+	return mlx5e_sq_xmit(sq, skb);
+}
+
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
+{
+	struct mlx5e_sq *sq;
+	u32 dma_fifo_cc;
+	u32 nbytes;
+	u16 npkts;
+	u16 sqcc;
+	int i;
+
+	/* avoid accessing cq (dma coherent memory) if not needed */
+	if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
+		return false;
+
+	sq = container_of(cq, struct mlx5e_sq, cq);
+
+	npkts = 0;
+	nbytes = 0;
+
+	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+	 * otherwise a cq overrun may occur
+	 */
+	sqcc = sq->cc;
+
+	/* avoid dirtying sq cache line every cqe */
+	dma_fifo_cc = sq->dma_fifo_cc;
+
+	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+		struct mlx5_cqe64 *cqe;
+		u16 wqe_counter;
+		bool last_wqe;
+
+		cqe = mlx5e_get_cqe(cq);
+		if (!cqe)
+			break;
+
+		mlx5_cqwq_pop(&cq->wq);
+
+		wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
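+		/* One CQE may complete several WQEs; walk the SQ up to and
+		 * including the WQE index reported by the CQE.
+		 */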
+		do {
+			struct sk_buff *skb;
+			u16 ci;
+			int j;
+
+			last_wqe = (sqcc == wqe_counter);
+
+			ci = sqcc & sq->wq.sz_m1;
+			skb = sq->skb[ci];
+
+			if (unlikely(!skb)) { /* nop */
+				sq->stats.nop++;
+				sqcc++;
+				continue;
+			}
+
+			for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
+				dma_addr_t addr;
+				u32 size;
+
+				mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
+				dma_fifo_cc++;
+				dma_unmap_single(sq->pdev, addr, size,
+						 DMA_TO_DEVICE);
+			}
+
+			npkts++;
+			nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes;
+			sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+			dev_kfree_skb(skb);
+		} while (!last_wqe);
+	}
+
+	mlx5_cqwq_update_db_record(&cq->wq);
+
+	/* ensure cq space is freed before enabling more cqes */
+	wmb();
+
+	sq->dma_fifo_cc = dma_fifo_cc;
+	sq->cc = sqcc;
+
+	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
+
+	if (netif_tx_queue_stopped(sq->txq) &&
+	    mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
+	    likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
+		netif_tx_wake_queue(sq->txq);
+		sq->stats.wake++;
+	}
+	if (i == MLX5E_TX_CQ_POLL_BUDGET) {
+		set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+		return true;
+	}
+
+	return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
new file mode 100644
index 0000000..2c7cb67
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
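+/* A CQE is owned by software when its ownership bit matches the parity of
+ * the consumer index wrap count; otherwise hardware has not written it yet.
+ */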
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
+{
+	struct mlx5_cqwq *wq = &cq->wq;
+	u32 ci = mlx5_cqwq_get_ci(wq);
+	struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
+	int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
+	int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
+
+	if (cqe_ownership_bit != sw_ownership_val)
+		return NULL;
+
+	/* ensure cqe content is read after cqe ownership bit */
+	rmb();
+
+	return cqe;
+}
+
+int mlx5e_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
+					       napi);
+	bool busy = false;
+	int i;
+
+	clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
+
+	for (i = 0; i < c->num_tc; i++)
+		busy |= mlx5e_poll_tx_cq(&c->sq[i].cq);
+
+	busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget);
+
+	busy |= mlx5e_post_rx_wqes(&c->rq);
+
+	if (busy)
+		return budget;
+
+	napi_complete(napi);
+
+	/* avoid losing completion event during/after polling cqs */
+	if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) {
+		napi_schedule(napi);
+		return 0;
+	}
+
+	for (i = 0; i < c->num_tc; i++)
+		mlx5e_cq_arm(&c->sq[i].cq);
+	mlx5e_cq_arm(&c->rq.cq);
+
+	return 0;
+}
+
+void mlx5e_completion_event(struct mlx5_core_cq *mcq)
+{
+	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+
+	set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+	set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
+	barrier();
+	napi_schedule(cq->napi);
+}
+
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
+{
+	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+	struct mlx5e_channel *c = cq->channel;
+	struct mlx5e_priv *priv = c->priv;
+	struct net_device *netdev = priv->netdev;
+
+	netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
+		   __func__, mcq->cqn, event);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 58800e4..a40b96d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -339,15 +339,14 @@
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 		       int nent, u64 mask, const char *name, struct mlx5_uar *uar)
 {
-	struct mlx5_eq_table *table = &dev->priv.eq_table;
+	struct mlx5_priv *priv = &dev->priv;
 	struct mlx5_create_eq_mbox_in *in;
 	struct mlx5_create_eq_mbox_out out;
 	int err;
 	int inlen;
 
 	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
-	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
-			     &eq->buf);
+	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
 	if (err)
 		return err;
 
@@ -378,14 +377,15 @@
 		goto err_in;
 	}
 
-	snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
+	snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
 		 name, pci_name(dev->pdev));
+
 	eq->eqn = out.eq_number;
 	eq->irqn = vecidx;
 	eq->dev = dev;
 	eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
-	err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
-			  eq->name, eq);
+	err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
+			  priv->irq_info[vecidx].name, eq);
 	if (err)
 		goto err_eq;
 
@@ -401,7 +401,7 @@
 	return 0;
 
 err_irq:
-	free_irq(table->msix_arr[vecidx].vector, eq);
+	free_irq(priv->msix_arr[vecidx].vector, eq);
 
 err_eq:
 	mlx5_cmd_destroy_eq(dev, eq->eqn);
@@ -417,16 +417,15 @@
 
 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 {
-	struct mlx5_eq_table *table = &dev->priv.eq_table;
 	int err;
 
 	mlx5_debug_eq_remove(dev, eq);
-	free_irq(table->msix_arr[eq->irqn].vector, eq);
+	free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
 	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
 	if (err)
 		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
 			       eq->eqn);
-	synchronize_irq(table->msix_arr[eq->irqn].vector);
+	synchronize_irq(dev->priv.msix_arr[eq->irqn].vector);
 	mlx5_buf_free(dev, &eq->buf);
 
 	return err;
@@ -456,7 +455,7 @@
 	u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
 	int err;
 
-	if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+	if (MLX5_CAP_GEN(dev, pg))
 		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);
 
 	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
@@ -479,7 +478,7 @@
 
 	err = mlx5_create_map_eq(dev, &table->pages_eq,
 				 MLX5_EQ_VEC_PAGES,
-				 dev->caps.gen.max_vf + 1,
+				 /* TODO: sriov max_vf + */ 1,
 				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
 				 &dev->priv.uuari.uars[0]);
 	if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
new file mode 100644
index 0000000..ca90b9b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
@@ -0,0 +1,428 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/flow_table.h>
+#include "mlx5_core.h"
+
+struct mlx5_ftg {
+	struct mlx5_flow_table_group    g;
+	u32				id;
+	u32				start_ix;
+};
+
+struct mlx5_flow_table {
+	struct mlx5_core_dev	*dev;
+	u8			level;
+	u8			type;
+	u32			id;
+	struct mutex		mutex; /* sync bitmap alloc */
+	u16			num_groups;
+	struct mlx5_ftg		*group;
+	unsigned long		*bitmap;
+	u32			size;
+};
+
+static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix,
+				   u32 flow_index, void *flow_context)
+{
+	u32 out[MLX5_ST_SZ_DW(set_fte_out)];
+	u32 *in;
+	void *in_flow_context;
+	int fcdls =
+		MLX5_GET(flow_context, flow_context, destination_list_size) *
+		MLX5_ST_SZ_BYTES(dest_format_struct);
+	int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls;
+	int err;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in) {
+		mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
+		return -ENOMEM;
+	}
+
+	MLX5_SET(set_fte_in, in, table_type, ft->type);
+	MLX5_SET(set_fte_in, in, table_id,   ft->id);
+	MLX5_SET(set_fte_in, in, flow_index, flow_index);
+	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+
+	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+	memcpy(in_flow_context, flow_context,
+	       MLX5_ST_SZ_BYTES(flow_context) + fcdls);
+
+	MLX5_SET(flow_context, in_flow_context, group_id,
+		 ft->group[group_ix].id);
+
+	memset(out, 0, sizeof(out));
+	err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
+					 sizeof(out));
+	kvfree(in);
+
+	return err;
+}
+
+static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
+{
+	u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
+	u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
+
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v)
+	MLX5_SET_DFTEI(in, table_type, ft->type);
+	MLX5_SET_DFTEI(in, table_id,   ft->id);
+	MLX5_SET_DFTEI(in, flow_index, flow_index);
+	MLX5_SET_DFTEI(in, opcode,     MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+
+	mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
+	u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
+
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v)
+	MLX5_SET_DFGI(in, table_type, ft->type);
+	MLX5_SET_DFGI(in, table_id,   ft->id);
+	MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+	MLX5_SET_DFGI(in, group_id, ft->group[i].id);
+	mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i)
+{
+	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
+	u32 *in;
+	void *in_match_criteria;
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5_flow_table_group *g = &ft->group[i].g;
+	u32 start_ix = ft->group[i].start_ix;
+	u32 end_ix = start_ix + (1 << g->log_sz) - 1;
+	int err;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in) {
+		mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
+		return -ENOMEM;
+	}
+	in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in,
+					 match_criteria);
+
+	memset(out, 0, sizeof(out));
+
+#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v)
+	MLX5_SET_CFGI(in, table_type,            ft->type);
+	MLX5_SET_CFGI(in, table_id,              ft->id);
+	MLX5_SET_CFGI(in, opcode,                MLX5_CMD_OP_CREATE_FLOW_GROUP);
+	MLX5_SET_CFGI(in, start_flow_index,      start_ix);
+	MLX5_SET_CFGI(in, end_flow_index,        end_ix);
+	MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable);
+
+	memcpy(in_match_criteria, g->match_criteria,
+	       MLX5_ST_SZ_BYTES(fte_match_param));
+
+	err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
+					 sizeof(out));
+	if (!err)
+		ft->group[i].id = MLX5_GET(create_flow_group_out, out,
+					   group_id);
+
+	kvfree(in);
+
+	return err;
+}
+
+static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft)
+{
+	int i;
+
+	for (i = 0; i < ft->num_groups; i++)
+		mlx5_destroy_flow_group_cmd(ft, i);
+}
+
+static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft)
+{
+	int err;
+	int i;
+
+	for (i = 0; i < ft->num_groups; i++) {
+		err = mlx5_create_flow_group_cmd(ft, i);
+		if (err)
+			goto err_destroy_flow_table_groups;
+	}
+
+	return 0;
+
+err_destroy_flow_table_groups:
+	for (i--; i >= 0; i--)
+		mlx5_destroy_flow_group_cmd(ft, i);
+
+	return err;
+}
+
+static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft)
+{
+	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
+	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
+	int err;
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(create_flow_table_in, in, table_type, ft->type);
+	MLX5_SET(create_flow_table_in, in, level,      ft->level);
+	MLX5_SET(create_flow_table_in, in, log_size,   order_base_2(ft->size));
+
+	MLX5_SET(create_flow_table_in, in, opcode,
+		 MLX5_CMD_OP_CREATE_FLOW_TABLE);
+
+	memset(out, 0, sizeof(out));
+	err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
+					 sizeof(out));
+	if (err)
+		return err;
+
+	ft->id = MLX5_GET(create_flow_table_out, out, table_id);
+
+	return 0;
+}
+
+static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
+	u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
+
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v)
+	MLX5_SET_DFTI(in, table_type, ft->type);
+	MLX5_SET_DFTI(in, table_id,   ft->id);
+	MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+
+	mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
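+/* An entry may only be added to a group whose match criteria are identical
+ * to the caller's, so scan the table's groups for an exact match.
+ */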
+static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable,
+			   u32 *match_criteria, int *group_ix)
+{
+	void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria,
+				      outer_headers);
+	void *mc_misc  = MLX5_ADDR_OF(fte_match_param, match_criteria,
+				      misc_parameters);
+	void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria,
+				      inner_headers);
+	int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
+	int mc_misc_sz  = MLX5_ST_SZ_BYTES(fte_match_set_misc);
+	int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
+	int i;
+
+	for (i = 0; i < ft->num_groups; i++) {
+		struct mlx5_flow_table_group *g = &ft->group[i].g;
+		void *gmc_outer = MLX5_ADDR_OF(fte_match_param,
+					       g->match_criteria,
+					       outer_headers);
+		void *gmc_misc  = MLX5_ADDR_OF(fte_match_param,
+					       g->match_criteria,
+					       misc_parameters);
+		void *gmc_inner = MLX5_ADDR_OF(fte_match_param,
+					       g->match_criteria,
+					       inner_headers);
+
+		if (g->match_criteria_enable != match_criteria_enable)
+			continue;
+
+		if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS)
+			if (memcmp(mc_outer, gmc_outer, mc_outer_sz))
+				continue;
+
+		if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS)
+			if (memcmp(mc_misc, gmc_misc, mc_misc_sz))
+				continue;
+
+		if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS)
+			if (memcmp(mc_inner, gmc_inner, mc_inner_sz))
+				continue;
+
+		*group_ix = i;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
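+/* Flow indexes come from a per-table bitmap, constrained to the group's
+ * [start_ix, start_ix + 2^log_sz) range.
+ */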
+static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix)
+{
+	struct mlx5_ftg *g = &ft->group[group_ix];
+	int err = 0;
+
+	mutex_lock(&ft->mutex);
+
+	*ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix);
+	if (*ix >= (g->start_ix + (1 << g->g.log_sz)))
+		err = -ENOSPC;
+	else
+		__set_bit(*ix, ft->bitmap);
+
+	mutex_unlock(&ft->mutex);
+
+	return err;
+}
+
+static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix)
+{
+	__clear_bit(ix, ft->bitmap);
+}
+
+int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
+			      void *match_criteria, void *flow_context,
+			      u32 *flow_index)
+{
+	struct mlx5_flow_table *ft = flow_table;
+	int group_ix;
+	int err;
+
+	err = mlx5_find_group(ft, match_criteria_enable, match_criteria,
+			      &group_ix);
+	if (err) {
+		mlx5_core_warn(ft->dev, "mlx5_find_group failed\n");
+		return err;
+	}
+
+	err = alloc_flow_index(ft, group_ix, flow_index);
+	if (err) {
+		mlx5_core_warn(ft->dev, "alloc_flow_index failed\n");
+		return err;
+	}
+
+	return mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context);
+}
+EXPORT_SYMBOL(mlx5_add_flow_table_entry);
+
+void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
+{
+	struct mlx5_flow_table *ft = flow_table;
+
+	mlx5_del_flow_entry_cmd(ft, flow_index);
+	mlx5_free_flow_index(ft, flow_index);
+}
+EXPORT_SYMBOL(mlx5_del_flow_table_entry);
+
+void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
+			     u16 num_groups,
+			     struct mlx5_flow_table_group *group)
+{
+	struct mlx5_flow_table *ft;
+	u32 start_ix = 0;
+	u32 ft_size = 0;
+	void *gr;
+	void *bm;
+	int err;
+	int i;
+
+	for (i = 0; i < num_groups; i++)
+		ft_size += (1 << group[i].log_sz);
+
+	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+	gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL);
+	bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(*ft->bitmap), GFP_KERNEL);
+	if (!ft || !gr || !bm)
+		goto err_free_ft;
+
+	ft->group	= gr;
+	ft->bitmap	= bm;
+	ft->num_groups	= num_groups;
+	ft->level	= level;
+	ft->type	= table_type;
+	ft->size	= ft_size;
+	ft->dev		= dev;
+	mutex_init(&ft->mutex);
+
+	for (i = 0; i < ft->num_groups; i++) {
+		memcpy(&ft->group[i].g, &group[i], sizeof(*group));
+		ft->group[i].start_ix = start_ix;
+		start_ix += 1 << group[i].log_sz;
+	}
+
+	err = mlx5_create_flow_table_cmd(ft);
+	if (err)
+		goto err_free_ft;
+
+	err = mlx5_create_flow_table_groups(ft);
+	if (err)
+		goto err_destroy_flow_table_cmd;
+
+	return ft;
+
+err_destroy_flow_table_cmd:
+	mlx5_destroy_flow_table_cmd(ft);
+
+err_free_ft:
+	mlx5_core_warn(dev, "failed to alloc flow table\n");
+	kfree(bm);
+	kfree(gr);
+	kfree(ft);
+
+	return NULL;
+}
+EXPORT_SYMBOL(mlx5_create_flow_table);
+
+void mlx5_destroy_flow_table(void *flow_table)
+{
+	struct mlx5_flow_table *ft = flow_table;
+
+	mlx5_destroy_flow_table_groups(ft);
+	mlx5_destroy_flow_table_cmd(ft);
+	kfree(ft->bitmap);
+	kfree(ft->group);
+	kfree(ft);
+}
+EXPORT_SYMBOL(mlx5_destroy_flow_table);
+
+u32 mlx5_get_flow_table_id(void *flow_table)
+{
+	struct mlx5_flow_table *ft = flow_table;
+
+	return ft->id;
+}
+EXPORT_SYMBOL(mlx5_get_flow_table_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 4b4cda3..9335e5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -35,79 +35,133 @@
 #include <linux/module.h>
 #include "mlx5_core.h"
 
-int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev)
+static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
+				  int outlen)
 {
-	struct mlx5_cmd_query_adapter_mbox_out *out;
-	struct mlx5_cmd_query_adapter_mbox_in in;
-	int err;
-
-	out = kzalloc(sizeof(*out), GFP_KERNEL);
-	if (!out)
-		return -ENOMEM;
-
-	memset(&in, 0, sizeof(in));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
-	if (err)
-		goto out_out;
-
-	if (out->hdr.status) {
-		err = mlx5_cmd_status_to_err(&out->hdr);
-		goto out_out;
-	}
-
-	memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid));
-
-out_out:
-	kfree(out);
-
-	return err;
-}
-
-int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps)
-{
-	return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR);
-}
-
-int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps)
-{
-	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
-	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
-	void *out;
-	int err;
-
-	if (!(dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
-		return -ENOTSUPP;
+	u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
 
 	memset(in, 0, sizeof(in));
-	out = kzalloc(out_sz, GFP_KERNEL);
+
+	MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
+
+	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_query_board_id(struct mlx5_core_dev *dev)
+{
+	u32 *out;
+	int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
+	int err;
+
+	out = kzalloc(outlen, GFP_KERNEL);
 	if (!out)
 		return -ENOMEM;
-	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
-	MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_ODP_CUR);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+
+	err = mlx5_cmd_query_adapter(dev, out, outlen);
 	if (err)
 		goto out;
 
-	err = mlx5_cmd_status_to_err_v2(out);
-	if (err) {
-		mlx5_core_warn(dev, "query cur hca ODP caps failed, %d\n", err);
-		goto out;
-	}
-
-	memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct),
-	       sizeof(*caps));
-
-	mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n",
-		be32_to_cpu(caps->per_transport_caps.rc_odp_caps),
-		be32_to_cpu(caps->per_transport_caps.uc_odp_caps),
-		be32_to_cpu(caps->per_transport_caps.ud_odp_caps));
+	memcpy(dev->board_id,
+	       MLX5_ADDR_OF(query_adapter_out, out,
+			    query_adapter_struct.vsd_contd_psid),
+	       MLX5_FLD_SZ_BYTES(query_adapter_out,
+				 query_adapter_struct.vsd_contd_psid));
 
 out:
 	kfree(out);
 	return err;
 }
-EXPORT_SYMBOL(mlx5_query_odp_caps);
+
+int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
+{
+	u32 *out;
+	int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
+	int err;
+
+	out = kzalloc(outlen, GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	err = mlx5_cmd_query_adapter(mdev, out, outlen);
+	if (err)
+		goto out;
+
+	*vendor_id = MLX5_GET(query_adapter_out, out,
+			      query_adapter_struct.ieee_vendor_id);
+out:
+	kfree(out);
+	return err;
+}
+EXPORT_SYMBOL(mlx5_core_query_vendor_id);
+
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
+{
+	int err;
+
+	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
+	if (err)
+		return err;
+
+	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
+	if (err)
+		return err;
+
+	if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+					 HCA_CAP_OPMOD_GET_CUR);
+		if (err)
+			return err;
+		err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+					 HCA_CAP_OPMOD_GET_MAX);
+		if (err)
+			return err;
+	}
+
+	if (MLX5_CAP_GEN(dev, pg)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
+					 HCA_CAP_OPMOD_GET_CUR);
+		if (err)
+			return err;
+		err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
+					 HCA_CAP_OPMOD_GET_MAX);
+		if (err)
+			return err;
+	}
+
+	if (MLX5_CAP_GEN(dev, atomic)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+					 HCA_CAP_OPMOD_GET_CUR);
+		if (err)
+			return err;
+		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+					 HCA_CAP_OPMOD_GET_MAX);
+		if (err)
+			return err;
+	}
+
+	if (MLX5_CAP_GEN(dev, roce)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
+					 HCA_CAP_OPMOD_GET_CUR);
+		if (err)
+			return err;
+		err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
+					 HCA_CAP_OPMOD_GET_MAX);
+		if (err)
+			return err;
+	}
+
+	if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
+					 HCA_CAP_OPMOD_GET_CUR);
+		if (err)
+			return err;
+		err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
+					 HCA_CAP_OPMOD_GET_MAX);
+		if (err)
+			return err;
+	}
+	return 0;
+}
 
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 28425e5..afad529 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -38,6 +38,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/io-mapping.h>
+#include <linux/interrupt.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cq.h>
 #include <linux/mlx5/qp.h>
@@ -47,10 +48,6 @@
 #include <linux/mlx5/mlx5_ifc.h>
 #include "mlx5_core.h"
 
-#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "3.0"
-#define DRIVER_RELDATE  "January 2015"
-
 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
 MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
 MODULE_LICENSE("Dual BSD/GPL");
@@ -208,24 +205,28 @@
 
 static int mlx5_enable_msix(struct mlx5_core_dev *dev)
 {
-	struct mlx5_eq_table *table = &dev->priv.eq_table;
-	int num_eqs = 1 << dev->caps.gen.log_max_eq;
+	struct mlx5_priv *priv = &dev->priv;
+	struct mlx5_eq_table *table = &priv->eq_table;
+	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
 	int nvec;
 	int i;
 
-	nvec = dev->caps.gen.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
+	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
+	       MLX5_EQ_VEC_COMP_BASE;
 	nvec = min_t(int, nvec, num_eqs);
 	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
 		return -ENOMEM;
 
-	table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL);
-	if (!table->msix_arr)
-		return -ENOMEM;
+	priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL);
+
+	priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
+	if (!priv->msix_arr || !priv->irq_info)
+		goto err_free_msix;
 
 	for (i = 0; i < nvec; i++)
-		table->msix_arr[i].entry = i;
+		priv->msix_arr[i].entry = i;
 
-	nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
+	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
 				     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
 	if (nvec < 0)
 		return nvec;
@@ -233,14 +234,20 @@
 	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
 
 	return 0;
+
+err_free_msix:
+	kfree(priv->irq_info);
+	kfree(priv->msix_arr);
+	return -ENOMEM;
 }
 
 static void mlx5_disable_msix(struct mlx5_core_dev *dev)
 {
-	struct mlx5_eq_table *table = &dev->priv.eq_table;
+	struct mlx5_priv *priv = &dev->priv;
 
 	pci_disable_msix(dev->pdev);
-	kfree(table->msix_arr);
+	kfree(priv->irq_info);
+	kfree(priv->msix_arr);
 }
 
 struct mlx5_reg_host_endianess {
@@ -277,98 +284,20 @@
 	}
 }
 
-/* selectively copy writable fields clearing any reserved area
- */
-static void copy_rw_fields(void *to, struct mlx5_caps *from)
-{
-	__be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22);
-	u64 v64;
-
-	MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp);
-	MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
-	MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
-	MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size);
-	MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
-	MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12);
-	v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
-	*flags_off = cpu_to_be64(v64);
-}
-
-static u16 get_pkey_table_size(int pkey)
-{
-	if (pkey > MLX5_MAX_LOG_PKEY_TABLE)
-		return 0;
-
-	return MLX5_MIN_PKEY_TABLE_SIZE << pkey;
-}
-
-static void fw2drv_caps(struct mlx5_caps *caps, void *out)
-{
-	struct mlx5_general_caps *gen = &caps->gen;
-
-	gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz);
-	gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz);
-	gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp);
-	gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz);
-	gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs);
-	gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz);
-	gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq);
-	gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz);
-	gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey);
-	gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq);
-	gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection);
-	gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz);
-	gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size);
-	gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size);
-	gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc);
-	gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc);
-	gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp);
-	gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp);
-	gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt);
-	gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size));
-	gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay);
-	gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports);
-	gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg);
-	gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support);
-	gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22));
-	pr_debug("flags = 0x%llx\n", gen->flags);
-	gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz);
-	gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz);
-	gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf);
-	gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size);
-	gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq);
-	gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq);
-	gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc);
-	gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg);
-	gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd);
-	gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd);
-	gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz);
-}
-
-static const char *caps_opmod_str(u16 opmod)
-{
-	switch (opmod) {
-	case HCA_CAP_OPMOD_GET_MAX:
-		return "GET_MAX";
-	case HCA_CAP_OPMOD_GET_CUR:
-		return "GET_CUR";
-	default:
-		return "Invalid";
-	}
-}
-
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
-		       u16 opmod)
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+		       enum mlx5_cap_mode cap_mode)
 {
 	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
 	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
-	void *out;
+	void *out, *hca_caps;
+	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
 	int err;
 
 	memset(in, 0, sizeof(in));
 	out = kzalloc(out_sz, GFP_KERNEL);
 	if (!out)
 		return -ENOMEM;
+
 	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
 	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
@@ -377,12 +306,30 @@
 
 	err = mlx5_cmd_status_to_err_v2(out);
 	if (err) {
-		mlx5_core_warn(dev, "query max hca cap failed, %d\n", err);
+		mlx5_core_warn(dev,
+			       "QUERY_HCA_CAP: type(%x) opmode(%x) failed(%d)\n",
+			       cap_type, cap_mode, err);
 		goto query_ex;
 	}
-	mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod));
-	fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct));
 
+	hca_caps =  MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+
+	switch (cap_mode) {
+	case HCA_CAP_OPMOD_GET_MAX:
+		memcpy(dev->hca_caps_max[cap_type], hca_caps,
+		       MLX5_UN_SZ_BYTES(hca_cap_union));
+		break;
+	case HCA_CAP_OPMOD_GET_CUR:
+		memcpy(dev->hca_caps_cur[cap_type], hca_caps,
+		       MLX5_UN_SZ_BYTES(hca_cap_union));
+		break;
+	default:
+		mlx5_core_warn(dev,
+			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
+			       cap_type, cap_mode);
+		err = -EINVAL;
+		break;
+	}
 query_ex:
 	kfree(out);
 	return err;
@@ -409,49 +356,45 @@
 {
 	void *set_ctx = NULL;
 	struct mlx5_profile *prof = dev->profile;
-	struct mlx5_caps *cur_caps = NULL;
-	struct mlx5_caps *max_caps = NULL;
 	int err = -ENOMEM;
 	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+	void *set_hca_cap;
 
 	set_ctx = kzalloc(set_sz, GFP_KERNEL);
 	if (!set_ctx)
 		goto query_ex;
 
-	max_caps = kzalloc(sizeof(*max_caps), GFP_KERNEL);
-	if (!max_caps)
-		goto query_ex;
-
-	cur_caps = kzalloc(sizeof(*cur_caps), GFP_KERNEL);
-	if (!cur_caps)
-		goto query_ex;
-
-	err = mlx5_core_get_caps(dev, max_caps, HCA_CAP_OPMOD_GET_MAX);
+	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
 	if (err)
 		goto query_ex;
 
-	err = mlx5_core_get_caps(dev, cur_caps, HCA_CAP_OPMOD_GET_CUR);
+	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
 	if (err)
 		goto query_ex;
 
+	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
+				   capability);
+	memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
+	       MLX5_ST_SZ_BYTES(cmd_hca_cap));
+
+	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
+		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
+		      128);
 	/* we limit the size of the pkey table to 128 entries for now */
-	cur_caps->gen.pkey_table_size = 128;
+	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
+		 to_fw_pkey_sz(128));
 
 	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
-		cur_caps->gen.log_max_qp = prof->log_max_qp;
+		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
+			 prof->log_max_qp);
 
-	/* disable checksum */
-	cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
+	/* disable cmdif checksum */
+	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
 
-	copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct),
-		       cur_caps);
 	err = set_caps(dev, set_ctx, set_sz);
 
 query_ex:
-	kfree(cur_caps);
-	kfree(max_caps);
 	kfree(set_ctx);
-
 	return err;
 }
 
@@ -507,6 +450,77 @@
 	return 0;
 }
 
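+/* Pin each completion vector to a single CPU, spread round-robin across the
+ * CPUs local to the device's NUMA node.
+ */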
+static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+	struct mlx5_priv *priv  = &mdev->priv;
+	struct msix_entry *msix = priv->msix_arr;
+	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
+	int numa_node           = dev_to_node(&mdev->pdev->dev);
+	int err;
+
+	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+		mlx5_core_warn(mdev, "zalloc_cpumask_var failed\n");
+		return -ENOMEM;
+	}
+
+	cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+			priv->irq_info[i].mask);
+
+	err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
+	if (err) {
+		mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x\n",
+			       irq);
+		goto err_clear_mask;
+	}
+
+	return 0;
+
+err_clear_mask:
+	free_cpumask_var(priv->irq_info[i].mask);
+	return err;
+}
+
+static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+	struct mlx5_priv *priv  = &mdev->priv;
+	struct msix_entry *msix = priv->msix_arr;
+	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
+
+	irq_set_affinity_hint(irq, NULL);
+	free_cpumask_var(priv->irq_info[i].mask);
+}
+
+static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
+{
+	int err;
+	int i;
+
+	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
+		err = mlx5_irq_set_affinity_hint(mdev, i);
+		if (err)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	for (i--; i >= 0; i--)
+		mlx5_irq_clear_affinity_hint(mdev, i);
+
+	return err;
+}
+
+static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
+{
+	int i;
+
+	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
+		mlx5_irq_clear_affinity_hint(mdev, i);
+}
+
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
 {
 	struct mlx5_eq_table *table = &dev->priv.eq_table;
@@ -549,7 +560,7 @@
 static int alloc_comp_eqs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eq_table *table = &dev->priv.eq_table;
-	char name[MLX5_MAX_EQ_NAME];
+	char name[MLX5_MAX_IRQ_NAME];
 	struct mlx5_eq *eq;
 	int ncomp_vec;
 	int nent;
@@ -566,7 +577,7 @@
 			goto clean;
 		}
 
-		snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
 		err = mlx5_create_map_eq(dev, eq,
 					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
 					 name, &dev->priv.uuari.uars[0]);
@@ -588,6 +599,64 @@
 	return err;
 }
 
+#ifdef CONFIG_MLX5_CORE_EN
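+/* Negotiate the ISSI (interface step sequence ID) with firmware: query the
+ * supported set and switch to ISSI 1 when available, otherwise stay at 0.
+ */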
+static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
+{
+	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
+	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
+	u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
+	u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
+	int err;
+	u32 sup_issi;
+
+	memset(query_in, 0, sizeof(query_in));
+	memset(query_out, 0, sizeof(query_out));
+
+	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
+
+	err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
+					 query_out, sizeof(query_out));
+	if (err) {
+		if (((struct mlx5_outbox_hdr *)query_out)->status ==
+		    MLX5_CMD_STAT_BAD_OP_ERR) {
+			pr_debug("Only ISSI 0 is supported\n");
+			return 0;
+		}
+
+		pr_err("failed to query ISSI\n");
+		return err;
+	}
+
+	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
+
+	if (sup_issi & (1 << 1)) {
+		memset(set_in, 0, sizeof(set_in));
+		memset(set_out, 0, sizeof(set_out));
+
+		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
+		MLX5_SET(set_issi_in, set_in, current_issi, 1);
+
+		err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
+						 set_out, sizeof(set_out));
+		if (err) {
+			pr_err("failed to set ISSI=1\n");
+			return err;
+		}
+
+		dev->issi = 1;
+
+		return 0;
+	} else if (sup_issi & (1 << 0) || !sup_issi) {
+		return 0;
+	}
+
+	return -ENOTSUPP;
+}
+#endif
+
 static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
 	struct mlx5_priv *priv = &dev->priv;
@@ -650,6 +716,14 @@
 		goto err_pagealloc_cleanup;
 	}
 
+#ifdef CONFIG_MLX5_CORE_EN
+	err = mlx5_core_set_issi(dev);
+	if (err) {
+		dev_err(&pdev->dev, "failed to set issi\n");
+		goto err_disable_hca;
+	}
+#endif
+
 	err = mlx5_satisfy_startup_pages(dev, 1);
 	if (err) {
 		dev_err(&pdev->dev, "failed to allocate boot pages\n");
@@ -688,15 +762,15 @@
 
 	mlx5_start_health_poll(dev);
 
-	err = mlx5_cmd_query_hca_cap(dev, &dev->caps);
+	err = mlx5_query_hca_caps(dev);
 	if (err) {
 		dev_err(&pdev->dev, "query hca failed\n");
 		goto err_stop_poll;
 	}
 
-	err = mlx5_cmd_query_adapter(dev);
+	err = mlx5_query_board_id(dev);
 	if (err) {
-		dev_err(&pdev->dev, "query adapter failed\n");
+		dev_err(&pdev->dev, "query board id failed\n");
 		goto err_stop_poll;
 	}
 
@@ -730,6 +804,12 @@
 		goto err_stop_eqs;
 	}
 
+	err = mlx5_irq_set_affinity_hints(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
+		goto err_free_comp_eqs;
+	}
+
 	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
 
 	mlx5_init_cq_table(dev);
@@ -739,6 +819,9 @@
 
 	return 0;
 
+err_free_comp_eqs:
+	free_comp_eqs(dev);
+
 err_stop_eqs:
 	mlx5_stop_eqs(dev);
 
@@ -793,6 +876,7 @@
 	mlx5_cleanup_srq_table(dev);
 	mlx5_cleanup_qp_table(dev);
 	mlx5_cleanup_cq_table(dev);
+	mlx5_irq_clear_affinity_hints(dev);
 	free_comp_eqs(dev);
 	mlx5_stop_eqs(dev);
 	mlx5_free_uuars(dev, &priv->uuari);
@@ -1048,6 +1132,10 @@
 	if (err)
 		goto err_health;
 
+#ifdef CONFIG_MLX5_CORE_EN
+	mlx5e_init();
+#endif
+
 	return 0;
 
 err_health:
@@ -1060,6 +1148,9 @@
 
 static void __exit cleanup(void)
 {
+#ifdef CONFIG_MLX5_CORE_EN
+	mlx5e_cleanup();
+#endif
 	pci_unregister_driver(&mlx5_core_driver);
 	mlx5_health_cleanup();
 	destroy_workqueue(mlx5_core_wq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
index d79fd85..d5a0c2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -91,7 +91,7 @@
 
 	memset(&in, 0, sizeof(in));
 	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG);
+	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG);
 	memcpy(in.gid, mgid, sizeof(*mgid));
 	in.qpn = cpu_to_be32(qpn);
 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index a051b90..fc88eca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -37,6 +37,10 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 
+#define DRIVER_NAME "mlx5_core"
+#define DRIVER_VERSION "3.0-1"
+#define DRIVER_RELDATE  "January 2015"
+
 extern int mlx5_core_debug_mask;
 
 #define mlx5_core_dbg(dev, format, ...)					\
@@ -65,11 +69,20 @@
 	MLX5_CMD_TIME, /* print command execution time */
 };
 
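+/* Execute a command and translate the returned mailbox status into an errno */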
+static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
+					     int in_size, u32 *out,
+					     int out_size)
+{
+	int err;
+
+	err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
+	if (err)
+		return err;
+
+	return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
+}
 
-int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
-			   struct mlx5_caps *caps);
-int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev);
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
+int mlx5_query_board_id(struct mlx5_core_dev *dev);
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
 
+void mlx5e_init(void);
+void mlx5e_cleanup(void);
+
 #endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 49e90f2..7014799 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -102,3 +102,229 @@
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
+
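+/* Read the raw PTYS (port type and speed) register for @local_port into
+ * the caller-provided @ptys buffer.
+ */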
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+			 int ptys_size, int proto_mask, u8 local_port)
+{
+	u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+	int err;
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(ptys_reg, in, local_port, local_port);
+	MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+
+	err = mlx5_core_access_reg(dev, in, sizeof(in), ptys,
+				   ptys_size, MLX5_REG_PTYS, 0, 0);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
+
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+			      u32 *proto_cap, int proto_mask)
+{
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+	int err;
+
+	err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
+	if (err)
+		return err;
+
+	if (proto_mask == MLX5_PTYS_EN)
+		*proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
+	else
+		*proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap);
+
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+				u32 *proto_admin, int proto_mask)
+{
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+	int err;
+
+	err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
+	if (err)
+		return err;
+
+	if (proto_mask == MLX5_PTYS_EN)
+		*proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+	else
+		*proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin);
+
+int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
+				    u8 *link_width_oper, u8 local_port)
+{
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+	int err;
+
+	err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB, local_port);
+	if (err)
+		return err;
+
+	*link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper);
+
+int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
+			       u8 *proto_oper, int proto_mask,
+			       u8 local_port)
+{
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+	int err;
+
+	err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, local_port);
+	if (err)
+		return err;
+
+	if (proto_mask == MLX5_PTYS_EN)
+		*proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+	else
+		*proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper);
+
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+			int proto_mask)
+{
+	u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+	int err;
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(ptys_reg, in, local_port, 1);
+	MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+	if (proto_mask == MLX5_PTYS_EN)
+		MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
+	else
+		MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
+
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_PTYS, 0, 1);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
+
+int mlx5_set_port_status(struct mlx5_core_dev *dev,
+			 enum mlx5_port_status status)
+{
+	u32 in[MLX5_ST_SZ_DW(paos_reg)];
+	u32 out[MLX5_ST_SZ_DW(paos_reg)];
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(paos_reg, in, admin_status, status);
+	MLX5_SET(paos_reg, in, ase, 1);
+
+	return mlx5_core_access_reg(dev, in, sizeof(in), out,
+				    sizeof(out), MLX5_REG_PAOS, 0, 1);
+}
+
+int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
+{
+	u32 in[MLX5_ST_SZ_DW(paos_reg)];
+	u32 out[MLX5_ST_SZ_DW(paos_reg)];
+	int err;
+
+	memset(in, 0, sizeof(in));
+
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_PAOS, 0, 0);
+	if (err)
+		return err;
+
+	*status = MLX5_GET(paos_reg, out, oper_status);
+	return err;
+}
+
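+/* PMTU register accessors; any of @admin_mtu/@max_mtu/@oper_mtu may be
+ * NULL when the caller does not need that value.
+ */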
+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
+				int *max_mtu, int *oper_mtu, u8 port)
+{
+	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(pmtu_reg, in, local_port, port);
+
+	mlx5_core_access_reg(dev, in, sizeof(in), out,
+			     sizeof(out), MLX5_REG_PMTU, 0, 0);
+
+	if (max_mtu)
+		*max_mtu  = MLX5_GET(pmtu_reg, out, max_mtu);
+	if (oper_mtu)
+		*oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu);
+	if (admin_mtu)
+		*admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
+}
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
+{
+	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
+	MLX5_SET(pmtu_reg, in, local_port, port);
+
+	return mlx5_core_access_reg(dev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_PMTU, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
+
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+			     u8 port)
+{
+	mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
+
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+			      u8 port)
+{
+	mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
+
+static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
+				int pvlc_size, u8 local_port)
+{
+	u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
+	int err;
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(pvlc_reg, in, local_port, local_port);
+
+	err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
+				   pvlc_size, MLX5_REG_PVLC, 0, 0);
+
+	return err;
+}
+
+int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+			      u8 *vl_hw_cap, u8 local_port)
+{
+	u32 out[MLX5_ST_SZ_DW(pvlc_reg)];
+	int err;
+
+	err = mlx5_query_port_pvlc(dev, out, sizeof(out), local_port);
+	if (err)
+		return err;
+
+	*vl_hw_cap = MLX5_GET(pvlc_reg, out, vl_hw_cap);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index dc7dbf7..8b494b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -187,10 +187,17 @@
 	struct mlx5_destroy_qp_mbox_in din;
 	struct mlx5_destroy_qp_mbox_out dout;
 	int err;
+	void *qpc;
 
 	memset(&out, 0, sizeof(out));
 	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
 
+	if (dev->issi) {
+		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+		/* 0xffffff means we ask to work with cqe version 0 */
+		MLX5_SET(qpc, qpc, user_index, 0xffffff);
+	}
+
 	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
 	if (err) {
 		mlx5_core_warn(dev, "ret %d\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index f9d25dc..c48f504 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -37,6 +37,7 @@
 #include <linux/mlx5/srq.h>
 #include <rdma/ib_verbs.h>
 #include "mlx5_core.h"
+#include "transobj.h"
 
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
 {
@@ -62,6 +63,74 @@
 		complete(&srq->free);
 }
 
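+/* Size in bytes of the PAS (physical address) array needed to map the
+ * receive buffer described by @srqc.
+ */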
+static int get_pas_size(void *srqc)
+{
+	u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
+	u32 log_srq_size  = MLX5_GET(srqc, srqc, log_srq_size);
+	u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
+	u32 page_offset   = MLX5_GET(srqc, srqc, page_offset);
+	u32 po_quanta	  = 1 << (log_page_size - 6);
+	u32 rq_sz	  = 1 << (log_srq_size + 4 + log_rq_stride);
+	u32 page_size	  = 1 << log_page_size;
+	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
+	u32 rq_num_pas	  = (rq_sz_po + page_size - 1) / page_size;
+
+	return rq_num_pas * sizeof(u64);
+}
+
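+/* Translate between the SRQ context layout and the RMP context layout;
+ * @srqc_to_rmpc selects the direction.
+ */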
+static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
+{
+	void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+	if (srqc_to_rmpc) {
+		switch (MLX5_GET(srqc, srqc, state)) {
+		case MLX5_SRQC_STATE_GOOD:
+			MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+			break;
+		case MLX5_SRQC_STATE_ERROR:
+			MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
+			break;
+		default:
+			pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__,
+				__LINE__, MLX5_GET(srqc, srqc, state));
+			MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state));
+		}
+
+		MLX5_SET(wq,   wq, wq_signature,  MLX5_GET(srqc,  srqc, wq_signature));
+		MLX5_SET(wq,   wq, log_wq_pg_sz,  MLX5_GET(srqc,  srqc, log_page_size));
+		MLX5_SET(wq,   wq, log_wq_stride, MLX5_GET(srqc,  srqc, log_rq_stride) + 4);
+		MLX5_SET(wq,   wq, log_wq_sz,     MLX5_GET(srqc,  srqc, log_srq_size));
+		MLX5_SET(wq,   wq, page_offset,   MLX5_GET(srqc,  srqc, page_offset));
+		MLX5_SET(wq,   wq, lwm,           MLX5_GET(srqc,  srqc, lwm));
+		MLX5_SET(wq,   wq, pd,            MLX5_GET(srqc,  srqc, pd));
+		MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc,	  srqc, dbr_addr));
+	} else {
+		switch (MLX5_GET(rmpc, rmpc, state)) {
+		case MLX5_RMPC_STATE_RDY:
+			MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
+			break;
+		case MLX5_RMPC_STATE_ERR:
+			MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
+			break;
+		default:
+			pr_warn("%s: %d: Unknown rmp state = 0x%x\n",
+				__func__, __LINE__,
+				MLX5_GET(rmpc, rmpc, state));
+			MLX5_SET(srqc, srqc, state,
+				 MLX5_GET(rmpc, rmpc, state));
+		}
+
+		MLX5_SET(srqc,   srqc, wq_signature,   MLX5_GET(wq,   wq, wq_signature));
+		MLX5_SET(srqc,   srqc, log_page_size,  MLX5_GET(wq,   wq, log_wq_pg_sz));
+		MLX5_SET(srqc,   srqc, log_rq_stride,  MLX5_GET(wq,   wq, log_wq_stride) - 4);
+		MLX5_SET(srqc,   srqc, log_srq_size,   MLX5_GET(wq,   wq, log_wq_sz));
+		MLX5_SET(srqc,   srqc, page_offset,    MLX5_GET(wq,   wq, page_offset));
+		MLX5_SET(srqc,   srqc, lwm,	       MLX5_GET(wq,   wq, lwm));
+		MLX5_SET(srqc,   srqc, pd,	       MLX5_GET(wq,   wq, pd));
+		MLX5_SET64(srqc, srqc, dbr_addr,       MLX5_GET64(wq, wq, dbr_addr));
+	}
+}
+
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
 {
 	struct mlx5_srq_table *table = &dev->priv.srq_table;
@@ -79,26 +148,311 @@
 }
 EXPORT_SYMBOL(mlx5_core_get_srq);
 
-int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-			 struct mlx5_create_srq_mbox_in *in, int inlen)
+static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+			  struct mlx5_create_srq_mbox_in *in, int inlen)
 {
 	struct mlx5_create_srq_mbox_out out;
-	struct mlx5_srq_table *table = &dev->priv.srq_table;
-	struct mlx5_destroy_srq_mbox_in din;
-	struct mlx5_destroy_srq_mbox_out dout;
 	int err;
 
 	memset(&out, 0, sizeof(out));
-	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
-	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
-	if (err)
-		return err;
 
-	if (out.hdr.status)
-		return mlx5_cmd_status_to_err(&out.hdr);
+	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
+
+	err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out),
+					 sizeof(out));
 
 	srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
 
+	return err;
+}
+
+static int destroy_srq_cmd(struct mlx5_core_dev *dev,
+			   struct mlx5_core_srq *srq)
+{
+	struct mlx5_destroy_srq_mbox_in in;
+	struct mlx5_destroy_srq_mbox_out out;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
+	in.srqn = cpu_to_be32(srq->srqn);
+
+	return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
+					  (u32 *)(&out), sizeof(out));
+}
+
+static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+		       u16 lwm, int is_srq)
+{
+	struct mlx5_arm_srq_mbox_in	in;
+	struct mlx5_arm_srq_mbox_out	out;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+
+	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
+	in.hdr.opmod = cpu_to_be16(!!is_srq);
+	in.srqn = cpu_to_be32(srq->srqn);
+	in.lwm = cpu_to_be16(lwm);
+
+	return mlx5_cmd_exec_check_status(dev, (u32 *)(&in),
+					  sizeof(in), (u32 *)(&out),
+					  sizeof(out));
+}
+
+static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+			 struct mlx5_query_srq_mbox_out *out)
+{
+	struct mlx5_query_srq_mbox_in in;
+
+	memset(&in, 0, sizeof(in));
+
+	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
+	in.srqn = cpu_to_be32(srq->srqn);
+
+	return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
+					  (u32 *)out, sizeof(*out));
+}
+
+static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
+			      struct mlx5_core_srq *srq,
+			      struct mlx5_create_srq_mbox_in *in,
+			      int srq_inlen)
+{
+	u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+	void *create_in;
+	void *srqc;
+	void *xrc_srqc;
+	void *pas;
+	int pas_size;
+	int inlen;
+	int err;
+
+	srqc	  = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
+	pas_size  = get_pas_size(srqc);
+	inlen	  = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
+	create_in = mlx5_vzalloc(inlen);
+	if (!create_in)
+		return -ENOMEM;
+
+	xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
+				xrc_srq_context_entry);
+	pas	 = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
+
+	memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
+	memcpy(pas, in->pas, pas_size);
+	/* 0xffffff means we ask to work with cqe version 0 */
+	MLX5_SET(xrc_srqc,	    xrc_srqc,  user_index, 0xffffff);
+	MLX5_SET(create_xrc_srq_in, create_in, opcode,
+		 MLX5_CMD_OP_CREATE_XRC_SRQ);
+
+	memset(create_out, 0, sizeof(create_out));
+	err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
+					 sizeof(create_out));
+	if (err)
+		goto out;
+
+	srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
+out:
+	kvfree(create_in);
+	return err;
+}
+
+static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
+			       struct mlx5_core_srq *srq)
+{
+	u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
+	u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
+
+	memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+	memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+
+	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
+		 MLX5_CMD_OP_DESTROY_XRC_SRQ);
+	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+
+	return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+					  xrcsrq_out, sizeof(xrcsrq_out));
+}
+
+static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
+			   struct mlx5_core_srq *srq, u16 lwm)
+{
+	u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
+	u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
+
+	memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+	memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+
+	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode,   MLX5_CMD_OP_ARM_XRC_SRQ);
+	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod,   MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm,      lwm);
+
+	return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+					  xrcsrq_out, sizeof(xrcsrq_out));
+}
+
+static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
+			     struct mlx5_core_srq *srq,
+			     struct mlx5_query_srq_mbox_out *out)
+{
+	u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+	u32 *xrcsrq_out;
+	void *srqc;
+	void *xrc_srqc;
+	int err;
+
+	xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+	if (!xrcsrq_out)
+		return -ENOMEM;
+	memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+
+	MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
+		 MLX5_CMD_OP_QUERY_XRC_SRQ);
+	MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+	err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+					  xrcsrq_out,
+					  MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+	if (err)
+		goto out;
+
+	xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
+				xrc_srq_context_entry);
+	srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+	memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+
+out:
+	kvfree(xrcsrq_out);
+	return err;
+}
+
+static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+			  struct mlx5_create_srq_mbox_in *in, int srq_inlen)
+{
+	void *create_in;
+	void *rmpc;
+	void *srqc;
+	int pas_size;
+	int inlen;
+	int err;
+
+	srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
+	pas_size = get_pas_size(srqc);
+	inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
+	create_in = mlx5_vzalloc(inlen);
+	if (!create_in)
+		return -ENOMEM;
+
+	rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
+
+	memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
+	rmpc_srqc_reformat(srqc, rmpc, true);
+
+	err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
+
+	kvfree(create_in);
+	return err;
+}
+
+static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
+			   struct mlx5_core_srq *srq)
+{
+	return mlx5_core_destroy_rmp(dev, srq->srqn);
+}
+
+static int arm_rmp_cmd(struct mlx5_core_dev *dev,
+		       struct mlx5_core_srq *srq,
+		       u16 lwm)
+{
+	void *in;
+	void *rmpc;
+	void *wq;
+	void *bitmask;
+	int err;
+
+	in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+	if (!in)
+		return -ENOMEM;
+
+	rmpc =	  MLX5_ADDR_OF(modify_rmp_in,   in,   ctx);
+	bitmask = MLX5_ADDR_OF(modify_rmp_in,   in,   bitmask);
+	wq   =	  MLX5_ADDR_OF(rmpc,	        rmpc, wq);
+
+	MLX5_SET(modify_rmp_in, in,	 rmp_state, MLX5_RMPC_STATE_RDY);
+	MLX5_SET(modify_rmp_in, in,	 rmpn,      srq->srqn);
+	MLX5_SET(wq,		wq,	 lwm,	    lwm);
+	MLX5_SET(rmp_bitmask,	bitmask, lwm,	    1);
+	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+
+	err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
+
+	kvfree(in);
+	return err;
+}
+
+static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+			 struct mlx5_query_srq_mbox_out *out)
+{
+	u32 *rmp_out;
+	void *rmpc;
+	void *srqc;
+	int err;
+
+	rmp_out =  mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
+	if (!rmp_out)
+		return -ENOMEM;
+
+	err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
+	if (err)
+		goto out;
+
+	srqc = MLX5_ADDR_OF(query_srq_out, out,	    srq_context_entry);
+	rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
+	rmpc_srqc_reformat(srqc, rmpc, false);
+
+out:
+	kvfree(rmp_out);
+	return err;
+}
+
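+/* On ISSI 0 use the legacy SRQ commands; on ISSI 1 an XRC SRQ maps to
+ * the XRC_SRQ object and a basic SRQ is emulated with an RMP.
+ */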
+static int create_srq_split(struct mlx5_core_dev *dev,
+			    struct mlx5_core_srq *srq,
+			    struct mlx5_create_srq_mbox_in *in,
+			    int inlen, int is_xrc)
+{
+	if (!dev->issi)
+		return create_srq_cmd(dev, srq, in, inlen);
+	else if (srq->common.res == MLX5_RES_XSRQ)
+		return create_xrc_srq_cmd(dev, srq, in, inlen);
+	else
+		return create_rmp_cmd(dev, srq, in, inlen);
+}
+
+static int destroy_srq_split(struct mlx5_core_dev *dev,
+			     struct mlx5_core_srq *srq)
+{
+	if (!dev->issi)
+		return destroy_srq_cmd(dev, srq);
+	else if (srq->common.res == MLX5_RES_XSRQ)
+		return destroy_xrc_srq_cmd(dev, srq);
+	else
+		return destroy_rmp_cmd(dev, srq);
+}
+
+int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+			 struct mlx5_create_srq_mbox_in *in, int inlen,
+			 int is_xrc)
+{
+	int err;
+	struct mlx5_srq_table *table = &dev->priv.srq_table;
+
+	srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;
+
+	err = create_srq_split(dev, srq, in, inlen, is_xrc);
+	if (err)
+		return err;
+
 	atomic_set(&srq->refcount, 1);
 	init_completion(&srq->free);
 
@@ -107,25 +461,20 @@
 	spin_unlock_irq(&table->lock);
 	if (err) {
 		mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
-		goto err_cmd;
+		goto err_destroy_srq_split;
 	}
 
 	return 0;
 
-err_cmd:
-	memset(&din, 0, sizeof(din));
-	memset(&dout, 0, sizeof(dout));
-	din.srqn = cpu_to_be32(srq->srqn);
-	din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
-	mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
+err_destroy_srq_split:
+	destroy_srq_split(dev, srq);
+
 	return err;
 }
 EXPORT_SYMBOL(mlx5_core_create_srq);
 
 int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
 {
-	struct mlx5_destroy_srq_mbox_in in;
-	struct mlx5_destroy_srq_mbox_out out;
 	struct mlx5_srq_table *table = &dev->priv.srq_table;
 	struct mlx5_core_srq *tmp;
 	int err;
@@ -142,17 +491,10 @@
 		return -EINVAL;
 	}
 
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
-	in.srqn = cpu_to_be32(srq->srqn);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	err = destroy_srq_split(dev, srq);
 	if (err)
 		return err;
 
-	if (out.hdr.status)
-		return mlx5_cmd_status_to_err(&out.hdr);
-
 	if (atomic_dec_and_test(&srq->refcount))
 		complete(&srq->free);
 	wait_for_completion(&srq->free);
@@ -164,48 +506,24 @@
 int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
 			struct mlx5_query_srq_mbox_out *out)
 {
-	struct mlx5_query_srq_mbox_in in;
-	int err;
-
-	memset(&in, 0, sizeof(in));
-	memset(out, 0, sizeof(*out));
-
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
-	in.srqn = cpu_to_be32(srq->srqn);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
-	if (err)
-		return err;
-
-	if (out->hdr.status)
-		return mlx5_cmd_status_to_err(&out->hdr);
-
-	return err;
+	if (!dev->issi)
+		return query_srq_cmd(dev, srq, out);
+	else if (srq->common.res == MLX5_RES_XSRQ)
+		return query_xrc_srq_cmd(dev, srq, out);
+	else
+		return query_rmp_cmd(dev, srq, out);
 }
 EXPORT_SYMBOL(mlx5_core_query_srq);
 
 int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
 		      u16 lwm, int is_srq)
 {
-	struct mlx5_arm_srq_mbox_in	in;
-	struct mlx5_arm_srq_mbox_out	out;
-	int err;
-
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
-	in.hdr.opmod = cpu_to_be16(!!is_srq);
-	in.srqn = cpu_to_be32(srq->srqn);
-	in.lwm = cpu_to_be16(lwm);
-
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err)
-		return err;
-
-	if (out.hdr.status)
-		return mlx5_cmd_status_to_err(&out.hdr);
-
-	return err;
+	if (!dev->issi)
+		return arm_srq_cmd(dev, srq, lwm, is_srq);
+	else if (srq->common.res == MLX5_RES_XSRQ)
+		return arm_xrc_srq_cmd(dev, srq, lwm);
+	else
+		return arm_rmp_cmd(dev, srq, lwm);
 }
 EXPORT_SYMBOL(mlx5_core_arm_srq);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
new file mode 100644
index 0000000..8d98b03
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+#include "transobj.h"
+
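+/* A transport domain groups the transport objects (TIRs, TISes, RQs and
+ * SQs) created through the wrappers below.
+ */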
+int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
+{
+	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)];
+	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)];
+	int err;
+
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+
+	MLX5_SET(alloc_transport_domain_in, in, opcode,
+		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+
+	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+	if (!err)
+		*tdn = MLX5_GET(alloc_transport_domain_out, out,
+				transport_domain);
+
+	return err;
+}
+
+void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
+{
+	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)];
+	u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)];
+
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+
+	MLX5_SET(dealloc_transport_domain_in, in, opcode,
+		 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
+
+	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
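+/* Thin wrappers around the transport object commands; the create
+ * variants hand back the number of the newly created RQ/SQ/TIR/TIS.
+ */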
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
+{
+	u32 out[MLX5_ST_SZ_DW(create_rq_out)];
+	int err;
+
+	MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
+
+	memset(out, 0, sizeof(out));
+	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	if (!err)
+		*rqn = MLX5_GET(create_rq_out, out, rqn);
+
+	return err;
+}
+
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_rq_out)];
+
+	MLX5_SET(modify_rq_in, in, rqn, rqn);
+	MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
+
+	memset(out, 0, sizeof(out));
+	return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_rq_in)];
+	u32 out[MLX5_ST_SZ_DW(destroy_rq_out)];
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
+	MLX5_SET(destroy_rq_in, in, rqn, rqn);
+
+	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
+{
+	u32 out[MLX5_ST_SZ_DW(create_sq_out)];
+	int err;
+
+	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
+
+	memset(out, 0, sizeof(out));
+	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	if (!err)
+		*sqn = MLX5_GET(create_sq_out, out, sqn);
+
+	return err;
+}
+
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_sq_out)];
+
+	MLX5_SET(modify_sq_in, in, sqn, sqn);
+	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
+
+	memset(out, 0, sizeof(out));
+	return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_sq_in)];
+	u32 out[MLX5_ST_SZ_DW(destroy_sq_out)];
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
+	MLX5_SET(destroy_sq_in, in, sqn, sqn);
+
+	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *tirn)
+{
+	u32 out[MLX5_ST_SZ_DW(create_tir_out)];
+	int err;
+
+	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+
+	memset(out, 0, sizeof(out));
+	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	if (!err)
+		*tirn = MLX5_GET(create_tir_out, out, tirn);
+
+	return err;
+}
+
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
+	u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
+	MLX5_SET(destroy_tir_in, in, tirn, tirn);
+
+	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *tisn)
+{
+	u32 out[MLX5_ST_SZ_DW(create_tis_out)];
+	int err;
+
+	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+
+	memset(out, 0, sizeof(out));
+	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	if (!err)
+		*tisn = MLX5_GET(create_tis_out, out, tisn);
+
+	return err;
+}
+
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
+	u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+	MLX5_SET(destroy_tis_in, in, tisn, tisn);
+
+	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *rmpn)
+{
+	u32 out[MLX5_ST_SZ_DW(create_rmp_out)];
+	int err;
+
+	MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
+
+	memset(out, 0, sizeof(out));
+	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	if (!err)
+		*rmpn = MLX5_GET(create_rmp_out, out, rmpn);
+
+	return err;
+}
+
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_rmp_out)];
+
+	MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
+
+	memset(out, 0, sizeof(out));
+	return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)];
+	u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)];
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
+	MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
+
+	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+					  sizeof(out));
+}
+
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
+{
+	u32 in[MLX5_ST_SZ_DW(query_rmp_in)];
+	int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
+	MLX5_SET(query_rmp_in, in, rmpn,   rmpn);
+
+	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
+{
+	void *in;
+	void *rmpc;
+	void *wq;
+	void *bitmask;
+	int  err;
+
+	in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+	if (!in)
+		return -ENOMEM;
+
+	rmpc    = MLX5_ADDR_OF(modify_rmp_in,   in,   ctx);
+	bitmask = MLX5_ADDR_OF(modify_rmp_in,   in,   bitmask);
+	wq      = MLX5_ADDR_OF(rmpc,	        rmpc, wq);
+
+	MLX5_SET(modify_rmp_in, in,	 rmp_state, MLX5_RMPC_STATE_RDY);
+	MLX5_SET(modify_rmp_in, in,	 rmpn,      rmpn);
+	MLX5_SET(wq,		wq,	 lwm,	    lwm);
+	MLX5_SET(rmp_bitmask,	bitmask, lwm,	    1);
+	MLX5_SET(rmpc,		rmpc,	 state,	    MLX5_RMPC_STATE_RDY);
+
+	err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
+
+	kvfree(in);
+
+	return err;
+}
+
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			  u32 *xsrqn)
+{
+	u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+	int err;
+
+	MLX5_SET(create_xrc_srq_in, in, opcode,     MLX5_CMD_OP_CREATE_XRC_SRQ);
+
+	memset(out, 0, sizeof(out));
+	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	if (!err)
+		*xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
+
+	return err;
+}
+
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
+	u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
+
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+
+	MLX5_SET(destroy_xrc_srq_in, in, opcode,   MLX5_CMD_OP_DESTROY_XRC_SRQ);
+	MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
+
+	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+					  sizeof(out));
+}
+
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
+{
+	u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+	void *srqc;
+	void *xrc_srqc;
+	int err;
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(query_xrc_srq_in, in, opcode,   MLX5_CMD_OP_QUERY_XRC_SRQ);
+	MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);
+
+	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+					  out,
+					  MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+	if (!err) {
+		xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
+					xrc_srq_context_entry);
+		srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+		memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+	}
+
+	return err;
+}
+
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
+{
+	u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
+	u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
+
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+
+	MLX5_SET(arm_xrc_srq_in, in, opcode,   MLX5_CMD_OP_ARM_XRC_SRQ);
+	MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
+	MLX5_SET(arm_xrc_srq_in, in, lwm,      lwm);
+	MLX5_SET(arm_xrc_srq_in, in, op_mod,
+		 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+
+	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+					  sizeof(out));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
new file mode 100644
index 0000000..f9ef244
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __TRANSOBJ_H__
+#define __TRANSOBJ_H__
+
+int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn);
+void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn);
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			u32 *rqn);
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			u32 *sqn);
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *tirn);
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *tisn);
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *rmpn);
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			  u32 *rmpn);
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+
+#endif /* __TRANSOBJ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 5a89bb1..9ef8587 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -175,12 +175,13 @@
 	for (i = 0; i < tot_uuars; i++) {
 		bf = &uuari->bfs[i];
 
-		bf->buf_size = dev->caps.gen.bf_reg_size / 2;
+		bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2;
 		bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
 		bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
 		bf->reg = NULL; /* Add WC support */
-		bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.gen.bf_reg_size +
-			MLX5_BF_OFFSET;
+		bf->offset = (i % MLX5_BF_REGS_PER_PAGE) *
+			     (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) +
+			     MLX5_BF_OFFSET;
 		bf->need_lock = need_uuar_lock(i);
 		spin_lock_init(&bf->lock);
 		spin_lock_init(&bf->lock32);
@@ -223,3 +224,40 @@
 
 	return 0;
 }
+
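+/* Allocate a UAR (User Access Region) index and ioremap the page backing
+ * it in the device's UAR BAR.
+ */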
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+	phys_addr_t pfn;
+	phys_addr_t uar_bar_start;
+	int err;
+
+	err = mlx5_cmd_alloc_uar(mdev, &uar->index);
+	if (err) {
+		mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
+		return err;
+	}
+
+	uar_bar_start = pci_resource_start(mdev->pdev, 0);
+	pfn           = (uar_bar_start >> PAGE_SHIFT) + uar->index;
+	uar->map      = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+	if (!uar->map) {
+		mlx5_core_warn(mdev, "ioremap() failed\n");
+		err = -ENOMEM;
+		goto err_free_uar;
+	}
+
+	return 0;
+
+err_free_uar:
+	mlx5_cmd_free_uar(mdev, uar->index);
+
+	return err;
+}
+EXPORT_SYMBOL(mlx5_alloc_map_uar);
+
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+	iounmap(uar->map);
+	mlx5_cmd_free_uar(mdev, uar->index);
+}
+EXPORT_SYMBOL(mlx5_unmap_free_uar);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
new file mode 100644
index 0000000..b94177e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_core.h"
+
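+/* Query the state of a vport; @opmod selects the vport type the query
+ * applies to.
+ */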
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
+{
+	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
+	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)];
+	int err;
+
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+
+	MLX5_SET(query_vport_state_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_VPORT_STATE);
+	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
+
+	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
+					 sizeof(out));
+	if (err)
+		mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
+
+	return MLX5_GET(query_vport_state_out, out, state);
+}
+EXPORT_SYMBOL(mlx5_query_vport_state);
+
+void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
+{
+	u32  in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
+	u32 *out;
+	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+	u8 *out_addr;
+
+	out = mlx5_vzalloc(outlen);
+	if (!out)
+		return;
+
+	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
+				nic_vport_context.permanent_address);
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(query_nic_vport_context_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+
+	memset(out, 0, outlen);
+	mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
+
+	ether_addr_copy(addr, &out_addr[2]);
+
+	kvfree(out);
+}
+EXPORT_SYMBOL(mlx5_query_nic_vport_mac_address);
+
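+/* A @gid_index of 0xffff requests the whole GID table; otherwise a
+ * single entry is queried.
+ */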
+int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
+			     u8 port_num, u16  vf_num, u16 gid_index,
+			     union ib_gid *gid)
+{
+	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
+	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
+	int is_group_manager;
+	void *out = NULL;
+	void *in = NULL;
+	union ib_gid *tmp;
+	int tbsz;
+	int nout;
+	int err;
+
+	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
+	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
+		      vf_num, gid_index, tbsz);
+
+	if (gid_index > tbsz && gid_index != 0xffff)
+		return -EINVAL;
+
+	if (gid_index == 0xffff)
+		nout = tbsz;
+	else
+		nout = 1;
+
+	out_sz += nout * sizeof(*gid);
+
+	in = kzalloc(in_sz, GFP_KERNEL);
+	out = kzalloc(out_sz, GFP_KERNEL);
+	if (!in || !out) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
+	if (other_vport) {
+		if (is_group_manager) {
+			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
+			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
+		} else {
+			err = -EPERM;
+			goto out;
+		}
+	}
+	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
+
+	if (MLX5_CAP_GEN(dev, num_ports) == 2)
+		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
+
+	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+	if (err)
+		goto out;
+
+	err = mlx5_cmd_status_to_err_v2(out);
+	if (err)
+		goto out;
+
+	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
+	gid->global.subnet_prefix = tmp->global.subnet_prefix;
+	gid->global.interface_id = tmp->global.interface_id;
+
+out:
+	kfree(in);
+	kfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
+
+int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
+			      u8 port_num, u16 vf_num, u16 pkey_index,
+			      u16 *pkey)
+{
+	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
+	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
+	int is_group_manager;
+	void *out = NULL;
+	void *in = NULL;
+	void *pkarr;
+	int nout;
+	int tbsz;
+	int err;
+	int i;
+
+	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+
+	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
+	if (pkey_index > tbsz && pkey_index != 0xffff)
+		return -EINVAL;
+
+	if (pkey_index == 0xffff)
+		nout = tbsz;
+	else
+		nout = 1;
+
+	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
+
+	in = kzalloc(in_sz, GFP_KERNEL);
+	out = kzalloc(out_sz, GFP_KERNEL);
+	if (!in || !out) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
+	if (other_vport) {
+		if (is_group_manager) {
+			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
+			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
+		} else {
+			err = -EPERM;
+			goto out;
+		}
+	}
+	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
+
+	if (MLX5_CAP_GEN(dev, num_ports) == 2)
+		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
+
+	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+	if (err)
+		goto out;
+
+	err = mlx5_cmd_status_to_err_v2(out);
+	if (err)
+		goto out;
+
+	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
+	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
+		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);
+
+out:
+	kfree(in);
+	kfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
+
+int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
+				 u8 other_vport, u8 port_num,
+				 u16 vf_num,
+				 struct mlx5_hca_vport_context *rep)
+{
+	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
+	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
+	int is_group_manager;
+	void *out;
+	void *ctx;
+	int err;
+
+	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+
+	memset(in, 0, sizeof(in));
+	out = kzalloc(out_sz, GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
+
+	if (other_vport) {
+		if (is_group_manager) {
+			MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
+			MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
+		} else {
+			err = -EPERM;
+			goto ex;
+		}
+	}
+
+	if (MLX5_CAP_GEN(dev, num_ports) == 2)
+		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
+
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out,  out_sz);
+	if (err)
+		goto ex;
+	err = mlx5_cmd_status_to_err_v2(out);
+	if (err)
+		goto ex;
+
+	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
+	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
+	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
+	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
+	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
+	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
+	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
+				      port_physical_state);
+	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
+	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
+					       port_physical_state);
+	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
+	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
+	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
+	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
+					  cap_mask1_field_select);
+	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
+	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
+					  cap_mask2_field_select);
+	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
+	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
+					   init_type_reply);
+	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
+	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
+					  subnet_timeout);
+	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
+	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
+	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
+						  qkey_violation_counter);
+	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
+						  pkey_violation_counter);
+	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
+	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
+					    system_image_guid);
+
+ex:
+	kfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
+
+int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
+					   u64 *sys_image_guid)
+{
+	struct mlx5_hca_vport_context *rep;
+	int err;
+
+	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+	if (!rep)
+		return -ENOMEM;
+
+	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
+	if (!err)
+		*sys_image_guid = rep->sys_image_guid;
+
+	kfree(rep);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
+
+int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
+				   u64 *node_guid)
+{
+	struct mlx5_hca_vport_context *rep;
+	int err;
+
+	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+	if (!rep)
+		return -ENOMEM;
+
+	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
+	if (!err)
+		*node_guid = rep->node_guid;
+
+	kfree(rep);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
new file mode 100644
index 0000000..8388411
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "wq.h"
+#include "mlx5_core.h"
+
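+/* Work queue size helpers: sz_m1 stores the entry count minus one so it
+ * can double as an index mask.
+ */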
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
+{
+	return (u32)wq->sz_m1 + 1;
+}
+
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
+{
+	return wq->sz_m1 + 1;
+}
+
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
+{
+	return (u32)wq->sz_m1 + 1;
+}
+
+static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
+{
+	return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
+{
+	return mlx5_cqwq_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
+{
+	return mlx5_wq_ll_get_size(wq) << wq->log_stride;
+}
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+		       void *wqc, struct mlx5_wq_cyc *wq,
+		       struct mlx5_wq_ctrl *wq_ctrl)
+{
+	int err;
+
+	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+	err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+	if (err) {
+		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+		return err;
+	}
+
+	err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
+	if (err) {
+		mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+		goto err_db_free;
+	}
+
+	wq->buf = wq_ctrl->buf.direct.buf;
+	wq->db  = wq_ctrl->db.db;
+
+	wq_ctrl->mdev = mdev;
+
+	return 0;
+
+err_db_free:
+	mlx5_db_free(mdev, &wq_ctrl->db);
+
+	return err;
+}
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+		     void *cqc, struct mlx5_cqwq *wq,
+		     struct mlx5_wq_ctrl *wq_ctrl)
+{
+	int err;
+
+	wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
+	wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
+	wq->sz_m1 = (1 << wq->log_sz) - 1;
+
+	err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+	if (err) {
+		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+		return err;
+	}
+
+	err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
+	if (err) {
+		mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+		goto err_db_free;
+	}
+
+	wq->buf = wq_ctrl->buf.direct.buf;
+	wq->db  = wq_ctrl->db.db;
+
+	wq_ctrl->mdev = mdev;
+
+	return 0;
+
+err_db_free:
+	mlx5_db_free(mdev, &wq_ctrl->db);
+
+	return err;
+}
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+		      void *wqc, struct mlx5_wq_ll *wq,
+		      struct mlx5_wq_ctrl *wq_ctrl)
+{
+	struct mlx5_wqe_srq_next_seg *next_seg;
+	int err;
+	int i;
+
+	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+	err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+	if (err) {
+		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+		return err;
+	}
+
+	err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
+	if (err) {
+		mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+		goto err_db_free;
+	}
+
+	wq->buf = wq_ctrl->buf.direct.buf;
+	wq->db  = wq_ctrl->db.db;
+
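+	/* Chain the WQEs into a singly linked free list: entry i points to
+	 * entry i + 1, and the last entry's next pointer becomes the list
+	 * tail below.
+	 */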
+	for (i = 0; i < wq->sz_m1; i++) {
+		next_seg = mlx5_wq_ll_get_wqe(wq, i);
+		next_seg->next_wqe_index = cpu_to_be16(i + 1);
+	}
+	next_seg = mlx5_wq_ll_get_wqe(wq, i);
+	wq->tail_next = &next_seg->next_wqe_index;
+
+	wq_ctrl->mdev = mdev;
+
+	return 0;
+
+err_db_free:
+	mlx5_db_free(mdev, &wq_ctrl->db);
+
+	return err;
+}
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
+{
+	mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
+	mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
new file mode 100644
index 0000000..e0ddd69
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_WQ_H__
+#define __MLX5_WQ_H__
+
+#include <linux/mlx5/mlx5_ifc.h>
+
+struct mlx5_wq_param {
+	int		linear;
+	int		numa;
+};
+
+struct mlx5_wq_ctrl {
+	struct mlx5_core_dev	*mdev;
+	struct mlx5_buf		buf;
+	struct mlx5_db		db;
+};
+
+struct mlx5_wq_cyc {
+	void			*buf;
+	__be32			*db;
+	u16			sz_m1;
+	u8			log_stride;
+};
+
+struct mlx5_cqwq {
+	void			*buf;
+	__be32			*db;
+	u32			sz_m1;
+	u32			cc; /* consumer counter */
+	u8			log_sz;
+	u8			log_stride;
+};
+
+struct mlx5_wq_ll {
+	void			*buf;
+	__be32			*db;
+	__be16			*tail_next;
+	u16			sz_m1;
+	u16			head;
+	u16			wqe_ctr;
+	u16			cur_sz;
+	u8			log_stride;
+};
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+		       void *wqc, struct mlx5_wq_cyc *wq,
+		       struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+		     void *cqc, struct mlx5_cqwq *wq,
+		     struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+		      void *wqc, struct mlx5_wq_ll *wq,
+		      struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
+
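+/* Map a free-running counter to a ring index (the ring size is a power of 2) */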
+static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
+{
+	return ctr & wq->sz_m1;
+}
+
+static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
+{
+	return wq->buf + (ix << wq->log_stride);
+}
+
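+/* Wraparound-safe comparison of two 16-bit cyclic counters: cc1 is "bigger"
+ * than cc2 iff it is ahead of cc2 by less than half the counter space
+ * (serial number arithmetic).
+ */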
+static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
+{
+	int equal   = (cc1 == cc2);
+	int smaller = 0x8000 & (cc1 - cc2);
+
+	return !equal && !smaller;
+}
+
+static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
+{
+	return wq->cc & wq->sz_m1;
+}
+
+static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
+{
+	return wq->buf + (ix << wq->log_stride);
+}
+
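+/* Number of complete passes the consumer counter has made around the CQ */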
+static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
+{
+	return wq->cc >> wq->log_sz;
+}
+
+static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
+{
+	wq->cc++;
+}
+
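+/* Publish the low 24 bits of the consumer counter to the CQ doorbell record */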
+static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
+{
+	*wq->db = cpu_to_be32(wq->cc & 0xffffff);
+}
+
+static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
+{
+	return wq->cur_sz == wq->sz_m1;
+}
+
+static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
+{
+	return !wq->cur_sz;
+}
+
+static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
+{
+	return wq->buf + (ix << wq->log_stride);
+}
+
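+/* Take a WQE off the free list: advance the list head, the WQE counter
+ * (later published to the doorbell record) and the in-use count.
+ */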
+static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
+{
+	wq->head = head_next;
+	wq->wqe_ctr++;
+	wq->cur_sz++;
+}
+
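+/* Return WQE @ix to the free list: link it in at the current tail, then
+ * make its own next pointer the new tail.
+ */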
+static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix,
+				  __be16 *next_tail_next)
+{
+	*wq->tail_next = ix;
+	wq->tail_next = next_tail_next;
+	wq->cur_sz--;
+}
+
+static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq)
+{
+	*wq->db = cpu_to_be32(wq->wqe_ctr);
+}
+
+#endif /* __MLX5_WQ_H__ */
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index d16b11e..b7e2f49 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -8,9 +8,7 @@
 	depends on (HAS_IOMEM && DMA_ENGINE) || SPI || PCI || HAS_IOMEM || \
 		   (ARM && ARCH_KS8695)
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 6f332eb..75dc46c 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -6664,7 +6664,7 @@
 				wake_up_interruptible(
 					&hw_priv->counter[i].counter);
 			}
-		} else if (jiffies >= hw_priv->counter[i].time) {
+		} else if (time_after_eq(jiffies, hw_priv->counter[i].time)) {
 			/* Only read MIB counters when the port is connected. */
 			if (media_connected == mib->state)
 				hw_priv->counter[i].read = 1;
@@ -6689,7 +6689,7 @@
 
 	/* This is used to verify Wake-on-LAN is working. */
 	if (hw_priv->pme_wait) {
-		if (hw_priv->pme_wait <= jiffies) {
+		if (time_is_before_eq_jiffies(hw_priv->pme_wait)) {
 			hw_clr_wol_pme_status(&hw_priv->hw);
 			hw_priv->pme_wait = 0;
 		}
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index afaf0c0..3fd8ca6 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on SPI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/moxa/Kconfig b/drivers/net/ethernet/moxa/Kconfig
index 1731e05..5b531da 100644
--- a/drivers/net/ethernet/moxa/Kconfig
+++ b/drivers/net/ethernet/moxa/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on (ARM && ARCH_MOXART)
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 81d0f1c..becbb5f 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -244,7 +244,6 @@
 		napi_gro_receive(&priv->napi, skb);
 		rx++;
 
-		ndev->last_rx = jiffies;
 		priv->stats.rx_packets++;
 		priv->stats.rx_bytes += len;
 		if (desc0 & RX_DESC0_MULTICAST)
diff --git a/drivers/net/ethernet/myricom/Kconfig b/drivers/net/ethernet/myricom/Kconfig
index 3932d08..9645c72 100644
--- a/drivers/net/ethernet/myricom/Kconfig
+++ b/drivers/net/ethernet/myricom/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI && INET
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say
-	  Y and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig
index a100860..a10ef50 100644
--- a/drivers/net/ethernet/natsemi/Kconfig
+++ b/drivers/net/ethernet/natsemi/Kconfig
@@ -6,9 +6,7 @@
 	bool "National Semi-conductor devices"
 	default y
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
@@ -24,8 +22,7 @@
 	  Support for NatSemi SONIC based Ethernet devices.  This includes
 	  the onboard Ethernet in many Quadras as well as some LC-PDS,
 	  a few Nubus and all known Comm Slot Ethernet cards.  If you have
-	  one of these say Y and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  one of these say Y here.
 
 	  To compile this driver as a module, choose M here. This module will
 	  be called macsonic.
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
index 87abb4f..7189900 100644
--- a/drivers/net/ethernet/neterion/Kconfig
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say
-	  Y and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 1e0f72b..c281117 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5308,7 +5308,8 @@
 
 /**
  * s2io_ethtool_sset - Sets different link parameters.
- * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
  * @info: pointer to the structure with parameters given by ethtool to set
  * link information.
  * Description:
@@ -5793,7 +5794,8 @@
 
 /**
  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
- *  @sp : private member of the device structure, which is a pointer to the *       s2io_nic structure.
+ *  @sp : private member of the device structure, which is a pointer to the
+ *  s2io_nic structure.
  *  @eeprom : pointer to the user level structure provided by ethtool,
  *  containing all relevant information.
  *  @data_buf : user defined value to be written into Eeprom.
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 9e1aaa7..5f630a2 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -1004,8 +1004,6 @@
 static enum vxge_hw_status
 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
 {
-	void **tmp_arr;
-
 	if (channel->reserve_ptr - channel->reserve_top > 0) {
 _alloc_after_swap:
 		*dtrh =	channel->reserve_arr[--channel->reserve_ptr];
@@ -1020,10 +1018,7 @@
	 * i.e. no additional locking is needed when we free a resource */
 
 	if (channel->length - channel->free_ptr > 0) {
-
-		tmp_arr	= channel->reserve_arr;
-		channel->reserve_arr = channel->free_arr;
-		channel->free_arr = tmp_arr;
+		swap(channel->reserve_arr, channel->free_arr);
 		channel->reserve_ptr = channel->length;
 		channel->reserve_top = channel->free_ptr;
 		channel->free_ptr = channel->length;
diff --git a/drivers/net/ethernet/nuvoton/Kconfig b/drivers/net/ethernet/nuvoton/Kconfig
index 01182b5..71c973f 100644
--- a/drivers/net/ethernet/nuvoton/Kconfig
+++ b/drivers/net/ethernet/nuvoton/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on ARM && ARCH_W90X900
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/nvidia/Kconfig b/drivers/net/ethernet/nvidia/Kconfig
index ace19e7..4efc9fe 100644
--- a/drivers/net/ethernet/nvidia/Kconfig
+++ b/drivers/net/ethernet/nvidia/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
@@ -22,9 +20,7 @@
 	tristate "nForce Ethernet support"
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) controller of this type, say Y and
-	  read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) controller of this type, say Y here.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called forcedeth.
diff --git a/drivers/net/ethernet/oki-semi/Kconfig b/drivers/net/ethernet/oki-semi/Kconfig
index ecd45f9..5a975af 100644
--- a/drivers/net/ethernet/oki-semi/Kconfig
+++ b/drivers/net/ethernet/oki-semi/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig
index 8d51800..b5ea2a5 100644
--- a/drivers/net/ethernet/packetengines/Kconfig
+++ b/drivers/net/ethernet/packetengines/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
@@ -23,9 +21,7 @@
 	depends on PCI
 	select MII
 	---help---
-	  If you have a Gigabit Ethernet card of this type, say Y and read
-	  the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a Gigabit Ethernet card of this type, say Y here.
 
 	  To compile this driver as a module, choose M here. The module will be
 	  called hamachi.
diff --git a/drivers/net/ethernet/pasemi/Kconfig b/drivers/net/ethernet/pasemi/Kconfig
index 01e6c32..db19c6f 100644
--- a/drivers/net/ethernet/pasemi/Kconfig
+++ b/drivers/net/ethernet/pasemi/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PPC_PASEMI && PCI && INET
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index d49cba1..f1f0108 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f221126..055f376 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1326,9 +1326,6 @@
 };
 
 
-/* Return codes for Error handling */
-#define QL_STATUS_INVALID_PARAM	-1
-
 #define MAX_BW			100	/* % of link speed */
 #define MIN_BW			1	/* % of link speed */
 #define MAX_VLAN_ID		4095
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 367f397..2f6cc42 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1031,7 +1031,7 @@
 		pfn = pci_info[i].id;
 
 		if (pfn >= ahw->max_vnic_func) {
-			ret = QL_STATUS_INVALID_PARAM;
+			ret = -EINVAL;
 			dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
 				__func__, pfn, ahw->max_vnic_func);
 			goto err_eswitch;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 59a721f..05c28f2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -24,8 +24,6 @@
 #include <linux/hwmon-sysfs.h>
 #endif
 
-#define QLC_STATUS_UNSUPPORTED_CMD	-2
-
 int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
 {
 	return -EOPNOTSUPP;
@@ -166,7 +164,7 @@
 	u8 b_state, b_rate;
 
 	if (len != sizeof(u16))
-		return QL_STATUS_INVALID_PARAM;
+		return -EINVAL;
 
 	memcpy(&beacon, buf, sizeof(u16));
 	err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
@@ -383,17 +381,17 @@
 		dest_pci_func = pm_cfg[i].dest_npar;
 		src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
 		if (src_index < 0)
-			return QL_STATUS_INVALID_PARAM;
+			return -EINVAL;
 
 		dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func);
 		if (dest_index < 0)
-			return QL_STATUS_INVALID_PARAM;
+			return -EINVAL;
 
 		s_esw_id = adapter->npars[src_index].phy_port;
 		d_esw_id = adapter->npars[dest_index].phy_port;
 
 		if (s_esw_id != d_esw_id)
-			return QL_STATUS_INVALID_PARAM;
+			return -EINVAL;
 	}
 
 	return 0;
@@ -414,7 +412,7 @@
 	count	= size / sizeof(struct qlcnic_pm_func_cfg);
 	rem	= size % sizeof(struct qlcnic_pm_func_cfg);
 	if (rem)
-		return QL_STATUS_INVALID_PARAM;
+		return -EINVAL;
 
 	qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 	pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
@@ -427,7 +425,7 @@
 		action = !!pm_cfg[i].action;
 		index = qlcnic_is_valid_nic_func(adapter, pci_func);
 		if (index < 0)
-			return QL_STATUS_INVALID_PARAM;
+			return -EINVAL;
 
 		id = adapter->npars[index].phy_port;
 		ret = qlcnic_config_port_mirroring(adapter, id,
@@ -440,7 +438,7 @@
 		pci_func = pm_cfg[i].pci_func;
 		index = qlcnic_is_valid_nic_func(adapter, pci_func);
 		if (index < 0)
-			return QL_STATUS_INVALID_PARAM;
+			return -EINVAL;
 		id = adapter->npars[index].phy_port;
 		adapter->npars[index].enable_pm = !!pm_cfg[i].action;
 		adapter->npars[index].dest_npar = id;
@@ -499,11 +497,11 @@
 	for (i = 0; i < count; i++) {
 		pci_func = esw_cfg[i].pci_func;
 		if (pci_func >= ahw->max_vnic_func)
-			return QL_STATUS_INVALID_PARAM;
+			return -EINVAL;
 
 		if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
 			if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
-				return QL_STATUS_INVALID_PARAM;
+				return -EINVAL;
 
 		switch (esw_cfg[i].op_mode) {
 		case QLCNIC_PORT_DEFAULTS:
@@ -517,25 +515,25 @@
 
 			if (ret != QLCNIC_NON_PRIV_FUNC) {
 				if (esw_cfg[i].mac_anti_spoof != 0)
-					return QL_STATUS_INVALID_PARAM;
+					return -EINVAL;
 				if (esw_cfg[i].mac_override != 1)
-					return QL_STATUS_INVALID_PARAM;
+					return -EINVAL;
 				if (esw_cfg[i].promisc_mode != 1)
-					return QL_STATUS_INVALID_PARAM;
+					return -EINVAL;
 			}
 			break;
 		case QLCNIC_ADD_VLAN:
 			if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
-				return QL_STATUS_INVALID_PARAM;
+				return -EINVAL;
 			if (!esw_cfg[i].op_type)
-				return QL_STATUS_INVALID_PARAM;
+				return -EINVAL;
 			break;
 		case QLCNIC_DEL_VLAN:
 			if (!esw_cfg[i].op_type)
-				return QL_STATUS_INVALID_PARAM;
+				return -EINVAL;
 			break;
 		default:
-			return QL_STATUS_INVALID_PARAM;
+			return -EINVAL;
 		}
 	}
 
@@ -559,7 +557,7 @@
 	count	= size / sizeof(struct qlcnic_esw_func_cfg);
 	rem	= size % sizeof(struct qlcnic_esw_func_cfg);
 	if (rem)
-		return QL_STATUS_INVALID_PARAM;
+		return -EINVAL;
 
 	qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 	esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
@@ -570,7 +568,7 @@
 	for (i = 0; i < count; i++) {
 		if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
 			if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
-				return QL_STATUS_INVALID_PARAM;
+				return -EINVAL;
 
 		if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
 			continue;
@@ -604,7 +602,7 @@
 		pci_func = esw_cfg[i].pci_func;
 		index = qlcnic_is_valid_nic_func(adapter, pci_func);
 		if (index < 0)
-			return QL_STATUS_INVALID_PARAM;
+			return -EINVAL;
 		npar = &adapter->npars[index];
 		switch (esw_cfg[i].op_mode) {
 		case QLCNIC_PORT_DEFAULTS:
@@ -654,7 +652,7 @@
 
 		esw_cfg[pci_func].pci_func = pci_func;
 		if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
-			return QL_STATUS_INVALID_PARAM;
+			return -EINVAL;
 	}
 	qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 	return size;
@@ -669,11 +667,11 @@
 	for (i = 0; i < count; i++) {
 		pci_func = np_cfg[i].pci_func;
 		if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
-			return QL_STATUS_INVALID_PARAM;
+			return -EINVAL;
 
 		if (!IS_VALID_BW(np_cfg[i].min_bw) ||
 		    !IS_VALID_BW(np_cfg[i].max_bw))
-			return QL_STATUS_INVALID_PARAM;
+			return -EINVAL;
 	}
 	return 0;
 }
@@ -694,7 +692,7 @@
 	count	= size / sizeof(struct qlcnic_npar_func_cfg);
 	rem	= size % sizeof(struct qlcnic_npar_func_cfg);
 	if (rem)
-		return QL_STATUS_INVALID_PARAM;
+		return -EINVAL;
 
 	qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 	np_cfg = (struct qlcnic_npar_func_cfg *)buf;
@@ -717,7 +715,7 @@
 			return ret;
 		index = qlcnic_is_valid_nic_func(adapter, pci_func);
 		if (index < 0)
-			return QL_STATUS_INVALID_PARAM;
+			return -EINVAL;
 		adapter->npars[index].min_bw = nic_info.min_tx_bw;
 		adapter->npars[index].max_bw = nic_info.max_tx_bw;
 	}
@@ -784,13 +782,13 @@
 	int ret;
 
 	if (qlcnic_83xx_check(adapter))
-		return QLC_STATUS_UNSUPPORTED_CMD;
+		return -EOPNOTSUPP;
 
 	if (size != sizeof(struct qlcnic_esw_statistics))
-		return QL_STATUS_INVALID_PARAM;
+		return -EINVAL;
 
 	if (offset >= adapter->ahw->max_vnic_func)
-		return QL_STATUS_INVALID_PARAM;
+		return -EINVAL;
 
 	memset(&port_stats, 0, size);
 	ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
@@ -819,13 +817,13 @@
 	int ret;
 
 	if (qlcnic_83xx_check(adapter))
-		return QLC_STATUS_UNSUPPORTED_CMD;
+		return -EOPNOTSUPP;
 
 	if (size != sizeof(struct qlcnic_esw_statistics))
-		return QL_STATUS_INVALID_PARAM;
+		return -EINVAL;
 
 	if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
-		return QL_STATUS_INVALID_PARAM;
+		return -EINVAL;
 
 	memset(&esw_stats, 0, size);
 	ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
@@ -853,10 +851,10 @@
 	int ret;
 
 	if (qlcnic_83xx_check(adapter))
-		return QLC_STATUS_UNSUPPORTED_CMD;
+		return -EOPNOTSUPP;
 
 	if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
-		return QL_STATUS_INVALID_PARAM;
+		return -EINVAL;
 
 	ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
 				     QLCNIC_QUERY_RX_COUNTER);
@@ -883,10 +881,10 @@
 	int ret;
 
 	if (qlcnic_83xx_check(adapter))
-		return QLC_STATUS_UNSUPPORTED_CMD;
+		return -EOPNOTSUPP;
 
 	if (offset >= adapter->ahw->max_vnic_func)
-		return QL_STATUS_INVALID_PARAM;
+		return -EINVAL;
 
 	ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
 				     QLCNIC_QUERY_RX_COUNTER);
@@ -953,9 +951,7 @@
 	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
 
 	if (!size)
-		return QL_STATUS_INVALID_PARAM;
-	if (!buf)
-		return QL_STATUS_INVALID_PARAM;
+		return -EINVAL;
 
 	count = size / sizeof(u32);
 
@@ -1132,9 +1128,6 @@
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
 
-	if (!buf)
-		return QL_STATUS_INVALID_PARAM;
-
 	ret = kstrtoul(buf, 16, &data);
 
 	switch (data) {
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 25800a1..02b7115 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3871,9 +3871,6 @@
 		return status;
 	}
 
-	end_jiffies = jiffies +
-		max((unsigned long)1, usecs_to_jiffies(30));
-
 	/* Check if bit is set then skip the mailbox command and
 	 * clear the bit, else we are in normal reset process.
 	 */
@@ -3888,6 +3885,7 @@
 
 	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
 
+	end_jiffies = jiffies + usecs_to_jiffies(30);
 	do {
 		value = ql_read32(qdev, RST_FO);
 		if ((value & RST_FO_FR) == 0)
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index 9a49f42..a76e380 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -6,9 +6,7 @@
 	bool "Qualcomm devices"
 	default y
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 6af028d..2f87909 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -839,7 +839,7 @@
 MODULE_DEVICE_TABLE(of, qca_spi_of_match);
 
 static int
-qca_spi_probe(struct spi_device *spi_device)
+qca_spi_probe(struct spi_device *spi)
 {
 	struct qcaspi *qca = NULL;
 	struct net_device *qcaspi_devs = NULL;
@@ -847,52 +847,52 @@
 	u16 signature;
 	const char *mac;
 
-	if (!spi_device->dev.of_node) {
-		dev_err(&spi_device->dev, "Missing device tree\n");
+	if (!spi->dev.of_node) {
+		dev_err(&spi->dev, "Missing device tree\n");
 		return -EINVAL;
 	}
 
-	legacy_mode = of_property_read_bool(spi_device->dev.of_node,
+	legacy_mode = of_property_read_bool(spi->dev.of_node,
 					    "qca,legacy-mode");
 
 	if (qcaspi_clkspeed == 0) {
-		if (spi_device->max_speed_hz)
-			qcaspi_clkspeed = spi_device->max_speed_hz;
+		if (spi->max_speed_hz)
+			qcaspi_clkspeed = spi->max_speed_hz;
 		else
 			qcaspi_clkspeed = QCASPI_CLK_SPEED;
 	}
 
 	if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
 	    (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
-		dev_info(&spi_device->dev, "Invalid clkspeed: %d\n",
+		dev_info(&spi->dev, "Invalid clkspeed: %d\n",
 			 qcaspi_clkspeed);
 		return -EINVAL;
 	}
 
 	if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
 	    (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
-		dev_info(&spi_device->dev, "Invalid burst len: %d\n",
+		dev_info(&spi->dev, "Invalid burst len: %d\n",
 			 qcaspi_burst_len);
 		return -EINVAL;
 	}
 
 	if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
 	    (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
-		dev_info(&spi_device->dev, "Invalid pluggable: %d\n",
+		dev_info(&spi->dev, "Invalid pluggable: %d\n",
 			 qcaspi_pluggable);
 		return -EINVAL;
 	}
 
-	dev_info(&spi_device->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
+	dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
 		 QCASPI_DRV_VERSION,
 		 qcaspi_clkspeed,
 		 qcaspi_burst_len,
 		 qcaspi_pluggable);
 
-	spi_device->mode = SPI_MODE_3;
-	spi_device->max_speed_hz = qcaspi_clkspeed;
-	if (spi_setup(spi_device) < 0) {
-		dev_err(&spi_device->dev, "Unable to setup SPI device\n");
+	spi->mode = SPI_MODE_3;
+	spi->max_speed_hz = qcaspi_clkspeed;
+	if (spi_setup(spi) < 0) {
+		dev_err(&spi->dev, "Unable to setup SPI device\n");
 		return -EFAULT;
 	}
 
@@ -905,23 +905,23 @@
 	qca = netdev_priv(qcaspi_devs);
 	if (!qca) {
 		free_netdev(qcaspi_devs);
-		dev_err(&spi_device->dev, "Fail to retrieve private structure\n");
+		dev_err(&spi->dev, "Fail to retrieve private structure\n");
 		return -ENOMEM;
 	}
 	qca->net_dev = qcaspi_devs;
-	qca->spi_dev = spi_device;
+	qca->spi_dev = spi;
 	qca->legacy_mode = legacy_mode;
 
-	spi_set_drvdata(spi_device, qcaspi_devs);
+	spi_set_drvdata(spi, qcaspi_devs);
 
-	mac = of_get_mac_address(spi_device->dev.of_node);
+	mac = of_get_mac_address(spi->dev.of_node);
 
 	if (mac)
 		ether_addr_copy(qca->net_dev->dev_addr, mac);
 
 	if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
 		eth_hw_addr_random(qca->net_dev);
-		dev_info(&spi_device->dev, "Using random MAC address: %pM\n",
+		dev_info(&spi->dev, "Using random MAC address: %pM\n",
 			 qca->net_dev->dev_addr);
 	}
 
@@ -932,7 +932,7 @@
 		qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
 
 		if (signature != QCASPI_GOOD_SIGNATURE) {
-			dev_err(&spi_device->dev, "Invalid signature (0x%04X)\n",
+			dev_err(&spi->dev, "Invalid signature (0x%04X)\n",
 				signature);
 			free_netdev(qcaspi_devs);
 			return -EFAULT;
@@ -940,7 +940,7 @@
 	}
 
 	if (register_netdev(qcaspi_devs)) {
-		dev_info(&spi_device->dev, "Unable to register net device %s\n",
+		dev_info(&spi->dev, "Unable to register net device %s\n",
 			 qcaspi_devs->name);
 		free_netdev(qcaspi_devs);
 		return -EFAULT;
@@ -952,9 +952,9 @@
 }
 
 static int
-qca_spi_remove(struct spi_device *spi_device)
+qca_spi_remove(struct spi_device *spi)
 {
-	struct net_device *qcaspi_devs = spi_get_drvdata(spi_device);
+	struct net_device *qcaspi_devs = spi_get_drvdata(spi);
 	struct qcaspi *qca = netdev_priv(qcaspi_devs);
 
 	qcaspi_remove_device_debugfs(qca);
diff --git a/drivers/net/ethernet/rdc/Kconfig b/drivers/net/ethernet/rdc/Kconfig
index 2055f7e..a9c4e99 100644
--- a/drivers/net/ethernet/rdc/Kconfig
+++ b/drivers/net/ethernet/rdc/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index ae5d027..7c69f4c 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI || (PARPORT && X86)
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
@@ -24,8 +22,7 @@
 	select CRC32
 	---help---
 	  This is a network (Ethernet) device which attaches to your parallel
-	  port. Read <file:drivers/net/ethernet/realtek/atp.c> as well as the
-	  Ethernet-HOWTO, available from <http://www.tldp.org/docs.html#howto>,
+	  port. Read the file <file:drivers/net/ethernet/realtek/atp.c>
 	  if you want to use this.  If you intend to use this driver, you
 	  should have said N to the "Parallel printer support", because the two
 	  drivers don't like each other.
@@ -40,9 +37,7 @@
 	select MII
 	---help---
 	  This is a driver for the Fast Ethernet PCI network cards based on
-	  the RTL8139C+ chips. If you have one of those, say Y and read
-	  the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  the RTL8139C+ chips. If you have one of those, say Y here.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called 8139cp.  This is recommended.
@@ -54,8 +49,7 @@
 	select MII
 	---help---
 	  This is a driver for the Fast Ethernet PCI network cards based on
-	  the RTL 8129/8130/8139 chips. If you have one of those, say Y and
-	  read the Ethernet-HOWTO <http://www.tldp.org/docs.html#howto>.
+	  the RTL 8129/8130/8139 chips. If you have one of those, say Y here.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called 8139too.  This is recommended.
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 196e98a..270c4c9 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -2,6 +2,19 @@
 # Renesas device configuration
 #
 
+config NET_VENDOR_RENESAS
+	bool "Renesas devices"
+	default y
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Renesas devices. If you say Y, you will be asked
+	  for your specific device in the following questions.
+
+if NET_VENDOR_RENESAS
+
 config SH_ETH
 	tristate "Renesas SuperH Ethernet support"
 	depends on HAS_DMA
@@ -15,3 +28,19 @@
	  This driver supports the following CPUs:
 		- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
 		  R8A7740, R8A777x and R8A779x.
+
+config RAVB
+	tristate "Renesas Ethernet AVB support"
+	depends on HAS_DMA
+	depends on ARCH_SHMOBILE || COMPILE_TEST
+	select CRC32
+	select MII
+	select MDIO_BITBANG
+	select PHYLIB
+	select PTP_1588_CLOCK
+	help
+	  Renesas Ethernet AVB device driver.
+	  This driver supports the following SoCs:
+		- R8A779x.
+
+endif # NET_VENDOR_RENESAS
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index 1c278a8..a05102a 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -3,3 +3,7 @@
 #
 
 obj-$(CONFIG_SH_ETH) += sh_eth.o
+
+ravb-objs := ravb_main.o ravb_ptp.o
+
+obj-$(CONFIG_RAVB) += ravb.o
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
new file mode 100644
index 0000000..8aa50ac
--- /dev/null
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -0,0 +1,832 @@
+/* Renesas Ethernet AVB device driver
+ *
+ * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ *
+ * Based on the SuperH Ethernet driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __RAVB_H__
+#define __RAVB_H__
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/ptp_clock_kernel.h>
+
+#define BE_TX_RING_SIZE	64	/* TX ring size for Best Effort */
+#define BE_RX_RING_SIZE	1024	/* RX ring size for Best Effort */
+#define NC_TX_RING_SIZE	64	/* TX ring size for Network Control */
+#define NC_RX_RING_SIZE	64	/* RX ring size for Network Control */
+#define BE_TX_RING_MIN	64
+#define BE_RX_RING_MIN	64
+#define BE_TX_RING_MAX	1024
+#define BE_RX_RING_MAX	2048
+
+#define PKT_BUF_SZ	1538
+
+/* Driver's parameters */
+#define RAVB_ALIGN	128
+
+/* Hardware time stamp */
+#define RAVB_TXTSTAMP_VALID	0x00000001	/* TX timestamp valid */
+#define RAVB_TXTSTAMP_ENABLED	0x00000010	/* Enable TX timestamping */
+
+#define RAVB_RXTSTAMP_VALID	0x00000001	/* RX timestamp valid */
+#define RAVB_RXTSTAMP_TYPE	0x00000006	/* RX type mask */
+#define RAVB_RXTSTAMP_TYPE_V2_L2_EVENT 0x00000002
+#define RAVB_RXTSTAMP_TYPE_ALL	0x00000006
+#define RAVB_RXTSTAMP_ENABLED	0x00000010	/* Enable RX timestamping */
+
+enum ravb_reg {
+	/* AVB-DMAC registers */
+	CCC	= 0x0000,
+	DBAT	= 0x0004,
+	DLR	= 0x0008,
+	CSR	= 0x000C,
+	CDAR0	= 0x0010,
+	CDAR1	= 0x0014,
+	CDAR2	= 0x0018,
+	CDAR3	= 0x001C,
+	CDAR4	= 0x0020,
+	CDAR5	= 0x0024,
+	CDAR6	= 0x0028,
+	CDAR7	= 0x002C,
+	CDAR8	= 0x0030,
+	CDAR9	= 0x0034,
+	CDAR10	= 0x0038,
+	CDAR11	= 0x003C,
+	CDAR12	= 0x0040,
+	CDAR13	= 0x0044,
+	CDAR14	= 0x0048,
+	CDAR15	= 0x004C,
+	CDAR16	= 0x0050,
+	CDAR17	= 0x0054,
+	CDAR18	= 0x0058,
+	CDAR19	= 0x005C,
+	CDAR20	= 0x0060,
+	CDAR21	= 0x0064,
+	ESR	= 0x0088,
+	RCR	= 0x0090,
+	RQC0	= 0x0094,
+	RQC1	= 0x0098,
+	RQC2	= 0x009C,
+	RQC3	= 0x00A0,
+	RQC4	= 0x00A4,
+	RPC	= 0x00B0,
+	UFCW	= 0x00BC,
+	UFCS	= 0x00C0,
+	UFCV0	= 0x00C4,
+	UFCV1	= 0x00C8,
+	UFCV2	= 0x00CC,
+	UFCV3	= 0x00D0,
+	UFCV4	= 0x00D4,
+	UFCD0	= 0x00E0,
+	UFCD1	= 0x00E4,
+	UFCD2	= 0x00E8,
+	UFCD3	= 0x00EC,
+	UFCD4	= 0x00F0,
+	SFO	= 0x00FC,
+	SFP0	= 0x0100,
+	SFP1	= 0x0104,
+	SFP2	= 0x0108,
+	SFP3	= 0x010C,
+	SFP4	= 0x0110,
+	SFP5	= 0x0114,
+	SFP6	= 0x0118,
+	SFP7	= 0x011C,
+	SFP8	= 0x0120,
+	SFP9	= 0x0124,
+	SFP10	= 0x0128,
+	SFP11	= 0x012C,
+	SFP12	= 0x0130,
+	SFP13	= 0x0134,
+	SFP14	= 0x0138,
+	SFP15	= 0x013C,
+	SFP16	= 0x0140,
+	SFP17	= 0x0144,
+	SFP18	= 0x0148,
+	SFP19	= 0x014C,
+	SFP20	= 0x0150,
+	SFP21	= 0x0154,
+	SFP22	= 0x0158,
+	SFP23	= 0x015C,
+	SFP24	= 0x0160,
+	SFP25	= 0x0164,
+	SFP26	= 0x0168,
+	SFP27	= 0x016C,
+	SFP28	= 0x0170,
+	SFP29	= 0x0174,
+	SFP30	= 0x0178,
+	SFP31	= 0x017C,
+	SFM0	= 0x01C0,
+	SFM1	= 0x01C4,
+	TGC	= 0x0300,
+	TCCR	= 0x0304,
+	TSR	= 0x0308,
+	TFA0	= 0x0310,
+	TFA1	= 0x0314,
+	TFA2	= 0x0318,
+	CIVR0	= 0x0320,
+	CIVR1	= 0x0324,
+	CDVR0	= 0x0328,
+	CDVR1	= 0x032C,
+	CUL0	= 0x0330,
+	CUL1	= 0x0334,
+	CLL0	= 0x0338,
+	CLL1	= 0x033C,
+	DIC	= 0x0350,
+	DIS	= 0x0354,
+	EIC	= 0x0358,
+	EIS	= 0x035C,
+	RIC0	= 0x0360,
+	RIS0	= 0x0364,
+	RIC1	= 0x0368,
+	RIS1	= 0x036C,
+	RIC2	= 0x0370,
+	RIS2	= 0x0374,
+	TIC	= 0x0378,
+	TIS	= 0x037C,
+	ISS	= 0x0380,
+	GCCR	= 0x0390,
+	GMTT	= 0x0394,
+	GPTC	= 0x0398,
+	GTI	= 0x039C,
+	GTO0	= 0x03A0,
+	GTO1	= 0x03A4,
+	GTO2	= 0x03A8,
+	GIC	= 0x03AC,
+	GIS	= 0x03B0,
+	GCPT	= 0x03B4,	/* Undocumented? */
+	GCT0	= 0x03B8,
+	GCT1	= 0x03BC,
+	GCT2	= 0x03C0,
+
+	/* E-MAC registers */
+	ECMR	= 0x0500,
+	RFLR	= 0x0508,
+	ECSR	= 0x0510,
+	ECSIPR	= 0x0518,
+	PIR	= 0x0520,
+	PSR	= 0x0528,
+	PIPR	= 0x052c,
+	MPR	= 0x0558,
+	PFTCR	= 0x055c,
+	PFRCR	= 0x0560,
+	GECMR	= 0x05b0,
+	MAHR	= 0x05c0,
+	MALR	= 0x05c8,
+	TROCR	= 0x0700,	/* Undocumented? */
+	CDCR	= 0x0708,	/* Undocumented? */
+	LCCR	= 0x0710,	/* Undocumented? */
+	CEFCR	= 0x0740,
+	FRECR	= 0x0748,
+	TSFRCR	= 0x0750,
+	TLFRCR	= 0x0758,
+	RFCR	= 0x0760,
+	CERCR	= 0x0768,	/* Undocumented? */
+	CEECR	= 0x0770,	/* Undocumented? */
+	MAFCR	= 0x0778,
+};
+
+/* Register bits of the Ethernet AVB */
+/* CCC */
+enum CCC_BIT {
+	CCC_OPC		= 0x00000003,
+	CCC_OPC_RESET	= 0x00000000,
+	CCC_OPC_CONFIG	= 0x00000001,
+	CCC_OPC_OPERATION = 0x00000002,
+	CCC_DTSR	= 0x00000100,
+	CCC_CSEL	= 0x00030000,
+	CCC_CSEL_HPB	= 0x00010000,
+	CCC_CSEL_ETH_TX	= 0x00020000,
+	CCC_CSEL_GMII_REF = 0x00030000,
+	CCC_BOC		= 0x00100000,	/* Undocumented? */
+	CCC_LBME	= 0x01000000,
+};
+
+/* CSR */
+enum CSR_BIT {
+	CSR_OPS		= 0x0000000F,
+	CSR_OPS_RESET	= 0x00000001,
+	CSR_OPS_CONFIG	= 0x00000002,
+	CSR_OPS_OPERATION = 0x00000004,
+	CSR_OPS_STANDBY	= 0x00000008,	/* Undocumented? */
+	CSR_DTS		= 0x00000100,
+	CSR_TPO0	= 0x00010000,
+	CSR_TPO1	= 0x00020000,
+	CSR_TPO2	= 0x00040000,
+	CSR_TPO3	= 0x00080000,
+	CSR_RPO		= 0x00100000,
+};
+
+/* ESR */
+enum ESR_BIT {
+	ESR_EQN		= 0x0000001F,
+	ESR_ET		= 0x00000F00,
+	ESR_EIL		= 0x00001000,
+};
+
+/* RCR */
+enum RCR_BIT {
+	RCR_EFFS	= 0x00000001,
+	RCR_ENCF	= 0x00000002,
+	RCR_ESF		= 0x0000000C,
+	RCR_ETS0	= 0x00000010,
+	RCR_ETS2	= 0x00000020,
+	RCR_RFCL	= 0x1FFF0000,
+};
+
+/* RQC0/1/2/3/4 */
+enum RQC_BIT {
+	RQC_RSM0	= 0x00000003,
+	RQC_UFCC0	= 0x00000030,
+	RQC_RSM1	= 0x00000300,
+	RQC_UFCC1	= 0x00003000,
+	RQC_RSM2	= 0x00030000,
+	RQC_UFCC2	= 0x00300000,
+	RQC_RSM3	= 0x03000000,
+	RQC_UFCC3	= 0x30000000,
+};
+
+/* RPC */
+enum RPC_BIT {
+	RPC_PCNT	= 0x00000700,
+	RPC_DCNT	= 0x00FF0000,
+};
+
+/* UFCW */
+enum UFCW_BIT {
+	UFCW_WL0	= 0x0000003F,
+	UFCW_WL1	= 0x00003F00,
+	UFCW_WL2	= 0x003F0000,
+	UFCW_WL3	= 0x3F000000,
+};
+
+/* UFCS */
+enum UFCS_BIT {
+	UFCS_SL0	= 0x0000003F,
+	UFCS_SL1	= 0x00003F00,
+	UFCS_SL2	= 0x003F0000,
+	UFCS_SL3	= 0x3F000000,
+};
+
+/* UFCV0/1/2/3/4 */
+enum UFCV_BIT {
+	UFCV_CV0	= 0x0000003F,
+	UFCV_CV1	= 0x00003F00,
+	UFCV_CV2	= 0x003F0000,
+	UFCV_CV3	= 0x3F000000,
+};
+
+/* UFCD0/1/2/3/4 */
+enum UFCD_BIT {
+	UFCD_DV0	= 0x0000003F,
+	UFCD_DV1	= 0x00003F00,
+	UFCD_DV2	= 0x003F0000,
+	UFCD_DV3	= 0x3F000000,
+};
+
+/* SFO */
+enum SFO_BIT {
+	SFO_FPB		= 0x0000003F,
+};
+
+/* RTC */
+enum RTC_BIT {
+	RTC_MFL0	= 0x00000FFF,
+	RTC_MFL1	= 0x0FFF0000,
+};
+
+/* TGC */
+enum TGC_BIT {
+	TGC_TSM0	= 0x00000001,
+	TGC_TSM1	= 0x00000002,
+	TGC_TSM2	= 0x00000004,
+	TGC_TSM3	= 0x00000008,
+	TGC_TQP		= 0x00000030,
+	TGC_TQP_NONAVB	= 0x00000000,
+	TGC_TQP_AVBMODE1 = 0x00000010,
+	TGC_TQP_AVBMODE2 = 0x00000030,
+	TGC_TBD0	= 0x00000300,
+	TGC_TBD1	= 0x00003000,
+	TGC_TBD2	= 0x00030000,
+	TGC_TBD3	= 0x00300000,
+};
+
+/* TCCR */
+enum TCCR_BIT {
+	TCCR_TSRQ0	= 0x00000001,
+	TCCR_TSRQ1	= 0x00000002,
+	TCCR_TSRQ2	= 0x00000004,
+	TCCR_TSRQ3	= 0x00000008,
+	TCCR_TFEN	= 0x00000100,
+	TCCR_TFR	= 0x00000200,
+};
+
+/* TSR */
+enum TSR_BIT {
+	TSR_CCS0	= 0x00000003,
+	TSR_CCS1	= 0x0000000C,
+	TSR_TFFL	= 0x00000700,
+};
+
+/* TFA2 */
+enum TFA2_BIT {
+	TFA2_TSV	= 0x0000FFFF,
+	TFA2_TST	= 0x03FF0000,
+};
+
+/* DIC */
+enum DIC_BIT {
+	DIC_DPE1	= 0x00000002,
+	DIC_DPE2	= 0x00000004,
+	DIC_DPE3	= 0x00000008,
+	DIC_DPE4	= 0x00000010,
+	DIC_DPE5	= 0x00000020,
+	DIC_DPE6	= 0x00000040,
+	DIC_DPE7	= 0x00000080,
+	DIC_DPE8	= 0x00000100,
+	DIC_DPE9	= 0x00000200,
+	DIC_DPE10	= 0x00000400,
+	DIC_DPE11	= 0x00000800,
+	DIC_DPE12	= 0x00001000,
+	DIC_DPE13	= 0x00002000,
+	DIC_DPE14	= 0x00004000,
+	DIC_DPE15	= 0x00008000,
+};
+
+/* DIS */
+enum DIS_BIT {
+	DIS_DPF1	= 0x00000002,
+	DIS_DPF2	= 0x00000004,
+	DIS_DPF3	= 0x00000008,
+	DIS_DPF4	= 0x00000010,
+	DIS_DPF5	= 0x00000020,
+	DIS_DPF6	= 0x00000040,
+	DIS_DPF7	= 0x00000080,
+	DIS_DPF8	= 0x00000100,
+	DIS_DPF9	= 0x00000200,
+	DIS_DPF10	= 0x00000400,
+	DIS_DPF11	= 0x00000800,
+	DIS_DPF12	= 0x00001000,
+	DIS_DPF13	= 0x00002000,
+	DIS_DPF14	= 0x00004000,
+	DIS_DPF15	= 0x00008000,
+};
+
+/* EIC */
+enum EIC_BIT {
+	EIC_MREE	= 0x00000001,
+	EIC_MTEE	= 0x00000002,
+	EIC_QEE		= 0x00000004,
+	EIC_SEE		= 0x00000008,
+	EIC_CLLE0	= 0x00000010,
+	EIC_CLLE1	= 0x00000020,
+	EIC_CULE0	= 0x00000040,
+	EIC_CULE1	= 0x00000080,
+	EIC_TFFE	= 0x00000100,
+};
+
+/* EIS */
+enum EIS_BIT {
+	EIS_MREF	= 0x00000001,
+	EIS_MTEF	= 0x00000002,
+	EIS_QEF		= 0x00000004,
+	EIS_SEF		= 0x00000008,
+	EIS_CLLF0	= 0x00000010,
+	EIS_CLLF1	= 0x00000020,
+	EIS_CULF0	= 0x00000040,
+	EIS_CULF1	= 0x00000080,
+	EIS_TFFF	= 0x00000100,
+	EIS_QFS		= 0x00010000,
+};
+
+/* RIC0 */
+enum RIC0_BIT {
+	RIC0_FRE0	= 0x00000001,
+	RIC0_FRE1	= 0x00000002,
+	RIC0_FRE2	= 0x00000004,
+	RIC0_FRE3	= 0x00000008,
+	RIC0_FRE4	= 0x00000010,
+	RIC0_FRE5	= 0x00000020,
+	RIC0_FRE6	= 0x00000040,
+	RIC0_FRE7	= 0x00000080,
+	RIC0_FRE8	= 0x00000100,
+	RIC0_FRE9	= 0x00000200,
+	RIC0_FRE10	= 0x00000400,
+	RIC0_FRE11	= 0x00000800,
+	RIC0_FRE12	= 0x00001000,
+	RIC0_FRE13	= 0x00002000,
+	RIC0_FRE14	= 0x00004000,
+	RIC0_FRE15	= 0x00008000,
+	RIC0_FRE16	= 0x00010000,
+	RIC0_FRE17	= 0x00020000,
+};
+
+/* RIS0 */
+enum RIS0_BIT {
+	RIS0_FRF0	= 0x00000001,
+	RIS0_FRF1	= 0x00000002,
+	RIS0_FRF2	= 0x00000004,
+	RIS0_FRF3	= 0x00000008,
+	RIS0_FRF4	= 0x00000010,
+	RIS0_FRF5	= 0x00000020,
+	RIS0_FRF6	= 0x00000040,
+	RIS0_FRF7	= 0x00000080,
+	RIS0_FRF8	= 0x00000100,
+	RIS0_FRF9	= 0x00000200,
+	RIS0_FRF10	= 0x00000400,
+	RIS0_FRF11	= 0x00000800,
+	RIS0_FRF12	= 0x00001000,
+	RIS0_FRF13	= 0x00002000,
+	RIS0_FRF14	= 0x00004000,
+	RIS0_FRF15	= 0x00008000,
+	RIS0_FRF16	= 0x00010000,
+	RIS0_FRF17	= 0x00020000,
+};
+
+/* RIC1 */
+enum RIC1_BIT {
+	RIC1_RFWE	= 0x80000000,
+};
+
+/* RIS1 */
+enum RIS1_BIT {
+	RIS1_RFWF	= 0x80000000,
+};
+
+/* RIC2 */
+enum RIC2_BIT {
+	RIC2_QFE0	= 0x00000001,
+	RIC2_QFE1	= 0x00000002,
+	RIC2_QFE2	= 0x00000004,
+	RIC2_QFE3	= 0x00000008,
+	RIC2_QFE4	= 0x00000010,
+	RIC2_QFE5	= 0x00000020,
+	RIC2_QFE6	= 0x00000040,
+	RIC2_QFE7	= 0x00000080,
+	RIC2_QFE8	= 0x00000100,
+	RIC2_QFE9	= 0x00000200,
+	RIC2_QFE10	= 0x00000400,
+	RIC2_QFE11	= 0x00000800,
+	RIC2_QFE12	= 0x00001000,
+	RIC2_QFE13	= 0x00002000,
+	RIC2_QFE14	= 0x00004000,
+	RIC2_QFE15	= 0x00008000,
+	RIC2_QFE16	= 0x00010000,
+	RIC2_QFE17	= 0x00020000,
+	RIC2_RFFE	= 0x80000000,
+};
+
+/* RIS2 */
+enum RIS2_BIT {
+	RIS2_QFF0	= 0x00000001,
+	RIS2_QFF1	= 0x00000002,
+	RIS2_QFF2	= 0x00000004,
+	RIS2_QFF3	= 0x00000008,
+	RIS2_QFF4	= 0x00000010,
+	RIS2_QFF5	= 0x00000020,
+	RIS2_QFF6	= 0x00000040,
+	RIS2_QFF7	= 0x00000080,
+	RIS2_QFF8	= 0x00000100,
+	RIS2_QFF9	= 0x00000200,
+	RIS2_QFF10	= 0x00000400,
+	RIS2_QFF11	= 0x00000800,
+	RIS2_QFF12	= 0x00001000,
+	RIS2_QFF13	= 0x00002000,
+	RIS2_QFF14	= 0x00004000,
+	RIS2_QFF15	= 0x00008000,
+	RIS2_QFF16	= 0x00010000,
+	RIS2_QFF17	= 0x00020000,
+	RIS2_RFFF	= 0x80000000,
+};
+
+/* TIC */
+enum TIC_BIT {
+	TIC_FTE0	= 0x00000001,	/* Undocumented? */
+	TIC_FTE1	= 0x00000002,	/* Undocumented? */
+	TIC_TFUE	= 0x00000100,
+	TIC_TFWE	= 0x00000200,
+};
+
+/* TIS */
+enum TIS_BIT {
+	TIS_FTF0	= 0x00000001,	/* Undocumented? */
+	TIS_FTF1	= 0x00000002,	/* Undocumented? */
+	TIS_TFUF	= 0x00000100,
+	TIS_TFWF	= 0x00000200,
+};
+
+/* ISS */
+enum ISS_BIT {
+	ISS_FRS		= 0x00000001,	/* Undocumented? */
+	ISS_FTS		= 0x00000004,	/* Undocumented? */
+	ISS_ES		= 0x00000040,
+	ISS_MS		= 0x00000080,
+	ISS_TFUS	= 0x00000100,
+	ISS_TFWS	= 0x00000200,
+	ISS_RFWS	= 0x00001000,
+	ISS_CGIS	= 0x00002000,
+	ISS_DPS1	= 0x00020000,
+	ISS_DPS2	= 0x00040000,
+	ISS_DPS3	= 0x00080000,
+	ISS_DPS4	= 0x00100000,
+	ISS_DPS5	= 0x00200000,
+	ISS_DPS6	= 0x00400000,
+	ISS_DPS7	= 0x00800000,
+	ISS_DPS8	= 0x01000000,
+	ISS_DPS9	= 0x02000000,
+	ISS_DPS10	= 0x04000000,
+	ISS_DPS11	= 0x08000000,
+	ISS_DPS12	= 0x10000000,
+	ISS_DPS13	= 0x20000000,
+	ISS_DPS14	= 0x40000000,
+	ISS_DPS15	= 0x80000000,
+};
+
+/* GCCR */
+enum GCCR_BIT {
+	GCCR_TCR	= 0x00000003,
+	GCCR_TCR_NOREQ	= 0x00000000, /* No request */
+	GCCR_TCR_RESET	= 0x00000001, /* gPTP/AVTP presentation timer reset */
+	GCCR_TCR_CAPTURE = 0x00000003, /* Capture value set in GCCR.TCSS */
+	GCCR_LTO	= 0x00000004,
+	GCCR_LTI	= 0x00000008,
+	GCCR_LPTC	= 0x00000010,
+	GCCR_LMTT	= 0x00000020,
+	GCCR_TCSS	= 0x00000300,
+	GCCR_TCSS_GPTP	= 0x00000000,	/* gPTP timer value */
+	GCCR_TCSS_ADJGPTP = 0x00000100, /* Adjusted gPTP timer value */
+	GCCR_TCSS_AVTP	= 0x00000200,	/* AVTP presentation time value */
+};
+
+/* GTI */
+enum GTI_BIT {
+	GTI_TIV		= 0x0FFFFFFF,
+};
+
+/* GIC */
+enum GIC_BIT {
+	GIC_PTCE	= 0x00000001,	/* Undocumented? */
+	GIC_PTME	= 0x00000004,
+};
+
+/* GIS */
+enum GIS_BIT {
+	GIS_PTCF	= 0x00000001,	/* Undocumented? */
+	GIS_PTMF	= 0x00000004,
+};
+
+/* ECMR */
+enum ECMR_BIT {
+	ECMR_PRM	= 0x00000001,
+	ECMR_DM		= 0x00000002,
+	ECMR_TE		= 0x00000020,
+	ECMR_RE		= 0x00000040,
+	ECMR_MPDE	= 0x00000200,
+	ECMR_TXF	= 0x00010000,	/* Undocumented? */
+	ECMR_RXF	= 0x00020000,
+	ECMR_PFR	= 0x00040000,
+	ECMR_ZPF	= 0x00080000,	/* Undocumented? */
+	ECMR_RZPF	= 0x00100000,
+	ECMR_DPAD	= 0x00200000,
+	ECMR_RCSC	= 0x00800000,
+	ECMR_TRCCM	= 0x04000000,
+};
+
+/* ECSR */
+enum ECSR_BIT {
+	ECSR_ICD	= 0x00000001,
+	ECSR_MPD	= 0x00000002,
+	ECSR_LCHNG	= 0x00000004,
+	ECSR_PHYI	= 0x00000008,
+};
+
+/* ECSIPR */
+enum ECSIPR_BIT {
+	ECSIPR_ICDIP	= 0x00000001,
+	ECSIPR_MPDIP	= 0x00000002,
+	ECSIPR_LCHNGIP	= 0x00000004,	/* Undocumented? */
+};
+
+/* PIR */
+enum PIR_BIT {
+	PIR_MDC		= 0x00000001,
+	PIR_MMD		= 0x00000002,
+	PIR_MDO		= 0x00000004,
+	PIR_MDI		= 0x00000008,
+};
+
+/* PSR */
+enum PSR_BIT {
+	PSR_LMON	= 0x00000001,
+};
+
+/* PIPR */
+enum PIPR_BIT {
+	PIPR_PHYIP	= 0x00000001,
+};
+
+/* MPR */
+enum MPR_BIT {
+	MPR_MP		= 0x0000ffff,
+};
+
+/* GECMR */
+enum GECMR_BIT {
+	GECMR_SPEED	= 0x00000001,
+	GECMR_SPEED_100	= 0x00000000,
+	GECMR_SPEED_1000 = 0x00000001,
+};
+
+/* The Ethernet AVB descriptor definitions. */
+struct ravb_desc {
+	__le16 ds;		/* Descriptor size */
+	u8 cc;		/* Content control MSBs (reserved) */
+	u8 die_dt;	/* Descriptor interrupt enable and type */
+	__le32 dptr;	/* Descriptor pointer */
+};
+
+enum DIE_DT {
+	/* Frame data */
+	DT_FMID		= 0x40,
+	DT_FSTART	= 0x50,
+	DT_FEND		= 0x60,
+	DT_FSINGLE	= 0x70,
+	/* Chain control */
+	DT_LINK		= 0x80,
+	DT_LINKFIX	= 0x90,
+	DT_EOS		= 0xa0,
+	/* HW/SW arbitration */
+	DT_FEMPTY	= 0xc0,
+	DT_FEMPTY_IS	= 0xd0,
+	DT_FEMPTY_IC	= 0xe0,
+	DT_FEMPTY_ND	= 0xf0,
+	DT_LEMPTY	= 0x20,
+	DT_EEMPTY	= 0x30,
+};
+
+struct ravb_rx_desc {
+	__le16 ds_cc;	/* Descriptor size and content control LSBs */
+	u8 msc;		/* MAC status code */
+	u8 die_dt;	/* Descriptor interrupt enable and type */
+	__le32 dptr;	/* Descriptor pointer */
+};
+
+struct ravb_ex_rx_desc {
+	__le16 ds_cc;	/* Descriptor size and content control lower bits */
+	u8 msc;		/* MAC status code */
+	u8 die_dt;	/* Descriptor interrupt enable and type */
+	__le32 dptr;	/* Descriptor pointer */
+	__le32 ts_n;	/* Timestamp nsec */
+	__le32 ts_sl;	/* Timestamp low */
+	__le16 ts_sh;	/* Timestamp high */
+	__le16 res;	/* Reserved bits */
+};
+
+enum RX_DS_CC_BIT {
+	RX_DS		= 0x0fff, /* Data size */
+	RX_TR		= 0x1000, /* Truncation indication */
+	RX_EI		= 0x2000, /* Error indication */
+	RX_PS		= 0xc000, /* Padding selection */
+};
+
+/* E-MAC status code */
+enum MSC_BIT {
+	MSC_CRC		= 0x01, /* Frame CRC error */
+	MSC_RFE		= 0x02, /* Frame reception error (flagged by PHY) */
+	MSC_RTSF	= 0x04, /* Frame length error (frame too short) */
+	MSC_RTLF	= 0x08, /* Frame length error (frame too long) */
+	MSC_FRE		= 0x10, /* Fraction error (not a multiple of 8 bits) */
+	MSC_CRL		= 0x20, /* Carrier lost */
+	MSC_CEEF	= 0x40, /* Carrier extension error */
+	MSC_MC		= 0x80, /* Multicast frame reception */
+};
+
+struct ravb_tx_desc {
+	__le16 ds_tagl;	/* Descriptor size and frame tag LSBs */
+	u8 tagh_tsr;	/* Frame tag MSBs and timestamp storage request bit */
+	u8 die_dt;	/* Descriptor interrupt enable and type */
+	__le32 dptr;	/* Descriptor pointer */
+};
+
+enum TX_DS_TAGL_BIT {
+	TX_DS		= 0x0fff, /* Data size */
+	TX_TAGL		= 0xf000, /* Frame tag LSBs */
+};
+
+enum TX_TAGH_TSR_BIT {
+	TX_TAGH		= 0x3f, /* Frame tag MSBs */
+	TX_TSR		= 0x40, /* Timestamp storage request */
+};
+
+enum RAVB_QUEUE {
+	RAVB_BE = 0,	/* Best Effort Queue */
+	RAVB_NC,	/* Network Control Queue */
+};
+
+#define DBAT_ENTRY_NUM	22
+#define RX_QUEUE_OFFSET	4
+#define NUM_RX_QUEUE	2
+#define NUM_TX_QUEUE	2
+
+struct ravb_tstamp_skb {
+	struct list_head list;
+	struct sk_buff *skb;
+	u16 tag;
+};
+
+struct ravb_ptp_perout {
+	u32 target;
+	u32 period;
+};
+
+#define N_EXT_TS	1
+#define N_PER_OUT	1
+
+struct ravb_ptp {
+	struct ptp_clock *clock;
+	struct ptp_clock_info info;
+	u32 default_addend;
+	u32 current_addend;
+	int extts[N_EXT_TS];
+	struct ravb_ptp_perout perout[N_PER_OUT];
+};
+
+struct ravb_private {
+	struct net_device *ndev;
+	struct platform_device *pdev;
+	void __iomem *addr;
+	struct mdiobb_ctrl mdiobb;
+	u32 num_rx_ring[NUM_RX_QUEUE];
+	u32 num_tx_ring[NUM_TX_QUEUE];
+	u32 desc_bat_size;
+	dma_addr_t desc_bat_dma;
+	struct ravb_desc *desc_bat;
+	dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
+	dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
+	struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
+	struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
+	struct sk_buff **rx_skb[NUM_RX_QUEUE];
+	struct sk_buff **tx_skb[NUM_TX_QUEUE];
+	void **tx_buffers[NUM_TX_QUEUE];
+	u32 rx_over_errors;
+	u32 rx_fifo_errors;
+	struct net_device_stats stats[NUM_RX_QUEUE];
+	u32 tstamp_tx_ctrl;
+	u32 tstamp_rx_ctrl;
+	struct list_head ts_skb_list;
+	u32 ts_skb_tag;
+	struct ravb_ptp ptp;
+	spinlock_t lock;		/* Register access lock */
+	u32 cur_rx[NUM_RX_QUEUE];	/* Consumer ring indices */
+	u32 dirty_rx[NUM_RX_QUEUE];	/* Producer ring indices */
+	u32 cur_tx[NUM_TX_QUEUE];
+	u32 dirty_tx[NUM_TX_QUEUE];
+	struct napi_struct napi[NUM_RX_QUEUE];
+	struct work_struct work;
+	/* MII transceiver section. */
+	struct mii_bus *mii_bus;	/* MDIO bus control */
+	struct phy_device *phydev;	/* PHY device control */
+	int link;
+	phy_interface_t phy_interface;
+	int msg_enable;
+	int speed;
+	int duplex;
+
+	unsigned no_avb_link:1;
+	unsigned avb_link_active_low:1;
+};
+
+static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+
+	return ioread32(priv->addr + reg);
+}
+
+static inline void ravb_write(struct net_device *ndev, u32 data,
+			      enum ravb_reg reg)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+
+	iowrite32(data, priv->addr + reg);
+}
+
+int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value);
+
+irqreturn_t ravb_ptp_interrupt(struct net_device *ndev);
+void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev);
+void ravb_ptp_stop(struct net_device *ndev);
+
+#endif	/* #ifndef __RAVB_H__ */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
new file mode 100644
index 0000000..fd97457
--- /dev/null
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -0,0 +1,1824 @@
+/* Renesas Ethernet AVB device driver
+ *
+ * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ *
+ * Based on the SuperH Ethernet driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/cache.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "ravb.h"
+
+#define RAVB_DEF_MSG_ENABLE \
+		(NETIF_MSG_LINK	  | \
+		 NETIF_MSG_TIMER  | \
+		 NETIF_MSG_RX_ERR | \
+		 NETIF_MSG_TX_ERR)
+
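+/* Poll a register until (reg & mask) == value; 10000 tries 10 us apart
+ * give a timeout of roughly 100 ms.
+ */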
+int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
+{
+	int i;
+
+	for (i = 0; i < 10000; i++) {
+		if ((ravb_read(ndev, reg) & mask) == value)
+			return 0;
+		udelay(10);
+	}
+	return -ETIMEDOUT;
+}
+
+static int ravb_config(struct net_device *ndev)
+{
+	int error;
+
+	/* Set config mode */
+	ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
+		   CCC);
+	/* Check if the operating mode is changed to the config mode */
+	error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
+	if (error)
+		netdev_err(ndev, "failed to switch device to config mode\n");
+
+	return error;
+}
+
+static void ravb_set_duplex(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	u32 ecmr = ravb_read(ndev, ECMR);
+
+	if (priv->duplex)	/* Full */
+		ecmr |=  ECMR_DM;
+	else			/* Half */
+		ecmr &= ~ECMR_DM;
+	ravb_write(ndev, ecmr, ECMR);
+}
+
+static void ravb_set_rate(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+
+	switch (priv->speed) {
+	case 100:		/* 100BASE */
+		ravb_write(ndev, GECMR_SPEED_100, GECMR);
+		break;
+	case 1000:		/* 1000BASE */
+		ravb_write(ndev, GECMR_SPEED_1000, GECMR);
+		break;
+	default:
+		break;
+	}
+}
+
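+/* Align the skb data pointer up to the next RAVB_ALIGN boundary for the DMAC */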
+static void ravb_set_buffer_align(struct sk_buff *skb)
+{
+	u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
+
+	if (reserve)
+		skb_reserve(skb, RAVB_ALIGN - reserve);
+}
+
+/* Get MAC address from the MAC address registers
+ *
+ * The Ethernet AVB device has no ROM for the MAC address, so this
+ * function reads back the address that the bootloader programmed,
+ * unless one was supplied by the caller.
+ */
+static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
+{
+	if (mac) {
+		ether_addr_copy(ndev->dev_addr, mac);
+	} else {
+		ndev->dev_addr[0] = (ravb_read(ndev, MAHR) >> 24);
+		ndev->dev_addr[1] = (ravb_read(ndev, MAHR) >> 16) & 0xFF;
+		ndev->dev_addr[2] = (ravb_read(ndev, MAHR) >> 8) & 0xFF;
+		ndev->dev_addr[3] = (ravb_read(ndev, MAHR) >> 0) & 0xFF;
+		ndev->dev_addr[4] = (ravb_read(ndev, MALR) >> 8) & 0xFF;
+		ndev->dev_addr[5] = (ravb_read(ndev, MALR) >> 0) & 0xFF;
+	}
+}
+
+static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
+{
+	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
+						 mdiobb);
+	u32 pir = ravb_read(priv->ndev, PIR);
+
+	if (set)
+		pir |=  mask;
+	else
+		pir &= ~mask;
+	ravb_write(priv->ndev, pir, PIR);
+}
+
+/* MDC pin control */
+static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+	ravb_mdio_ctrl(ctrl, PIR_MDC, level);
+}
+
+/* Data I/O pin control */
+static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+	ravb_mdio_ctrl(ctrl, PIR_MMD, output);
+}
+
+/* Set data bit */
+static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
+{
+	ravb_mdio_ctrl(ctrl, PIR_MDO, value);
+}
+
+/* Get data bit */
+static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
+{
+	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
+						 mdiobb);
+
+	return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
+}
+
+/* MDIO bus control struct */
+static struct mdiobb_ops bb_ops = {
+	.owner = THIS_MODULE,
+	.set_mdc = ravb_set_mdc,
+	.set_mdio_dir = ravb_set_mdio_dir,
+	.set_mdio_data = ravb_set_mdio_data,
+	.get_mdio_data = ravb_get_mdio_data,
+};
+
+/* Free skb's and DMA buffers for Ethernet AVB */
+static void ravb_ring_free(struct net_device *ndev, int q)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	int ring_size;
+	int i;
+
+	/* Free RX skb ringbuffer */
+	if (priv->rx_skb[q]) {
+		for (i = 0; i < priv->num_rx_ring[q]; i++)
+			dev_kfree_skb(priv->rx_skb[q][i]);
+	}
+	kfree(priv->rx_skb[q]);
+	priv->rx_skb[q] = NULL;
+
+	/* Free TX skb ringbuffer */
+	if (priv->tx_skb[q]) {
+		for (i = 0; i < priv->num_tx_ring[q]; i++)
+			dev_kfree_skb(priv->tx_skb[q][i]);
+	}
+	kfree(priv->tx_skb[q]);
+	priv->tx_skb[q] = NULL;
+
+	/* Free aligned TX buffers */
+	if (priv->tx_buffers[q]) {
+		for (i = 0; i < priv->num_tx_ring[q]; i++)
+			kfree(priv->tx_buffers[q][i]);
+	}
+	kfree(priv->tx_buffers[q]);
+	priv->tx_buffers[q] = NULL;
+
+	if (priv->rx_ring[q]) {
+		ring_size = sizeof(struct ravb_ex_rx_desc) *
+			    (priv->num_rx_ring[q] + 1);
+		dma_free_coherent(NULL, ring_size, priv->rx_ring[q],
+				  priv->rx_desc_dma[q]);
+		priv->rx_ring[q] = NULL;
+	}
+
+	if (priv->tx_ring[q]) {
+		ring_size = sizeof(struct ravb_tx_desc) *
+			    (priv->num_tx_ring[q] + 1);
+		dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
+				  priv->tx_desc_dma[q]);
+		priv->tx_ring[q] = NULL;
+	}
+}
+
+/* Format skb and descriptor buffer for Ethernet AVB */
+static void ravb_ring_format(struct net_device *ndev, int q)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct ravb_ex_rx_desc *rx_desc = NULL;
+	struct ravb_tx_desc *tx_desc = NULL;
+	struct ravb_desc *desc = NULL;
+	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
+	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+	void *buffer;
+	int i;
+
+	priv->cur_rx[q] = 0;
+	priv->cur_tx[q] = 0;
+	priv->dirty_rx[q] = 0;
+	priv->dirty_tx[q] = 0;
+
+	memset(priv->rx_ring[q], 0, rx_ring_size);
+	/* Build RX ring buffer */
+	for (i = 0; i < priv->num_rx_ring[q]; i++) {
+		priv->rx_skb[q][i] = NULL;
+		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+		if (!skb)
+			break;
+		ravb_set_buffer_align(skb);
+		/* RX descriptor */
+		rx_desc = &priv->rx_ring[q][i];
+		/* The buffer size must be a multiple of 16 bytes. */
+		rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+		dma_addr = dma_map_single(&ndev->dev, skb->data,
+					  ALIGN(PKT_BUF_SZ, 16),
+					  DMA_FROM_DEVICE);
+		if (dma_mapping_error(&ndev->dev, dma_addr)) {
+			dev_kfree_skb(skb);
+			break;
+		}
+		priv->rx_skb[q][i] = skb;
+		rx_desc->dptr = cpu_to_le32(dma_addr);
+		rx_desc->die_dt = DT_FEMPTY;
+	}
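+	/* Use the extra descriptor at the end as a link back to the start
+	 * of the ring.
+	 */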
+	rx_desc = &priv->rx_ring[q][i];
+	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+	rx_desc->die_dt = DT_LINKFIX; /* type */
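+	/* If allocation stopped early, this wraps so that
+	 * cur_rx - dirty_rx equals the number of entries still needing
+	 * buffers from the refill loop in ravb_rx().
+	 */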
+	priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]);
+
+	memset(priv->tx_ring[q], 0, tx_ring_size);
+	/* Build TX ring buffer */
+	for (i = 0; i < priv->num_tx_ring[q]; i++) {
+		priv->tx_skb[q][i] = NULL;
+		priv->tx_buffers[q][i] = NULL;
+		buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
+		if (!buffer)
+			break;
+		/* Aligned TX buffer */
+		priv->tx_buffers[q][i] = buffer;
+		tx_desc = &priv->tx_ring[q][i];
+		tx_desc->die_dt = DT_EEMPTY;
+	}
+	tx_desc = &priv->tx_ring[q][i];
+	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
+	tx_desc->die_dt = DT_LINKFIX; /* type */
+
+	/* RX descriptor base address for this queue */
+	desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
+	desc->die_dt = DT_LINKFIX; /* type */
+	desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+
+	/* TX descriptor base address for this queue */
+	desc = &priv->desc_bat[q];
+	desc->die_dt = DT_LINKFIX; /* type */
+	desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
+}
+
+/* Init skb and descriptor buffer for Ethernet AVB */
+static int ravb_ring_init(struct net_device *ndev, int q)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	int ring_size;
+
+	/* Allocate RX and TX skb rings */
+	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
+				  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
+	priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
+				  sizeof(*priv->tx_skb[q]), GFP_KERNEL);
+	if (!priv->rx_skb[q] || !priv->tx_skb[q])
+		goto error;
+
+	/* Allocate rings for the aligned buffers */
+	priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
+				      sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
+	if (!priv->tx_buffers[q])
+		goto error;
+
+	/* Allocate all RX descriptors. */
+	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
+	priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
+					      &priv->rx_desc_dma[q],
+					      GFP_KERNEL);
+	if (!priv->rx_ring[q])
+		goto error;
+
+	priv->dirty_rx[q] = 0;
+
+	/* Allocate all TX descriptors. */
+	ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] + 1);
+	priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
+					      &priv->tx_desc_dma[q],
+					      GFP_KERNEL);
+	if (!priv->tx_ring[q])
+		goto error;
+
+	return 0;
+
+error:
+	ravb_ring_free(ndev, q);
+
+	return -ENOMEM;
+}
+
+/* E-MAC init function */
+static void ravb_emac_init(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	u32 ecmr;
+
+	/* Receive frame limit set register */
+	ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
+
+	/* PAUSE prohibition */
+	ecmr =  ravb_read(ndev, ECMR);
+	ecmr &= ECMR_DM;
+	ecmr |= ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
+	ravb_write(ndev, ecmr, ECMR);
+
+	ravb_set_rate(ndev);
+
+	/* Set MAC address */
+	ravb_write(ndev,
+		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
+	ravb_write(ndev,
+		   (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);
+
+	ravb_write(ndev, 1, MPR);
+
+	/* E-MAC status register clear */
+	ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
+
+	/* E-MAC interrupt enable register */
+	ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
+}
+
+/* Device init function for Ethernet AVB */
+static int ravb_dmac_init(struct net_device *ndev)
+{
+	int error;
+
+	/* Set CONFIG mode */
+	error = ravb_config(ndev);
+	if (error)
+		return error;
+
+	error = ravb_ring_init(ndev, RAVB_BE);
+	if (error)
+		return error;
+	error = ravb_ring_init(ndev, RAVB_NC);
+	if (error) {
+		ravb_ring_free(ndev, RAVB_BE);
+		return error;
+	}
+
+	/* Descriptor format */
+	ravb_ring_format(ndev, RAVB_BE);
+	ravb_ring_format(ndev, RAVB_NC);
+
+#if defined(__LITTLE_ENDIAN)
+	ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC);
+#else
+	ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC);
+#endif
+
+	/* Set AVB RX */
+	ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR);
+
+	/* Set FIFO size */
+	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
+
+	/* Timestamp enable */
+	ravb_write(ndev, TCCR_TFEN, TCCR);
+
+	/* Interrupt enable: */
+	/* Frame receive */
+	ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
+	/* Receive FIFO full warning */
+	ravb_write(ndev, RIC1_RFWE, RIC1);
+	/* Receive FIFO full error, descriptor empty */
+	ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
+	/* Frame transmitted, timestamp FIFO updated */
+	ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
+
+	/* Setting the control will start the AVB-DMAC process. */
+	ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION,
+		   CCC);
+
+	return 0;
+}
+
+/* Free transmitted skbs and reclaim TX descriptors for Ethernet AVB */
+static int ravb_tx_free(struct net_device *ndev, int q)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &priv->stats[q];
+	struct ravb_tx_desc *desc;
+	int free_num = 0;
+	int entry = 0;
+	u32 size;
+
+	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+		entry = priv->dirty_tx[q] % priv->num_tx_ring[q];
+		desc = &priv->tx_ring[q][entry];
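+		/* The DMAC rewrites a descriptor's type to DT_FEMPTY once
+		 * the frame has been sent.
+		 */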
+		if (desc->die_dt != DT_FEMPTY)
+			break;
+		/* Descriptor type must be checked before all other reads */
+		dma_rmb();
+		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+		/* Free the original skb. */
+		if (priv->tx_skb[q][entry]) {
+			dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+					 size, DMA_TO_DEVICE);
+			dev_kfree_skb_any(priv->tx_skb[q][entry]);
+			priv->tx_skb[q][entry] = NULL;
+			free_num++;
+		}
+		stats->tx_packets++;
+		stats->tx_bytes += size;
+		desc->die_dt = DT_EEMPTY;
+	}
+	return free_num;
+}
+
+static void ravb_get_tx_tstamp(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
+	struct skb_shared_hwtstamps shhwtstamps;
+	struct sk_buff *skb;
+	struct timespec64 ts;
+	u16 tag, tfa_tag;
+	int count;
+	u32 tfa2;
+
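+	/* TSR.TFFL holds the number of valid entries in the TX timestamp FIFO */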
+	count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
+	while (count--) {
+		tfa2 = ravb_read(ndev, TFA2);
+		tfa_tag = (tfa2 & TFA2_TST) >> 16;
+		ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
+		ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
+			    ravb_read(ndev, TFA1);
+		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
+					 list) {
+			skb = ts_skb->skb;
+			tag = ts_skb->tag;
+			list_del(&ts_skb->list);
+			kfree(ts_skb);
+			if (tag == tfa_tag) {
+				skb_tstamp_tx(skb, &shhwtstamps);
+				break;
+			}
+		}
+		ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR);
+	}
+}
+
+/* Packet receive function for Ethernet AVB */
+static bool ravb_rx(struct net_device *ndev, int *quota, int q)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+	int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
+			priv->cur_rx[q];
+	struct net_device_stats *stats = &priv->stats[q];
+	struct ravb_ex_rx_desc *desc;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+	struct timespec64 ts;
+	u16 pkt_len = 0;
+	u8  desc_status;
+	int limit;
+
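+	/* Scan at most the number of filled entries, further capped by the
+	 * NAPI quota.
+	 */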
+	boguscnt = min(boguscnt, *quota);
+	limit = boguscnt;
+	desc = &priv->rx_ring[q][entry];
+	while (desc->die_dt != DT_FEMPTY) {
+		/* Descriptor type must be checked before all other reads */
+		dma_rmb();
+		desc_status = desc->msc;
+		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
+
+		if (--boguscnt < 0)
+			break;
+
+		if (desc_status & MSC_MC)
+			stats->multicast++;
+
+		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
+				   MSC_CEEF)) {
+			stats->rx_errors++;
+			if (desc_status & MSC_CRC)
+				stats->rx_crc_errors++;
+			if (desc_status & MSC_RFE)
+				stats->rx_frame_errors++;
+			if (desc_status & (MSC_RTLF | MSC_RTSF))
+				stats->rx_length_errors++;
+			if (desc_status & MSC_CEEF)
+				stats->rx_missed_errors++;
+		} else {
+			u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
+
+			skb = priv->rx_skb[q][entry];
+			priv->rx_skb[q][entry] = NULL;
+			dma_sync_single_for_cpu(&ndev->dev,
+						le32_to_cpu(desc->dptr),
+						ALIGN(PKT_BUF_SZ, 16),
+						DMA_FROM_DEVICE);
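+			/* PTP event frames are expected on the NC queue and
+			 * stamped for the V2_L2_EVENT filter; BE frames are
+			 * stamped only when all packets are requested.
+			 */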
+			get_ts &= (q == RAVB_NC) ?
+					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
+					~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
+			if (get_ts) {
+				struct skb_shared_hwtstamps *shhwtstamps;
+
+				shhwtstamps = skb_hwtstamps(skb);
+				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+				ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
+					     32) | le32_to_cpu(desc->ts_sl);
+				ts.tv_nsec = le32_to_cpu(desc->ts_n);
+				shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
+			}
+			skb_put(skb, pkt_len);
+			skb->protocol = eth_type_trans(skb, ndev);
+			napi_gro_receive(&priv->napi[q], skb);
+			stats->rx_packets++;
+			stats->rx_bytes += pkt_len;
+		}
+
+		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
+		desc = &priv->rx_ring[q][entry];
+	}
+
+	/* Refill the RX ring buffers. */
+	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
+		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
+		desc = &priv->rx_ring[q][entry];
+		/* The buffer size must be a multiple of 16 bytes. */
+		desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+
+		if (!priv->rx_skb[q][entry]) {
+			skb = netdev_alloc_skb(ndev,
+					       PKT_BUF_SZ + RAVB_ALIGN - 1);
+			if (!skb)
+				break;	/* Better luck next round. */
+			ravb_set_buffer_align(skb);
+			dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+					 ALIGN(PKT_BUF_SZ, 16),
+					 DMA_FROM_DEVICE);
+			dma_addr = dma_map_single(&ndev->dev, skb->data,
+						  le16_to_cpu(desc->ds_cc),
+						  DMA_FROM_DEVICE);
+			skb_checksum_none_assert(skb);
+			if (dma_mapping_error(&ndev->dev, dma_addr)) {
+				dev_kfree_skb_any(skb);
+				break;
+			}
+			desc->dptr = cpu_to_le32(dma_addr);
+			priv->rx_skb[q][entry] = skb;
+		}
+		/* Descriptor type must be set after all the above writes */
+		dma_wmb();
+		desc->die_dt = DT_FEMPTY;
+	}
+
+	*quota -= limit - (++boguscnt);
+
+	return boguscnt <= 0;
+}
+
+static void ravb_rcv_snd_disable(struct net_device *ndev)
+{
+	/* Disable TX and RX */
+	ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR);
+}
+
+static void ravb_rcv_snd_enable(struct net_device *ndev)
+{
+	/* Enable TX and RX */
+	ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR);
+}
+
+/* Wait for the hardware DMA processes to finish, then stop the AVB-DMAC */
+static int ravb_stop_dma(struct net_device *ndev)
+{
+	int error;
+
+	/* Wait for stopping the hardware TX process */
+	error = ravb_wait(ndev, TCCR,
+			  TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
+	if (error)
+		return error;
+
+	error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
+			  0);
+	if (error)
+		return error;
+
+	/* Stop the E-MAC's RX/TX processes. */
+	ravb_rcv_snd_disable(ndev);
+
+	/* Wait for stopping the RX DMA process */
+	error = ravb_wait(ndev, CSR, CSR_RPO, 0);
+	if (error)
+		return error;
+
+	/* Stop AVB-DMAC process */
+	return ravb_config(ndev);
+}
+
+/* E-MAC interrupt handler */
+static void ravb_emac_interrupt(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	u32 ecsr, psr;
+
+	ecsr = ravb_read(ndev, ECSR);
+	ravb_write(ndev, ecsr, ECSR);	/* clear interrupt */
+	if (ecsr & ECSR_ICD)
+		ndev->stats.tx_carrier_errors++;
+	if (ecsr & ECSR_LCHNG) {
+		/* Link changed */
+		if (priv->no_avb_link)
+			return;
+		psr = ravb_read(ndev, PSR);
+		if (priv->avb_link_active_low)
+			psr ^= PSR_LMON;
+		if (!(psr & PSR_LMON)) {
+			/* Disable RX and TX */
+			ravb_rcv_snd_disable(ndev);
+		} else {
+			/* Enable RX and TX */
+			ravb_rcv_snd_enable(ndev);
+		}
+	}
+}
+
+/* Error interrupt handler */
+static void ravb_error_interrupt(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	u32 eis, ris2;
+
+	eis = ravb_read(ndev, EIS);
+	ravb_write(ndev, ~EIS_QFS, EIS);
+	if (eis & EIS_QFS) {
+		ris2 = ravb_read(ndev, RIS2);
+		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
+
+		/* Receive Descriptor Empty int, best effort queue */
+		if (ris2 & RIS2_QFF0)
+			priv->stats[RAVB_BE].rx_over_errors++;
+
+		/* Receive Descriptor Empty int, network control queue */
+		if (ris2 & RIS2_QFF1)
+			priv->stats[RAVB_NC].rx_over_errors++;
+
+		/* Receive FIFO Overflow int */
+		if (ris2 & RIS2_RFFF)
+			priv->rx_fifo_errors++;
+	}
+}
+
+static irqreturn_t ravb_interrupt(int irq, void *dev_id)
+{
+	struct net_device *ndev = dev_id;
+	struct ravb_private *priv = netdev_priv(ndev);
+	irqreturn_t result = IRQ_NONE;
+	u32 iss;
+
+	spin_lock(&priv->lock);
+	/* Get interrupt status */
+	iss = ravb_read(ndev, ISS);
+
+	/* Received and transmitted interrupts */
+	if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
+		u32 ris0 = ravb_read(ndev, RIS0);
+		u32 ric0 = ravb_read(ndev, RIC0);
+		u32 tis  = ravb_read(ndev, TIS);
+		u32 tic  = ravb_read(ndev, TIC);
+		int q;
+
+		/* Timestamp updated */
+		if (tis & TIS_TFUF) {
+			ravb_write(ndev, ~TIS_TFUF, TIS);
+			ravb_get_tx_tstamp(ndev);
+			result = IRQ_HANDLED;
+		}
+
+		/* Network control and best effort queue RX/TX */
+		for (q = RAVB_NC; q >= RAVB_BE; q--) {
+			if (((ris0 & ric0) & BIT(q)) ||
+			    ((tis  & tic)  & BIT(q))) {
+				if (napi_schedule_prep(&priv->napi[q])) {
+					/* Mask RX and TX interrupts */
+					ravb_write(ndev, ric0 & ~BIT(q), RIC0);
+					ravb_write(ndev, tic  & ~BIT(q), TIC);
+					__napi_schedule(&priv->napi[q]);
+				} else {
+					netdev_warn(ndev,
+						    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
+						    ris0, ric0);
+					netdev_warn(ndev,
+						    "                    tx status 0x%08x, tx mask 0x%08x.\n",
+						    tis, tic);
+				}
+				result = IRQ_HANDLED;
+			}
+		}
+	}
+
+	/* E-MAC status summary */
+	if (iss & ISS_MS) {
+		ravb_emac_interrupt(ndev);
+		result = IRQ_HANDLED;
+	}
+
+	/* Error status summary */
+	if (iss & ISS_ES) {
+		ravb_error_interrupt(ndev);
+		result = IRQ_HANDLED;
+	}
+
+	if (iss & ISS_CGIS)
+		result = ravb_ptp_interrupt(ndev);
+
+	mmiowb();
+	spin_unlock(&priv->lock);
+	return result;
+}
+
+static int ravb_poll(struct napi_struct *napi, int budget)
+{
+	struct net_device *ndev = napi->dev;
+	struct ravb_private *priv = netdev_priv(ndev);
+	unsigned long flags;
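+	/* The NAPI struct's index within the array doubles as the queue number */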
+	int q = napi - priv->napi;
+	int mask = BIT(q);
+	int quota = budget;
+	u32 ris0, tis;
+
+	for (;;) {
+		tis = ravb_read(ndev, TIS);
+		ris0 = ravb_read(ndev, RIS0);
+		if (!((ris0 & mask) || (tis & mask)))
+			break;
+
+		/* Processing RX Descriptor Ring */
+		if (ris0 & mask) {
+			/* Clear RX interrupt */
+			ravb_write(ndev, ~mask, RIS0);
+			if (ravb_rx(ndev, &quota, q))
+				goto out;
+		}
+		/* Processing TX Descriptor Ring */
+		if (tis & mask) {
+			spin_lock_irqsave(&priv->lock, flags);
+			/* Clear TX interrupt */
+			ravb_write(ndev, ~mask, TIS);
+			ravb_tx_free(ndev, q);
+			netif_wake_subqueue(ndev, q);
+			mmiowb();
+			spin_unlock_irqrestore(&priv->lock, flags);
+		}
+	}
+
+	napi_complete(napi);
+
+	/* Re-enable RX/TX interrupts */
+	spin_lock_irqsave(&priv->lock, flags);
+	ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0);
+	ravb_write(ndev, ravb_read(ndev, TIC)  | mask,  TIC);
+	mmiowb();
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* Receive error message handling */
+	priv->rx_over_errors =  priv->stats[RAVB_BE].rx_over_errors;
+	priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
+	if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
+		ndev->stats.rx_over_errors = priv->rx_over_errors;
+		netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
+	}
+	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
+		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
+		netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
+	}
+out:
+	return budget - quota;
+}
+
+/* PHY state control function */
+static void ravb_adjust_link(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct phy_device *phydev = priv->phydev;
+	bool new_state = false;
+
+	if (phydev->link) {
+		if (phydev->duplex != priv->duplex) {
+			new_state = true;
+			priv->duplex = phydev->duplex;
+			ravb_set_duplex(ndev);
+		}
+
+		if (phydev->speed != priv->speed) {
+			new_state = true;
+			priv->speed = phydev->speed;
+			ravb_set_rate(ndev);
+		}
+		if (!priv->link) {
+			ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF,
+				   ECMR);
+			new_state = true;
+			priv->link = phydev->link;
+			if (priv->no_avb_link)
+				ravb_rcv_snd_enable(ndev);
+		}
+	} else if (priv->link) {
+		new_state = true;
+		priv->link = 0;
+		priv->speed = 0;
+		priv->duplex = -1;
+		if (priv->no_avb_link)
+			ravb_rcv_snd_disable(ndev);
+	}
+
+	if (new_state && netif_msg_link(priv))
+		phy_print_status(phydev);
+}
+
+/* PHY init function */
+static int ravb_phy_init(struct net_device *ndev)
+{
+	struct device_node *np = ndev->dev.parent->of_node;
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct phy_device *phydev;
+	struct device_node *pn;
+
+	priv->link = 0;
+	priv->speed = 0;
+	priv->duplex = -1;
+
+	/* Try connecting to PHY */
+	pn = of_parse_phandle(np, "phy-handle", 0);
+	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
+				priv->phy_interface);
+	if (!phydev) {
+		netdev_err(ndev, "failed to connect PHY\n");
+		return -ENOENT;
+	}
+
+	netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
+		    phydev->addr, phydev->irq, phydev->drv->name);
+
+	priv->phydev = phydev;
+
+	return 0;
+}
+
+/* PHY control start function */
+static int ravb_phy_start(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	int error;
+
+	error = ravb_phy_init(ndev);
+	if (error)
+		return error;
+
+	phy_start(priv->phydev);
+
+	return 0;
+}
+
+static int ravb_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	int error = -ENODEV;
+	unsigned long flags;
+
+	if (priv->phydev) {
+		spin_lock_irqsave(&priv->lock, flags);
+		error = phy_ethtool_gset(priv->phydev, ecmd);
+		spin_unlock_irqrestore(&priv->lock, flags);
+	}
+
+	return error;
+}
+
+static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	unsigned long flags;
+	int error;
+
+	if (!priv->phydev)
+		return -ENODEV;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* Disable TX and RX */
+	ravb_rcv_snd_disable(ndev);
+
+	error = phy_ethtool_sset(priv->phydev, ecmd);
+	if (error)
+		goto error_exit;
+
+	if (ecmd->duplex == DUPLEX_FULL)
+		priv->duplex = 1;
+	else
+		priv->duplex = 0;
+
+	ravb_set_duplex(ndev);
+
+error_exit:
+	mdelay(1);
+
+	/* Enable TX and RX */
+	ravb_rcv_snd_enable(ndev);
+
+	mmiowb();
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return error;
+}
+
+static int ravb_nway_reset(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	int error = -ENODEV;
+	unsigned long flags;
+
+	if (priv->phydev) {
+		spin_lock_irqsave(&priv->lock, flags);
+		error = phy_start_aneg(priv->phydev);
+		spin_unlock_irqrestore(&priv->lock, flags);
+	}
+
+	return error;
+}
+
+static u32 ravb_get_msglevel(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+
+	return priv->msg_enable;
+}
+
+static void ravb_set_msglevel(struct net_device *ndev, u32 value)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+
+	priv->msg_enable = value;
+}
+
+static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"rx_queue_0_current",
+	"tx_queue_0_current",
+	"rx_queue_0_dirty",
+	"tx_queue_0_dirty",
+	"rx_queue_0_packets",
+	"tx_queue_0_packets",
+	"rx_queue_0_bytes",
+	"tx_queue_0_bytes",
+	"rx_queue_0_mcast_packets",
+	"rx_queue_0_errors",
+	"rx_queue_0_crc_errors",
+	"rx_queue_0_frame_errors",
+	"rx_queue_0_length_errors",
+	"rx_queue_0_missed_errors",
+	"rx_queue_0_over_errors",
+
+	"rx_queue_1_current",
+	"tx_queue_1_current",
+	"rx_queue_1_dirty",
+	"tx_queue_1_dirty",
+	"rx_queue_1_packets",
+	"tx_queue_1_packets",
+	"rx_queue_1_bytes",
+	"tx_queue_1_bytes",
+	"rx_queue_1_mcast_packets",
+	"rx_queue_1_errors",
+	"rx_queue_1_crc_errors",
+	"rx_queue_1_frame_errors",
+	"rx_queue_1_length_errors",
+	"rx_queue_1_missed_errors",
+	"rx_queue_1_over_errors",
+};
+
+#define RAVB_STATS_LEN	ARRAY_SIZE(ravb_gstrings_stats)
+
+static int ravb_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return RAVB_STATS_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void ravb_get_ethtool_stats(struct net_device *ndev,
+				   struct ethtool_stats *stats, u64 *data)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	int i = 0;
+	int q;
+
+	/* Device-specific stats */
+	for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
+		struct net_device_stats *stats = &priv->stats[q];
+
+		data[i++] = priv->cur_rx[q];
+		data[i++] = priv->cur_tx[q];
+		data[i++] = priv->dirty_rx[q];
+		data[i++] = priv->dirty_tx[q];
+		data[i++] = stats->rx_packets;
+		data[i++] = stats->tx_packets;
+		data[i++] = stats->rx_bytes;
+		data[i++] = stats->tx_bytes;
+		data[i++] = stats->multicast;
+		data[i++] = stats->rx_errors;
+		data[i++] = stats->rx_crc_errors;
+		data[i++] = stats->rx_frame_errors;
+		data[i++] = stats->rx_length_errors;
+		data[i++] = stats->rx_missed_errors;
+		data[i++] = stats->rx_over_errors;
+	}
+}
+
+static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
+		break;
+	}
+}
+
+static void ravb_get_ringparam(struct net_device *ndev,
+			       struct ethtool_ringparam *ring)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+
+	ring->rx_max_pending = BE_RX_RING_MAX;
+	ring->tx_max_pending = BE_TX_RING_MAX;
+	ring->rx_pending = priv->num_rx_ring[RAVB_BE];
+	ring->tx_pending = priv->num_tx_ring[RAVB_BE];
+}
+
+static int ravb_set_ringparam(struct net_device *ndev,
+			      struct ethtool_ringparam *ring)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	int error;
+
+	if (ring->tx_pending > BE_TX_RING_MAX ||
+	    ring->rx_pending > BE_RX_RING_MAX ||
+	    ring->tx_pending < BE_TX_RING_MIN ||
+	    ring->rx_pending < BE_RX_RING_MIN)
+		return -EINVAL;
+	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+		return -EINVAL;
+
+	if (netif_running(ndev)) {
+		netif_device_detach(ndev);
+		/* Stop PTP Clock driver */
+		ravb_ptp_stop(ndev);
+		/* Wait for DMA stopping */
+		error = ravb_stop_dma(ndev);
+		if (error) {
+			netdev_err(ndev,
+				   "cannot set ringparam; an AVB DMA process may still be running\n");
+			return error;
+		}
+		synchronize_irq(ndev->irq);
+
+		/* Free all the skb's in the RX queue and the DMA buffers. */
+		ravb_ring_free(ndev, RAVB_BE);
+		ravb_ring_free(ndev, RAVB_NC);
+	}
+
+	/* Set new parameters */
+	priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
+	priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
+
+	if (netif_running(ndev)) {
+		error = ravb_dmac_init(ndev);
+		if (error) {
+			netdev_err(ndev,
+				   "%s: ravb_dmac_init() failed, error %d\n",
+				   __func__, error);
+			return error;
+		}
+
+		ravb_emac_init(ndev);
+
+		/* Initialise PTP Clock driver */
+		ravb_ptp_init(ndev, priv->pdev);
+
+		netif_device_attach(ndev);
+	}
+
+	return 0;
+}
+
+static int ravb_get_ts_info(struct net_device *ndev,
+			    struct ethtool_ts_info *info)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_SOFTWARE |
+		SOF_TIMESTAMPING_RX_SOFTWARE |
+		SOF_TIMESTAMPING_SOFTWARE |
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RAW_HARDWARE;
+	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+	info->rx_filters =
+		(1 << HWTSTAMP_FILTER_NONE) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+		(1 << HWTSTAMP_FILTER_ALL);
+	info->phc_index = ptp_clock_index(priv->ptp.clock);
+
+	return 0;
+}
+
+static const struct ethtool_ops ravb_ethtool_ops = {
+	.get_settings		= ravb_get_settings,
+	.set_settings		= ravb_set_settings,
+	.nway_reset		= ravb_nway_reset,
+	.get_msglevel		= ravb_get_msglevel,
+	.set_msglevel		= ravb_set_msglevel,
+	.get_link		= ethtool_op_get_link,
+	.get_strings		= ravb_get_strings,
+	.get_ethtool_stats	= ravb_get_ethtool_stats,
+	.get_sset_count		= ravb_get_sset_count,
+	.get_ringparam		= ravb_get_ringparam,
+	.set_ringparam		= ravb_set_ringparam,
+	.get_ts_info		= ravb_get_ts_info,
+};
+
+/* Network device open function for Ethernet AVB */
+static int ravb_open(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	int error;
+
+	napi_enable(&priv->napi[RAVB_BE]);
+	napi_enable(&priv->napi[RAVB_NC]);
+
+	error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name,
+			    ndev);
+	if (error) {
+		netdev_err(ndev, "cannot request IRQ\n");
+		goto out_napi_off;
+	}
+
+	/* Device init */
+	error = ravb_dmac_init(ndev);
+	if (error)
+		goto out_free_irq;
+	ravb_emac_init(ndev);
+
+	/* Initialise PTP Clock driver */
+	ravb_ptp_init(ndev, priv->pdev);
+
+	netif_tx_start_all_queues(ndev);
+
+	/* PHY control start */
+	error = ravb_phy_start(ndev);
+	if (error)
+		goto out_ptp_stop;
+
+	return 0;
+
+out_ptp_stop:
+	/* Stop PTP Clock driver */
+	ravb_ptp_stop(ndev);
+out_free_irq:
+	free_irq(ndev->irq, ndev);
+out_napi_off:
+	napi_disable(&priv->napi[RAVB_NC]);
+	napi_disable(&priv->napi[RAVB_BE]);
+	return error;
+}
+
+/* Timeout function for Ethernet AVB */
+static void ravb_tx_timeout(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+
+	netif_err(priv, tx_err, ndev,
+		  "transmit timed out, status %08x, resetting...\n",
+		  ravb_read(ndev, ISS));
+
+	/* tx_errors count up */
+	ndev->stats.tx_errors++;
+
+	schedule_work(&priv->work);
+}
+
+static void ravb_tx_timeout_work(struct work_struct *work)
+{
+	struct ravb_private *priv = container_of(work, struct ravb_private,
+						 work);
+	struct net_device *ndev = priv->ndev;
+
+	netif_tx_stop_all_queues(ndev);
+
+	/* Stop PTP Clock driver */
+	ravb_ptp_stop(ndev);
+
+	/* Wait for DMA stopping */
+	ravb_stop_dma(ndev);
+
+	ravb_ring_free(ndev, RAVB_BE);
+	ravb_ring_free(ndev, RAVB_NC);
+
+	/* Device init */
+	ravb_dmac_init(ndev);
+	ravb_emac_init(ndev);
+
+	/* Initialise PTP Clock driver */
+	ravb_ptp_init(ndev, priv->pdev);
+
+	netif_tx_start_all_queues(ndev);
+}
+
+/* Packet transmit function for Ethernet AVB */
+static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct ravb_tstamp_skb *ts_skb = NULL;
+	u16 q = skb_get_queue_mapping(skb);
+	struct ravb_tx_desc *desc;
+	unsigned long flags;
+	u32 dma_addr;
+	void *buffer;
+	u32 entry;
+	u32 tccr;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
+		netif_err(priv, tx_queued, ndev,
+			  "still transmitting with the full ring!\n");
+		netif_stop_subqueue(ndev, q);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		return NETDEV_TX_BUSY;
+	}
+	entry = priv->cur_tx[q] % priv->num_tx_ring[q];
+	priv->tx_skb[q][entry] = skb;
+
+	if (skb_put_padto(skb, ETH_ZLEN)) {
+		/* skb_put_padto() already freed the skb on failure */
+		priv->tx_skb[q][entry] = NULL;
+		goto exit;
+	}
+
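+	/* The DMAC needs RAVB_ALIGN-aligned data, so copy the frame into a
+	 * preallocated aligned bounce buffer.
+	 */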
+	buffer = PTR_ALIGN(priv->tx_buffers[q][entry], RAVB_ALIGN);
+	memcpy(buffer, skb->data, skb->len);
+	desc = &priv->tx_ring[q][entry];
+	desc->ds_tagl = cpu_to_le16(skb->len);
+	dma_addr = dma_map_single(&ndev->dev, buffer, skb->len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&ndev->dev, dma_addr))
+		goto drop;
+	desc->dptr = cpu_to_le32(dma_addr);
+
+	/* TX timestamp required */
+	if (q == RAVB_NC) {
+		ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
+		if (!ts_skb) {
+			dma_unmap_single(&ndev->dev, dma_addr, skb->len,
+					 DMA_TO_DEVICE);
+			goto drop;
+		}
+		ts_skb->skb = skb;
+		ts_skb->tag = priv->ts_skb_tag++;
+		priv->ts_skb_tag &= 0x3ff;
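+		/* The 10-bit tag is split below: the upper 6 bits go into
+		 * TAGH, the lower 4 into DS_TAGL[15:12].
+		 */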
+		list_add_tail(&ts_skb->list, &priv->ts_skb_list);
+
+		/* TAG and timestamp required flag */
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		skb_tx_timestamp(skb);
+		desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
+		desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
+	}
+
+	/* Descriptor type must be set after all the above writes */
+	dma_wmb();
+	desc->die_dt = DT_FSINGLE;
+
+	tccr = ravb_read(ndev, TCCR);
+	if (!(tccr & (TCCR_TSRQ0 << q)))
+		ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR);
+
+	priv->cur_tx[q]++;
+	if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
+	    !ravb_tx_free(ndev, q))
+		netif_stop_subqueue(ndev, q);
+
+exit:
+	mmiowb();
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return NETDEV_TX_OK;
+
+drop:
+	dev_kfree_skb_any(skb);
+	priv->tx_skb[q][entry] = NULL;
+	goto exit;
+}
+
+static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
+			     void *accel_priv, select_queue_fallback_t fallback)
+{
+	/* If skb needs TX timestamp, it is handled in network control queue */
+	return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
+							       RAVB_BE;
+}
+
+static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct net_device_stats *nstats, *stats0, *stats1;
+
+	nstats = &ndev->stats;
+	stats0 = &priv->stats[RAVB_BE];
+	stats1 = &priv->stats[RAVB_NC];
+
+	nstats->tx_dropped += ravb_read(ndev, TROCR);
+	ravb_write(ndev, 0, TROCR);	/* (write clear) */
+	nstats->collisions += ravb_read(ndev, CDCR);
+	ravb_write(ndev, 0, CDCR);	/* (write clear) */
+	nstats->tx_carrier_errors += ravb_read(ndev, LCCR);
+	ravb_write(ndev, 0, LCCR);	/* (write clear) */
+
+	nstats->tx_carrier_errors += ravb_read(ndev, CERCR);
+	ravb_write(ndev, 0, CERCR);	/* (write clear) */
+	nstats->tx_carrier_errors += ravb_read(ndev, CEECR);
+	ravb_write(ndev, 0, CEECR);	/* (write clear) */
+
+	nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
+	nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
+	nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
+	nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
+	nstats->multicast = stats0->multicast + stats1->multicast;
+	nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
+	nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
+	nstats->rx_frame_errors =
+		stats0->rx_frame_errors + stats1->rx_frame_errors;
+	nstats->rx_length_errors =
+		stats0->rx_length_errors + stats1->rx_length_errors;
+	nstats->rx_missed_errors =
+		stats0->rx_missed_errors + stats1->rx_missed_errors;
+	nstats->rx_over_errors =
+		stats0->rx_over_errors + stats1->rx_over_errors;
+
+	return nstats;
+}
+
+/* Update promiscuous bit */
+static void ravb_set_rx_mode(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	unsigned long flags;
+	u32 ecmr;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	ecmr = ravb_read(ndev, ECMR);
+	if (ndev->flags & IFF_PROMISC)
+		ecmr |=  ECMR_PRM;
+	else
+		ecmr &= ~ECMR_PRM;
+	ravb_write(ndev, ecmr, ECMR);
+	mmiowb();
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/* Device close function for Ethernet AVB */
+static int ravb_close(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
+
+	netif_tx_stop_all_queues(ndev);
+
+	/* Disable interrupts by clearing the interrupt masks. */
+	ravb_write(ndev, 0, RIC0);
+	ravb_write(ndev, 0, RIC1);
+	ravb_write(ndev, 0, RIC2);
+	ravb_write(ndev, 0, TIC);
+
+	/* Stop PTP Clock driver */
+	ravb_ptp_stop(ndev);
+
+	/* Set the config mode to stop the AVB-DMAC's processes */
+	if (ravb_stop_dma(ndev) < 0)
+		netdev_err(ndev,
+			   "device will be stopped after h/w processes are done.\n");
+
+	/* Clear the timestamp list */
+	list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
+		list_del(&ts_skb->list);
+		kfree(ts_skb);
+	}
+
+	/* PHY disconnect */
+	if (priv->phydev) {
+		phy_stop(priv->phydev);
+		phy_disconnect(priv->phydev);
+		priv->phydev = NULL;
+	}
+
+	free_irq(ndev->irq, ndev);
+
+	napi_disable(&priv->napi[RAVB_NC]);
+	napi_disable(&priv->napi[RAVB_BE]);
+
+	/* Free all the skb's in the RX queue and the DMA buffers. */
+	ravb_ring_free(ndev, RAVB_BE);
+	ravb_ring_free(ndev, RAVB_NC);
+
+	return 0;
+}
+
+static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct hwtstamp_config config;
+
+	config.flags = 0;
+	config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
+						HWTSTAMP_TX_OFF;
+	if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+	else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
+		config.rx_filter = HWTSTAMP_FILTER_ALL;
+	else
+		config.rx_filter = HWTSTAMP_FILTER_NONE;
+
+	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
+		-EFAULT : 0;
+}
+
+/* Control hardware time stamping */
+static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct hwtstamp_config config;
+	u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
+	u32 tstamp_tx_ctrl;
+
+	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	/* Reserved for future extensions */
+	if (config.flags)
+		return -EINVAL;
+
+	switch (config.tx_type) {
+	case HWTSTAMP_TX_OFF:
+		tstamp_tx_ctrl = 0;
+		break;
+	case HWTSTAMP_TX_ON:
+		tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (config.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		tstamp_rx_ctrl = 0;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
+		break;
+	default:
+		config.rx_filter = HWTSTAMP_FILTER_ALL;
+		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
+	}
+
+	priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
+	priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
+
+	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
+		-EFAULT : 0;
+}
+
+/* ioctl to device function */
+static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct phy_device *phydev = priv->phydev;
+
+	if (!netif_running(ndev))
+		return -EINVAL;
+
+	if (!phydev)
+		return -ENODEV;
+
+	switch (cmd) {
+	case SIOCGHWTSTAMP:
+		return ravb_hwtstamp_get(ndev, req);
+	case SIOCSHWTSTAMP:
+		return ravb_hwtstamp_set(ndev, req);
+	}
+
+	return phy_mii_ioctl(phydev, req, cmd);
+}
+
+static const struct net_device_ops ravb_netdev_ops = {
+	.ndo_open		= ravb_open,
+	.ndo_stop		= ravb_close,
+	.ndo_start_xmit		= ravb_start_xmit,
+	.ndo_select_queue	= ravb_select_queue,
+	.ndo_get_stats		= ravb_get_stats,
+	.ndo_set_rx_mode	= ravb_set_rx_mode,
+	.ndo_tx_timeout		= ravb_tx_timeout,
+	.ndo_do_ioctl		= ravb_do_ioctl,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_change_mtu		= eth_change_mtu,
+};
+
+/* MDIO bus init function */
+static int ravb_mdio_init(struct ravb_private *priv)
+{
+	struct platform_device *pdev = priv->pdev;
+	struct device *dev = &pdev->dev;
+	int error;
+
+	/* Bitbang init */
+	priv->mdiobb.ops = &bb_ops;
+
+	/* MII controller setting */
+	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
+	if (!priv->mii_bus)
+		return -ENOMEM;
+
+	/* Hook up MII support for ethtool */
+	priv->mii_bus->name = "ravb_mii";
+	priv->mii_bus->parent = dev;
+	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		 pdev->name, pdev->id);
+
+	/* Register MDIO bus */
+	error = of_mdiobus_register(priv->mii_bus, dev->of_node);
+	if (error)
+		goto out_free_bus;
+
+	return 0;
+
+out_free_bus:
+	free_mdio_bitbang(priv->mii_bus);
+	return error;
+}
+
+/* MDIO bus release function */
+static int ravb_mdio_release(struct ravb_private *priv)
+{
+	/* Unregister mdio bus */
+	mdiobus_unregister(priv->mii_bus);
+
+	/* Free bitbang info */
+	free_mdio_bitbang(priv->mii_bus);
+
+	return 0;
+}
+
+static int ravb_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct ravb_private *priv;
+	struct net_device *ndev;
+	int error, irq, q;
+	struct resource *res;
+
+	if (!np) {
+		dev_err(&pdev->dev,
+			"this driver must be instantiated from the device tree\n");
+		return -EINVAL;
+	}
+
+	/* Get base address */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "invalid resource\n");
+		return -EINVAL;
+	}
+
+	ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
+				  NUM_TX_QUEUE, NUM_RX_QUEUE);
+	if (!ndev)
+		return -ENOMEM;
+
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_sync(&pdev->dev);
+
+	/* The Ether-specific entries in the device structure. */
+	ndev->base_addr = res->start;
+	ndev->dma = -1;
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		error = -ENODEV;
+		goto out_release;
+	}
+	ndev->irq = irq;
+
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+
+	priv = netdev_priv(ndev);
+	priv->ndev = ndev;
+	priv->pdev = pdev;
+	priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
+	priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
+	priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
+	priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
+	priv->addr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->addr)) {
+		error = PTR_ERR(priv->addr);
+		goto out_release;
+	}
+
+	spin_lock_init(&priv->lock);
+	INIT_WORK(&priv->work, ravb_tx_timeout_work);
+
+	priv->phy_interface = of_get_phy_mode(np);
+
+	priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
+	priv->avb_link_active_low =
+		of_property_read_bool(np, "renesas,ether-link-active-low");
+
+	/* Set function */
+	ndev->netdev_ops = &ravb_netdev_ops;
+	ndev->ethtool_ops = &ravb_ethtool_ops;
+
+	/* Set AVB config mode */
+	ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
+		   CCC);
+
+	/* Set CSEL value */
+	ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB,
+		   CCC);
+
+	/* Set GTI value */
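+	/* GTI holds the timer increment in nanoseconds per clock cycle as
+	 * a fixed-point value with 20 fractional bits; the divisor of 130
+	 * assumes a 130 MHz gPTP clock.
+	 */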
+	ravb_write(ndev, ((1000 << 20) / 130) & GTI_TIV, GTI);
+
+	/* Request GTI loading */
+	ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR);
+
+	/* Allocate descriptor base address table */
+	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
+	priv->desc_bat = dma_alloc_coherent(NULL, priv->desc_bat_size,
+					    &priv->desc_bat_dma, GFP_KERNEL);
+	if (!priv->desc_bat) {
+		dev_err(&ndev->dev,
+			"Cannot allocate desc base address table (size %d bytes)\n",
+			priv->desc_bat_size);
+		error = -ENOMEM;
+		goto out_release;
+	}
+	for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
+		priv->desc_bat[q].die_dt = DT_EOS;
+	ravb_write(ndev, priv->desc_bat_dma, DBAT);
+
+	/* Initialise HW timestamp list */
+	INIT_LIST_HEAD(&priv->ts_skb_list);
+
+	/* Debug message level */
+	priv->msg_enable = RAVB_DEF_MSG_ENABLE;
+
+	/* Read and set MAC address */
+	ravb_read_mac_address(ndev, of_get_mac_address(np));
+	if (!is_valid_ether_addr(ndev->dev_addr)) {
+		dev_warn(&pdev->dev,
+			 "no valid MAC address supplied, using a random one\n");
+		eth_hw_addr_random(ndev);
+	}
+
+	/* MDIO bus init */
+	error = ravb_mdio_init(priv);
+	if (error) {
+		dev_err(&ndev->dev, "failed to initialize MDIO\n");
+		goto out_dma_free;
+	}
+
+	netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
+	netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
+
+	/* Network device register */
+	error = register_netdev(ndev);
+	if (error)
+		goto out_napi_del;
+
+	/* Print device information */
+	netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
+		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
+
+	platform_set_drvdata(pdev, ndev);
+
+	return 0;
+
+out_napi_del:
+	netif_napi_del(&priv->napi[RAVB_NC]);
+	netif_napi_del(&priv->napi[RAVB_BE]);
+	ravb_mdio_release(priv);
+out_dma_free:
+	dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
+			  priv->desc_bat_dma);
+out_release:
+	if (ndev)
+		free_netdev(ndev);
+
+	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	return error;
+}
+
+static int ravb_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct ravb_private *priv = netdev_priv(ndev);
+
+	dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
+			  priv->desc_bat_dma);
+	/* Set reset mode */
+	ravb_write(ndev, CCC_OPC_RESET, CCC);
+	pm_runtime_put_sync(&pdev->dev);
+	unregister_netdev(ndev);
+	netif_napi_del(&priv->napi[RAVB_NC]);
+	netif_napi_del(&priv->napi[RAVB_BE]);
+	ravb_mdio_release(priv);
+	pm_runtime_disable(&pdev->dev);
+	free_netdev(ndev);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int ravb_runtime_nop(struct device *dev)
+{
+	/* Runtime PM callback shared between ->runtime_suspend()
+	 * and ->runtime_resume(). Simply returns success.
+	 *
+	 * This driver re-initializes all registers after
+	 * pm_runtime_get_sync() anyway so there is no need
+	 * to save and restore registers here.
+	 */
+	return 0;
+}
+
+static const struct dev_pm_ops ravb_dev_pm_ops = {
+	.runtime_suspend = ravb_runtime_nop,
+	.runtime_resume = ravb_runtime_nop,
+};
+
+#define RAVB_PM_OPS (&ravb_dev_pm_ops)
+#else
+#define RAVB_PM_OPS NULL
+#endif
+
+static const struct of_device_id ravb_match_table[] = {
+	{ .compatible = "renesas,etheravb-r8a7790" },
+	{ .compatible = "renesas,etheravb-r8a7794" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ravb_match_table);
+
+static struct platform_driver ravb_driver = {
+	.probe		= ravb_probe,
+	.remove		= ravb_remove,
+	.driver = {
+		.name	= "ravb",
+		.pm	= RAVB_PM_OPS,
+		.of_match_table = ravb_match_table,
+	},
+};
+
+module_platform_driver(ravb_driver);
+
+MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
+MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
new file mode 100644
index 0000000..42656da
--- /dev/null
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -0,0 +1,357 @@
+/* PTP 1588 clock using the Renesas Ethernet AVB
+ *
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ */
+
+#include "ravb.h"
+
+static int ravb_ptp_tcr_request(struct ravb_private *priv, u32 request)
+{
+	struct net_device *ndev = priv->ndev;
+	int error;
+
+	error = ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
+	if (error)
+		return error;
+
+	ravb_write(ndev, ravb_read(ndev, GCCR) | request, GCCR);
+	return ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
+}
+
+/* Caller must hold the lock */
+static int ravb_ptp_time_read(struct ravb_private *priv, struct timespec64 *ts)
+{
+	struct net_device *ndev = priv->ndev;
+	int error;
+
+	error = ravb_ptp_tcr_request(priv, GCCR_TCR_CAPTURE);
+	if (error)
+		return error;
+
+	ts->tv_nsec = ravb_read(ndev, GCT0);
+	ts->tv_sec  = ravb_read(ndev, GCT1) |
+		((s64)ravb_read(ndev, GCT2) << 32);
+
+	return 0;
+}
+
+/* Caller must hold the lock */
+static int ravb_ptp_time_write(struct ravb_private *priv,
+				const struct timespec64 *ts)
+{
+	struct net_device *ndev = priv->ndev;
+	int error;
+	u32 gccr;
+
+	error = ravb_ptp_tcr_request(priv, GCCR_TCR_RESET);
+	if (error)
+		return error;
+
+	gccr = ravb_read(ndev, GCCR);
+	if (gccr & GCCR_LTO)
+		return -EBUSY;
+	ravb_write(ndev, ts->tv_nsec, GTO0);
+	ravb_write(ndev, ts->tv_sec,  GTO1);
+	ravb_write(ndev, (ts->tv_sec >> 32) & 0xffff, GTO2);
+	ravb_write(ndev, gccr | GCCR_LTO, GCCR);
+
+	return 0;
+}
+
+/* Caller must hold the lock */
+static int ravb_ptp_update_compare(struct ravb_private *priv, u32 ns)
+{
+	struct net_device *ndev = priv->ndev;
+	/* When the comparison value (GPTC.PTCV) is in range of
+	 * [x-1 to x+1] (x is the configured increment value in
+	 * GTI.TIV), it may happen that a comparison match is
+	 * not detected when the timer wraps around.
+	 */
+	u32 gti_ns_plus_1 = (priv->ptp.current_addend >> 20) + 1;
+	u32 gccr;
+
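+	/* Keep ns at least gti_ns_plus_1 away from the wrap point in both
+	 * directions; 0 - gti_ns_plus_1 relies on u32 wraparound.
+	 */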
+	if (ns < gti_ns_plus_1)
+		ns = gti_ns_plus_1;
+	else if (ns > 0 - gti_ns_plus_1)
+		ns = 0 - gti_ns_plus_1;
+
+	gccr = ravb_read(ndev, GCCR);
+	if (gccr & GCCR_LPTC)
+		return -EBUSY;
+	ravb_write(ndev, ns, GPTC);
+	ravb_write(ndev, gccr | GCCR_LPTC, GCCR);
+
+	return 0;
+}
+
+/* PTP clock operations */
+static int ravb_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	struct ravb_private *priv = container_of(ptp, struct ravb_private,
+						 ptp.info);
+	struct net_device *ndev = priv->ndev;
+	unsigned long flags;
+	u32 diff, addend;
+	bool neg_adj = false;
+	u32 gccr;
+
+	if (ppb < 0) {
+		neg_adj = true;
+		ppb = -ppb;
+	}
+	addend = priv->ptp.default_addend;
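+	/* Scale the nominal addend by ppb parts per billion */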
+	diff = div_u64((u64)addend * ppb, NSEC_PER_SEC);
+
+	addend = neg_adj ? addend - diff : addend + diff;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	priv->ptp.current_addend = addend;
+
+	gccr = ravb_read(ndev, GCCR);
+	if (gccr & GCCR_LTI) {
+		spin_unlock_irqrestore(&priv->lock, flags);
+		return -EBUSY;
+	}
+	ravb_write(ndev, addend & GTI_TIV, GTI);
+	ravb_write(ndev, gccr | GCCR_LTI, GCCR);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return 0;
+}
+
+static int ravb_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	struct ravb_private *priv = container_of(ptp, struct ravb_private,
+						 ptp.info);
+	struct timespec64 ts;
+	unsigned long flags;
+	int error;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	error = ravb_ptp_time_read(priv, &ts);
+	if (!error) {
+		u64 now = ktime_to_ns(timespec64_to_ktime(ts));
+
+		ts = ns_to_timespec64(now + delta);
+		error = ravb_ptp_time_write(priv, &ts);
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return error;
+}
+
+static int ravb_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+	struct ravb_private *priv = container_of(ptp, struct ravb_private,
+						 ptp.info);
+	unsigned long flags;
+	int error;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	error = ravb_ptp_time_read(priv, ts);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return error;
+}
+
+static int ravb_ptp_settime64(struct ptp_clock_info *ptp,
+			      const struct timespec64 *ts)
+{
+	struct ravb_private *priv = container_of(ptp, struct ravb_private,
+						 ptp.info);
+	unsigned long flags;
+	int error;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	error = ravb_ptp_time_write(priv, ts);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return error;
+}
+
+static int ravb_ptp_extts(struct ptp_clock_info *ptp,
+			  struct ptp_extts_request *req, int on)
+{
+	struct ravb_private *priv = container_of(ptp, struct ravb_private,
+						 ptp.info);
+	struct net_device *ndev = priv->ndev;
+	unsigned long flags;
+	u32 gic;
+
+	if (req->index)
+		return -EINVAL;
+
+	if (priv->ptp.extts[req->index] == on)
+		return 0;
+	priv->ptp.extts[req->index] = on;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	gic = ravb_read(ndev, GIC);
+	if (on)
+		gic |= GIC_PTCE;
+	else
+		gic &= ~GIC_PTCE;
+	ravb_write(ndev, gic, GIC);
+	mmiowb();
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return 0;
+}
+
+static int ravb_ptp_perout(struct ptp_clock_info *ptp,
+			   struct ptp_perout_request *req, int on)
+{
+	struct ravb_private *priv = container_of(ptp, struct ravb_private,
+						 ptp.info);
+	struct net_device *ndev = priv->ndev;
+	struct ravb_ptp_perout *perout;
+	unsigned long flags;
+	int error = 0;
+	u32 gic;
+
+	if (req->index)
+		return -EINVAL;
+
+	if (on) {
+		u64 start_ns;
+		u64 period_ns;
+
+		start_ns = req->start.sec * NSEC_PER_SEC + req->start.nsec;
+		period_ns = req->period.sec * NSEC_PER_SEC + req->period.nsec;
+
+		if (start_ns > U32_MAX) {
+			netdev_warn(ndev,
+				    "ptp: start value exceeds the 32-bit limit\n");
+			return -ERANGE;
+		}
+
+		if (period_ns > U32_MAX) {
+			netdev_warn(ndev,
+				    "ptp: period value exceeds the 32-bit limit\n");
+			return -ERANGE;
+		}
+
+		spin_lock_irqsave(&priv->lock, flags);
+
+		perout = &priv->ptp.perout[req->index];
+		perout->target = (u32)start_ns;
+		perout->period = (u32)period_ns;
+		error = ravb_ptp_update_compare(priv, (u32)start_ns);
+		if (!error) {
+			/* Unmask interrupt */
+			gic = ravb_read(ndev, GIC);
+			gic |= GIC_PTME;
+			ravb_write(ndev, gic, GIC);
+		}
+	} else	{
+		spin_lock_irqsave(&priv->lock, flags);
+
+		perout = &priv->ptp.perout[req->index];
+		perout->period = 0;
+
+		/* Mask interrupt */
+		gic = ravb_read(ndev, GIC);
+		gic &= ~GIC_PTME;
+		ravb_write(ndev, gic, GIC);
+	}
+	mmiowb();
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return error;
+}
+
+static int ravb_ptp_enable(struct ptp_clock_info *ptp,
+			   struct ptp_clock_request *req, int on)
+{
+	switch (req->type) {
+	case PTP_CLK_REQ_EXTTS:
+		return ravb_ptp_extts(ptp, &req->extts, on);
+	case PTP_CLK_REQ_PEROUT:
+		return ravb_ptp_perout(ptp, &req->perout, on);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static const struct ptp_clock_info ravb_ptp_info = {
+	.owner		= THIS_MODULE,
+	.name		= "ravb clock",
+	.max_adj	= 50000000,
+	.n_ext_ts	= N_EXT_TS,
+	.n_per_out	= N_PER_OUT,
+	.adjfreq	= ravb_ptp_adjfreq,
+	.adjtime	= ravb_ptp_adjtime,
+	.gettime64	= ravb_ptp_gettime64,
+	.settime64	= ravb_ptp_settime64,
+	.enable		= ravb_ptp_enable,
+};
+
+/* Caller must hold the lock */
+irqreturn_t ravb_ptp_interrupt(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	u32 gis = ravb_read(ndev, GIS);
+
+	gis &= ravb_read(ndev, GIC);
+	if (gis & GIS_PTCF) {
+		struct ptp_clock_event event;
+
+		event.type = PTP_CLOCK_EXTTS;
+		event.index = 0;
+		event.timestamp = ravb_read(ndev, GCPT);
+		ptp_clock_event(priv->ptp.clock, &event);
+	}
+	if (gis & GIS_PTMF) {
+		struct ravb_ptp_perout *perout = priv->ptp.perout;
+
+		if (perout->period) {
+			perout->target += perout->period;
+			ravb_ptp_update_compare(priv, perout->target);
+		}
+	}
+
+	if (gis) {
+		ravb_write(ndev, ~gis, GIS);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	unsigned long flags;
+	u32 gccr;
+
+	priv->ptp.info = ravb_ptp_info;
+
+	priv->ptp.default_addend = ravb_read(ndev, GTI);
+	priv->ptp.current_addend = priv->ptp.default_addend;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
+	gccr = ravb_read(ndev, GCCR) & ~GCCR_TCSS;
+	ravb_write(ndev, gccr | GCCR_TCSS_ADJGPTP, GCCR);
+	mmiowb();
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	priv->ptp.clock = ptp_clock_register(&priv->ptp.info, &pdev->dev);
+}
+
+void ravb_ptp_stop(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+
+	ravb_write(ndev, 0, GIC);
+	ravb_write(ndev, 0, GIS);
+
+	ptp_clock_unregister(priv->ptp.clock);
+}
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index cf98cc9..2d8578cade 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -181,7 +181,7 @@
 	size_t data_size;
 	size_t tlv_size;
 	struct rocker_desc *desc;
-	DEFINE_DMA_UNMAP_ADDR(mapaddr);
+	dma_addr_t mapaddr;
 };
 
 struct rocker_dma_ring_info {
@@ -225,6 +225,7 @@
 	struct napi_struct napi_rx;
 	struct rocker_dma_ring_info tx_ring;
 	struct rocker_dma_ring_info rx_ring;
+	struct list_head trans_mem;
 };
 
 struct rocker {
@@ -236,21 +237,21 @@
 	struct {
 		u64 id;
 	} hw;
-	spinlock_t cmd_ring_lock;
+	spinlock_t cmd_ring_lock;		/* for cmd ring accesses */
 	struct rocker_dma_ring_info cmd_ring;
 	struct rocker_dma_ring_info event_ring;
 	DECLARE_HASHTABLE(flow_tbl, 16);
-	spinlock_t flow_tbl_lock;
+	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
 	u64 flow_tbl_next_cookie;
 	DECLARE_HASHTABLE(group_tbl, 16);
-	spinlock_t group_tbl_lock;
+	spinlock_t group_tbl_lock;		/* for group tbl accesses */
 	DECLARE_HASHTABLE(fdb_tbl, 16);
-	spinlock_t fdb_tbl_lock;
+	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
 	unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
 	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
-	spinlock_t internal_vlan_tbl_lock;
+	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
 	DECLARE_HASHTABLE(neigh_tbl, 16);
-	spinlock_t neigh_tbl_lock;
+	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
 	u32 neigh_tbl_next_index;
 };
 
@@ -294,7 +295,7 @@
 	return (_vlan_id >= start && _vlan_id <= end);
 }
 
-static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
+static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
 				      u16 vid, bool *pop_vlan)
 {
 	__be16 vlan_id;
@@ -311,7 +312,7 @@
 	return vlan_id;
 }
 
-static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
+static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
 				   __be16 vlan_id)
 {
 	if (rocker_vlan_id_is_internal(vlan_id))
@@ -320,11 +321,88 @@
 	return ntohs(vlan_id);
 }
 
-static bool rocker_port_is_bridged(struct rocker_port *rocker_port)
+static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
 {
 	return !!rocker_port->bridge_dev;
 }
 
+#define ROCKER_OP_FLAG_REMOVE		BIT(0)
+#define ROCKER_OP_FLAG_NOWAIT		BIT(1)
+#define ROCKER_OP_FLAG_LEARNED		BIT(2)
+#define ROCKER_OP_FLAG_REFRESH		BIT(3)
+
+static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
+				     enum switchdev_trans trans, int flags,
+				     size_t size)
+{
+	struct list_head *elem = NULL;
+	gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
+			  GFP_ATOMIC : GFP_KERNEL;
+
+	/* If in the transaction prepare phase, allocate the memory
+	 * and enqueue it on a per-port list.  If in the transaction
+	 * commit phase, dequeue the memory from the per-port list
+	 * rather than re-allocating it.  The idea is that the driver
+	 * code paths for prepare and commit are identical, so the
+	 * memory allocated in the prepare phase is exactly the
+	 * memory that gets used in the commit phase.
+	 */
+
+	switch (trans) {
+	case SWITCHDEV_TRANS_PREPARE:
+		elem = kzalloc(size + sizeof(*elem), gfp_flags);
+		if (!elem)
+			return NULL;
+		list_add_tail(elem, &rocker_port->trans_mem);
+		break;
+	case SWITCHDEV_TRANS_COMMIT:
+		BUG_ON(list_empty(&rocker_port->trans_mem));
+		elem = rocker_port->trans_mem.next;
+		list_del_init(elem);
+		break;
+	case SWITCHDEV_TRANS_NONE:
+		elem = kzalloc(size + sizeof(*elem), gfp_flags);
+		if (elem)
+			INIT_LIST_HEAD(elem);
+		break;
+	default:
+		break;
+	}
+
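+	/* Hand back the memory just past the embedded list_head header */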
+	return elem ? elem + 1 : NULL;
+}
+
+static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
+				 enum switchdev_trans trans, int flags,
+				 size_t size)
+{
+	return __rocker_port_mem_alloc(rocker_port, trans, flags, size);
+}
+
+static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
+				 enum switchdev_trans trans, int flags,
+				 size_t n, size_t size)
+{
+	return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size);
+}
+
+static void rocker_port_kfree(enum switchdev_trans trans, const void *mem)
+{
+	struct list_head *elem;
+
+	/* Frees are ignored if in the transaction prepare phase.
+	 * The memory remains on the per-port list until freed in
+	 * the commit phase.
+	 */
+
+	if (trans == SWITCHDEV_TRANS_PREPARE)
+		return;
+
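+	/* Step back over the list_head header prepended by the allocator */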
+	elem = (struct list_head *)mem - 1;
+	BUG_ON(!list_empty(elem));
+	kfree(elem);
+}
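+
+/* Sketch of the intended two-phase flow (the function name below is
+ * only illustrative; the switchdev core drives the phases):
+ *
+ *	rocker_port_foo(rocker_port, SWITCHDEV_TRANS_PREPARE, ...);
+ *		rocker_port_kzalloc() allocates and queues on trans_mem;
+ *		rocker_port_kfree() is a no-op.
+ *	rocker_port_foo(rocker_port, SWITCHDEV_TRANS_COMMIT, ...);
+ *		rocker_port_kzalloc() dequeues the same memory, so the
+ *		commit phase cannot fail with -ENOMEM;
+ *		rocker_port_kfree() actually frees.
+ */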
+
 struct rocker_wait {
 	wait_queue_head_t wait;
 	bool done;
@@ -343,20 +421,23 @@
 	rocker_wait_reset(wait);
 }
 
-static struct rocker_wait *rocker_wait_create(gfp_t gfp)
+static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
+					      enum switchdev_trans trans,
+					      int flags)
 {
 	struct rocker_wait *wait;
 
-	wait = kmalloc(sizeof(*wait), gfp);
+	wait = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*wait));
 	if (!wait)
 		return NULL;
 	rocker_wait_init(wait);
 	return wait;
 }
 
-static void rocker_wait_destroy(struct rocker_wait *work)
+static void rocker_wait_destroy(enum switchdev_trans trans,
+				struct rocker_wait *wait)
 {
-	kfree(work);
+	rocker_port_kfree(trans, wait);
 }
 
 static bool rocker_wait_event_timeout(struct rocker_wait *wait,
@@ -374,18 +455,18 @@
 	wake_up(&wait->wait);
 }
 
-static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector)
+static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
 {
 	return rocker->msix_entries[vector].vector;
 }
 
-static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port)
+static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
 {
 	return rocker_msix_vector(rocker_port->rocker,
 				  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
 }
 
-static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
+static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
 {
 	return rocker_msix_vector(rocker_port->rocker,
 				  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
@@ -404,9 +485,9 @@
  * HW basic testing functions
  *****************************/
 
-static int rocker_reg_test(struct rocker *rocker)
+static int rocker_reg_test(const struct rocker *rocker)
 {
-	struct pci_dev *pdev = rocker->pdev;
+	const struct pci_dev *pdev = rocker->pdev;
 	u64 test_reg;
 	u64 rnd;
 
@@ -434,12 +515,12 @@
 	return 0;
 }
 
-static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
-			       u32 test_type, dma_addr_t dma_handle,
-			       unsigned char *buf, unsigned char *expect,
-			       size_t size)
+static int rocker_dma_test_one(const struct rocker *rocker,
+			       struct rocker_wait *wait, u32 test_type,
+			       dma_addr_t dma_handle, const unsigned char *buf,
+			       const unsigned char *expect, size_t size)
 {
-	struct pci_dev *pdev = rocker->pdev;
+	const struct pci_dev *pdev = rocker->pdev;
 	int i;
 
 	rocker_wait_reset(wait);
@@ -463,7 +544,7 @@
 #define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
 #define ROCKER_TEST_DMA_FILL_PATTERN 0x96
 
-static int rocker_dma_test_offset(struct rocker *rocker,
+static int rocker_dma_test_offset(const struct rocker *rocker,
 				  struct rocker_wait *wait, int offset)
 {
 	struct pci_dev *pdev = rocker->pdev;
@@ -523,7 +604,8 @@
 	return err;
 }
 
-static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait)
+static int rocker_dma_test(const struct rocker *rocker,
+			   struct rocker_wait *wait)
 {
 	int i;
 	int err;
@@ -545,9 +627,9 @@
 	return IRQ_HANDLED;
 }
 
-static int rocker_basic_hw_test(struct rocker *rocker)
+static int rocker_basic_hw_test(const struct rocker *rocker)
 {
-	struct pci_dev *pdev = rocker->pdev;
+	const struct pci_dev *pdev = rocker->pdev;
 	struct rocker_wait wait;
 	int err;
 
@@ -680,7 +762,7 @@
 	return *(u64 *) rocker_tlv_data(tlv);
 }
 
-static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
+static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
 			     const char *buf, int buf_len)
 {
 	const struct rocker_tlv *tlv;
@@ -693,19 +775,19 @@
 		u32 type = rocker_tlv_type(tlv);
 
 		if (type > 0 && type <= maxtype)
-			tb[type] = (struct rocker_tlv *) tlv;
+			tb[type] = tlv;
 	}
 }
 
-static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype,
+static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
 				    const struct rocker_tlv *tlv)
 {
 	rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
 			 rocker_tlv_len(tlv));
 }
 
-static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype,
-				  struct rocker_desc_info *desc_info)
+static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
+				  const struct rocker_desc_info *desc_info)
 {
 	rocker_tlv_parse(tb, maxtype, desc_info->data,
 			 desc_info->desc->tlv_size);
@@ -790,9 +872,9 @@
 }
 
 static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
-				   struct rocker_tlv *start)
+				   const struct rocker_tlv *start)
 {
-	desc_info->tlv_size = (char *) start - desc_info->data;
+	desc_info->tlv_size = (const char *) start - desc_info->data;
 }
 
 /******************************************
@@ -804,7 +886,7 @@
 	return ++pos == limit ? 0 : pos;
 }
 
-static int rocker_desc_err(struct rocker_desc_info *desc_info)
+static int rocker_desc_err(const struct rocker_desc_info *desc_info)
 {
 	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
 
@@ -832,31 +914,31 @@
 	return -EINVAL;
 }
 
-static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
+static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
 {
 	desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
 }
 
-static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
+static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
 {
 	u32 comp_err = desc_info->desc->comp_err;
 
 	return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
 }
 
-static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
+static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
 {
 	return (void *)(uintptr_t)desc_info->desc->cookie;
 }
 
-static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
+static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
 				       void *ptr)
 {
 	desc_info->desc->cookie = (uintptr_t) ptr;
 }
 
 static struct rocker_desc_info *
-rocker_desc_head_get(struct rocker_dma_ring_info *info)
+rocker_desc_head_get(const struct rocker_dma_ring_info *info)
 {
 	static struct rocker_desc_info *desc_info;
 	u32 head = __pos_inc(info->head, info->size);
@@ -868,15 +950,15 @@
 	return desc_info;
 }
 
-static void rocker_desc_commit(struct rocker_desc_info *desc_info)
+static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
 {
 	desc_info->desc->buf_size = desc_info->data_size;
 	desc_info->desc->tlv_size = desc_info->tlv_size;
 }
 
-static void rocker_desc_head_set(struct rocker *rocker,
+static void rocker_desc_head_set(const struct rocker *rocker,
 				 struct rocker_dma_ring_info *info,
-				 struct rocker_desc_info *desc_info)
+				 const struct rocker_desc_info *desc_info)
 {
 	u32 head = __pos_inc(info->head, info->size);
 
@@ -901,8 +983,8 @@
 	return desc_info;
 }
 
-static void rocker_dma_ring_credits_set(struct rocker *rocker,
-					struct rocker_dma_ring_info *info,
+static void rocker_dma_ring_credits_set(const struct rocker *rocker,
+					const struct rocker_dma_ring_info *info,
 					u32 credits)
 {
 	if (credits)
@@ -915,7 +997,7 @@
 		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
 }
 
-static int rocker_dma_ring_create(struct rocker *rocker,
+static int rocker_dma_ring_create(const struct rocker *rocker,
 				  unsigned int type,
 				  size_t size,
 				  struct rocker_dma_ring_info *info)
@@ -951,8 +1033,8 @@
 	return 0;
 }
 
-static void rocker_dma_ring_destroy(struct rocker *rocker,
-				    struct rocker_dma_ring_info *info)
+static void rocker_dma_ring_destroy(const struct rocker *rocker,
+				    const struct rocker_dma_ring_info *info)
 {
 	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
 
@@ -962,7 +1044,7 @@
 	kfree(info->desc_info);
 }
 
-static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
+static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
 					     struct rocker_dma_ring_info *info)
 {
 	int i;
@@ -977,8 +1059,8 @@
 	rocker_desc_commit(&info->desc_info[i]);
 }
 
-static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
-				      struct rocker_dma_ring_info *info,
+static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
+				      const struct rocker_dma_ring_info *info,
 				      int direction, size_t buf_size)
 {
 	struct pci_dev *pdev = rocker->pdev;
@@ -1015,7 +1097,7 @@
 
 rollback:
 	for (i--; i >= 0; i--) {
-		struct rocker_desc_info *desc_info = &info->desc_info[i];
+		const struct rocker_desc_info *desc_info = &info->desc_info[i];
 
 		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
 				 desc_info->data_size, direction);
@@ -1024,15 +1106,15 @@
 	return err;
 }
 
-static void rocker_dma_ring_bufs_free(struct rocker *rocker,
-				      struct rocker_dma_ring_info *info,
+static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
+				      const struct rocker_dma_ring_info *info,
 				      int direction)
 {
 	struct pci_dev *pdev = rocker->pdev;
 	int i;
 
 	for (i = 0; i < info->size; i++) {
-		struct rocker_desc_info *desc_info = &info->desc_info[i];
+		const struct rocker_desc_info *desc_info = &info->desc_info[i];
 		struct rocker_desc *desc = &info->desc[i];
 
 		desc->buf_addr = 0;
@@ -1045,7 +1127,7 @@
 
 static int rocker_dma_rings_init(struct rocker *rocker)
 {
-	struct pci_dev *pdev = rocker->pdev;
+	const struct pci_dev *pdev = rocker->pdev;
 	int err;
 
 	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
@@ -1102,11 +1184,11 @@
 	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
 }
 
-static int rocker_dma_rx_ring_skb_map(struct rocker *rocker,
-				      struct rocker_port *rocker_port,
+static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
 				      struct rocker_desc_info *desc_info,
 				      struct sk_buff *skb, size_t buf_len)
 {
+	const struct rocker *rocker = rocker_port->rocker;
 	struct pci_dev *pdev = rocker->pdev;
 	dma_addr_t dma_handle;
 
@@ -1126,13 +1208,12 @@
 	return -EMSGSIZE;
 }
 
-static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port)
+static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
 {
 	return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
 }
 
-static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
-					struct rocker_port *rocker_port,
+static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
 					struct rocker_desc_info *desc_info)
 {
 	struct net_device *dev = rocker_port->dev;
@@ -1149,8 +1230,7 @@
 	skb = netdev_alloc_skb_ip_align(dev, buf_len);
 	if (!skb)
 		return -ENOMEM;
-	err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info,
-					 skb, buf_len);
+	err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
 	if (err) {
 		dev_kfree_skb_any(skb);
 		return err;
@@ -1159,8 +1239,8 @@
 	return 0;
 }
 
-static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
-					 struct rocker_tlv **attrs)
+static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
+					 const struct rocker_tlv **attrs)
 {
 	struct pci_dev *pdev = rocker->pdev;
 	dma_addr_t dma_handle;
@@ -1174,10 +1254,10 @@
 	pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
 }
 
-static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
-					struct rocker_desc_info *desc_info)
+static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
+					const struct rocker_desc_info *desc_info)
 {
-	struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
 	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
 
 	if (!skb)
@@ -1187,15 +1267,15 @@
 	dev_kfree_skb_any(skb);
 }
 
-static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker,
-					 struct rocker_port *rocker_port)
+static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
 {
-	struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+	const struct rocker *rocker = rocker_port->rocker;
 	int i;
 	int err;
 
 	for (i = 0; i < rx_ring->size; i++) {
-		err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port,
+		err = rocker_dma_rx_ring_skb_alloc(rocker_port,
 						   &rx_ring->desc_info[i]);
 		if (err)
 			goto rollback;
@@ -1208,10 +1288,10 @@
 	return err;
 }
 
-static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker,
-					 struct rocker_port *rocker_port)
+static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
 {
-	struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+	const struct rocker *rocker = rocker_port->rocker;
 	int i;
 
 	for (i = 0; i < rx_ring->size; i++)
@@ -1257,7 +1337,7 @@
 		goto err_dma_rx_ring_bufs_alloc;
 	}
 
-	err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port);
+	err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
 	if (err) {
 		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
 		goto err_dma_rx_ring_skbs_alloc;
@@ -1283,7 +1363,7 @@
 {
 	struct rocker *rocker = rocker_port->rocker;
 
-	rocker_dma_rx_ring_skbs_free(rocker, rocker_port);
+	rocker_dma_rx_ring_skbs_free(rocker_port);
 	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
 				  PCI_DMA_BIDIRECTIONAL);
 	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
@@ -1292,7 +1372,8 @@
 	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
 }
 
-static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
+static void rocker_port_set_enable(const struct rocker_port *rocker_port,
+				   bool enable)
 {
 	u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
 
@@ -1310,7 +1391,7 @@
 static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
 {
 	struct rocker *rocker = dev_id;
-	struct rocker_desc_info *desc_info;
+	const struct rocker_desc_info *desc_info;
 	struct rocker_wait *wait;
 	u32 credits = 0;
 
@@ -1319,7 +1400,7 @@
 		wait = rocker_desc_cookie_ptr_get(desc_info);
 		if (wait->nowait) {
 			rocker_desc_gen_clear(desc_info);
-			rocker_wait_destroy(wait);
+			rocker_wait_destroy(SWITCHDEV_TRANS_NONE, wait);
 		} else {
 			rocker_wait_wake_up(wait);
 		}
@@ -1331,22 +1412,22 @@
 	return IRQ_HANDLED;
 }
 
-static void rocker_port_link_up(struct rocker_port *rocker_port)
+static void rocker_port_link_up(const struct rocker_port *rocker_port)
 {
 	netif_carrier_on(rocker_port->dev);
 	netdev_info(rocker_port->dev, "Link is up\n");
 }
 
-static void rocker_port_link_down(struct rocker_port *rocker_port)
+static void rocker_port_link_down(const struct rocker_port *rocker_port)
 {
 	netif_carrier_off(rocker_port->dev);
 	netdev_info(rocker_port->dev, "Link is down\n");
 }
 
-static int rocker_event_link_change(struct rocker *rocker,
+static int rocker_event_link_change(const struct rocker *rocker,
 				    const struct rocker_tlv *info)
 {
-	struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
+	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
 	unsigned int port_number;
 	bool link_up;
 	struct rocker_port *rocker_port;
@@ -1373,22 +1454,18 @@
 	return 0;
 }
 
-#define ROCKER_OP_FLAG_REMOVE		BIT(0)
-#define ROCKER_OP_FLAG_NOWAIT		BIT(1)
-#define ROCKER_OP_FLAG_LEARNED		BIT(2)
-#define ROCKER_OP_FLAG_REFRESH		BIT(3)
-
 static int rocker_port_fdb(struct rocker_port *rocker_port,
+			   enum switchdev_trans trans,
 			   const unsigned char *addr,
 			   __be16 vlan_id, int flags);
 
-static int rocker_event_mac_vlan_seen(struct rocker *rocker,
+static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
 				      const struct rocker_tlv *info)
 {
-	struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
+	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
 	unsigned int port_number;
 	struct rocker_port *rocker_port;
-	unsigned char *addr;
+	const unsigned char *addr;
 	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
 	__be16 vlan_id;
 
@@ -1411,14 +1488,15 @@
 	    rocker_port->stp_state != BR_STATE_FORWARDING)
 		return 0;
 
-	return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+	return rocker_port_fdb(rocker_port, SWITCHDEV_TRANS_NONE,
+			       addr, vlan_id, flags);
 }
 
-static int rocker_event_process(struct rocker *rocker,
-				struct rocker_desc_info *desc_info)
+static int rocker_event_process(const struct rocker *rocker,
+				const struct rocker_desc_info *desc_info)
 {
-	struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
-	struct rocker_tlv *info;
+	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
+	const struct rocker_tlv *info;
 	u16 type;
 
 	rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
@@ -1442,8 +1520,8 @@
 static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
 {
 	struct rocker *rocker = dev_id;
-	struct pci_dev *pdev = rocker->pdev;
-	struct rocker_desc_info *desc_info;
+	const struct pci_dev *pdev = rocker->pdev;
+	const struct rocker_desc_info *desc_info;
 	u32 credits = 0;
 	int err;
 
@@ -1487,65 +1565,75 @@
  * Command interface
  ********************/
 
-typedef int (*rocker_cmd_cb_t)(struct rocker *rocker,
-			       struct rocker_port *rocker_port,
-			       struct rocker_desc_info *desc_info,
-			       void *priv);
+typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
+				    struct rocker_desc_info *desc_info,
+				    void *priv);
 
-static int rocker_cmd_exec(struct rocker *rocker,
-			   struct rocker_port *rocker_port,
-			   rocker_cmd_cb_t prepare, void *prepare_priv,
-			   rocker_cmd_cb_t process, void *process_priv,
-			   bool nowait)
+typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
+				    const struct rocker_desc_info *desc_info,
+				    void *priv);
+
+static int rocker_cmd_exec(struct rocker_port *rocker_port,
+			   enum switchdev_trans trans, int flags,
+			   rocker_cmd_prep_cb_t prepare, void *prepare_priv,
+			   rocker_cmd_proc_cb_t process, void *process_priv)
 {
+	struct rocker *rocker = rocker_port->rocker;
 	struct rocker_desc_info *desc_info;
 	struct rocker_wait *wait;
-	unsigned long flags;
+	bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
+	unsigned long lock_flags;
 	int err;
 
-	wait = rocker_wait_create(nowait ? GFP_ATOMIC : GFP_KERNEL);
+	wait = rocker_wait_create(rocker_port, trans, flags);
 	if (!wait)
 		return -ENOMEM;
 	wait->nowait = nowait;
 
-	spin_lock_irqsave(&rocker->cmd_ring_lock, flags);
+	spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);
+
 	desc_info = rocker_desc_head_get(&rocker->cmd_ring);
 	if (!desc_info) {
-		spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
 		err = -EAGAIN;
 		goto out;
 	}
-	err = prepare(rocker, rocker_port, desc_info, prepare_priv);
+
+	err = prepare(rocker_port, desc_info, prepare_priv);
 	if (err) {
-		spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
 		goto out;
 	}
+
 	rocker_desc_cookie_ptr_set(desc_info, wait);
-	rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
-	spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+
+	if (trans != SWITCHDEV_TRANS_PREPARE)
+		rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
+
+	spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
 
 	if (nowait)
 		return 0;
 
-	if (!rocker_wait_event_timeout(wait, HZ / 10))
-		return -EIO;
+	if (trans != SWITCHDEV_TRANS_PREPARE)
+		if (!rocker_wait_event_timeout(wait, HZ / 10))
+			return -EIO;
 
 	err = rocker_desc_err(desc_info);
 	if (err)
 		return err;
 
 	if (process)
-		err = process(rocker, rocker_port, desc_info, process_priv);
+		err = process(rocker_port, desc_info, process_priv);
 
 	rocker_desc_gen_clear(desc_info);
 out:
-	rocker_wait_destroy(wait);
+	rocker_wait_destroy(trans, wait);
 	return err;
 }
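+
+/* Illustrative use (mirrors the wrappers further down): in the prepare
+ * phase the descriptor is filled in but neither posted to the cmd ring
+ * nor waited on, so no command reaches the hardware until commit:
+ *
+ *	err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_PREPARE, 0,
+ *			      rocker_cmd_set_port_learning_prep,
+ *			      NULL, NULL, NULL);
+ */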
 
 static int
-rocker_cmd_get_port_settings_prep(struct rocker *rocker,
-				  struct rocker_port *rocker_port,
+rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
 				  struct rocker_desc_info *desc_info,
 				  void *priv)
 {
@@ -1565,14 +1653,13 @@
 }
 
 static int
-rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
-					  struct rocker_port *rocker_port,
-					  struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
+					  const struct rocker_desc_info *desc_info,
 					  void *priv)
 {
 	struct ethtool_cmd *ecmd = priv;
-	struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
-	struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
 	u32 speed;
 	u8 duplex;
 	u8 autoneg;
@@ -1604,15 +1691,14 @@
 }
 
 static int
-rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
-					  struct rocker_port *rocker_port,
-					  struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
+					  const struct rocker_desc_info *desc_info,
 					  void *priv)
 {
 	unsigned char *macaddr = priv;
-	struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
-	struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
-	struct rocker_tlv *attr;
+	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+	const struct rocker_tlv *attr;
 
 	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
 	if (!attrs[ROCKER_TLV_CMD_INFO])
@@ -1637,17 +1723,16 @@
 };
 
 static int
-rocker_cmd_get_port_settings_phys_name_proc(struct rocker *rocker,
-					    struct rocker_port *rocker_port,
-					    struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
+					    const struct rocker_desc_info *desc_info,
 					    void *priv)
 {
-	struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
-	struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
 	struct port_name *name = priv;
-	struct rocker_tlv *attr;
+	const struct rocker_tlv *attr;
 	size_t i, j, len;
-	char *str;
+	const char *str;
 
 	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
 	if (!attrs[ROCKER_TLV_CMD_INFO])
@@ -1679,8 +1764,7 @@
 }
 
 static int
-rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
-					  struct rocker_port *rocker_port,
+rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
 					  struct rocker_desc_info *desc_info,
 					  void *priv)
 {
@@ -1710,12 +1794,11 @@
 }
 
 static int
-rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
-					  struct rocker_port *rocker_port,
+rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
 					  struct rocker_desc_info *desc_info,
 					  void *priv)
 {
-	unsigned char *macaddr = priv;
+	const unsigned char *macaddr = priv;
 	struct rocker_tlv *cmd_info;
 
 	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
@@ -1735,8 +1818,7 @@
 }
 
 static int
-rocker_cmd_set_port_learning_prep(struct rocker *rocker,
-				  struct rocker_port *rocker_port,
+rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
 				  struct rocker_desc_info *desc_info,
 				  void *priv)
 {
@@ -1761,46 +1843,48 @@
 static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
 						struct ethtool_cmd *ecmd)
 {
-	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
 			       rocker_cmd_get_port_settings_prep, NULL,
 			       rocker_cmd_get_port_settings_ethtool_proc,
-			       ecmd, false);
+			       ecmd);
 }
 
 static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
 						unsigned char *macaddr)
 {
-	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
 			       rocker_cmd_get_port_settings_prep, NULL,
 			       rocker_cmd_get_port_settings_macaddr_proc,
-			       macaddr, false);
+			       macaddr);
 }
 
 static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
 						struct ethtool_cmd *ecmd)
 {
-	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
 			       rocker_cmd_set_port_settings_ethtool_prep,
-			       ecmd, NULL, NULL, false);
+			       ecmd, NULL, NULL);
 }
 
 static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
 						unsigned char *macaddr)
 {
-	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
 			       rocker_cmd_set_port_settings_macaddr_prep,
-			       macaddr, NULL, NULL, false);
+			       macaddr, NULL, NULL);
 }
 
-static int rocker_port_set_learning(struct rocker_port *rocker_port)
+static int rocker_port_set_learning(struct rocker_port *rocker_port,
+				    enum switchdev_trans trans)
 {
-	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+	return rocker_cmd_exec(rocker_port, trans, 0,
 			       rocker_cmd_set_port_learning_prep,
-			       NULL, NULL, NULL, false);
+			       NULL, NULL, NULL);
 }
 
-static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
-					   struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
+				const struct rocker_flow_tbl_entry *entry)
 {
 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
 			       entry->key.ig_port.in_pport))
@@ -1815,8 +1899,9 @@
 	return 0;
 }
 
-static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
-					struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
+			     const struct rocker_flow_tbl_entry *entry)
 {
 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
 			       entry->key.vlan.in_pport))
@@ -1838,8 +1923,9 @@
 	return 0;
 }
 
-static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
-					    struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
+				 const struct rocker_flow_tbl_entry *entry)
 {
 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
 			       entry->key.term_mac.in_pport))
@@ -1875,7 +1961,7 @@
 
 static int
 rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
-				      struct rocker_flow_tbl_entry *entry)
+				      const struct rocker_flow_tbl_entry *entry)
 {
 	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
 				entry->key.ucast_routing.eth_type))
@@ -1896,8 +1982,9 @@
 	return 0;
 }
 
-static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
-					  struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
+			       const struct rocker_flow_tbl_entry *entry)
 {
 	if (entry->key.bridge.has_eth_dst &&
 	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
@@ -1929,8 +2016,9 @@
 	return 0;
 }
 
-static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
-				       struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
+			    const struct rocker_flow_tbl_entry *entry)
 {
 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
 			       entry->key.acl.in_pport))
@@ -1995,12 +2083,11 @@
 	return 0;
 }
 
-static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
-				   struct rocker_port *rocker_port,
+static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
 				   struct rocker_desc_info *desc_info,
 				   void *priv)
 {
-	struct rocker_flow_tbl_entry *entry = priv;
+	const struct rocker_flow_tbl_entry *entry = priv;
 	struct rocker_tlv *cmd_info;
 	int err = 0;
 
@@ -2053,8 +2140,7 @@
 	return 0;
 }
 
-static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
-				   struct rocker_port *rocker_port,
+static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
 				   struct rocker_desc_info *desc_info,
 				   void *priv)
 {
@@ -2090,7 +2176,7 @@
 
 static int
 rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
-				    struct rocker_group_tbl_entry *entry)
+				    const struct rocker_group_tbl_entry *entry)
 {
 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
 			       entry->l2_rewrite.group_id))
@@ -2113,7 +2199,7 @@
 
 static int
 rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
-				   struct rocker_group_tbl_entry *entry)
+				   const struct rocker_group_tbl_entry *entry)
 {
 	int i;
 	struct rocker_tlv *group_ids;
@@ -2139,7 +2225,7 @@
 
 static int
 rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
-				    struct rocker_group_tbl_entry *entry)
+				    const struct rocker_group_tbl_entry *entry)
 {
 	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
 	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
@@ -2163,8 +2249,7 @@
 	return 0;
 }
 
-static int rocker_cmd_group_tbl_add(struct rocker *rocker,
-				    struct rocker_port *rocker_port,
+static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
 				    struct rocker_desc_info *desc_info,
 				    void *priv)
 {
@@ -2209,8 +2294,7 @@
 	return 0;
 }
 
-static int rocker_cmd_group_tbl_del(struct rocker *rocker,
-				    struct rocker_port *rocker_port,
+static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
 				    struct rocker_desc_info *desc_info,
 				    void *priv)
 {
@@ -2293,7 +2377,8 @@
 }
 
 static struct rocker_flow_tbl_entry *
-rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
+rocker_flow_tbl_find(const struct rocker *rocker,
+		     const struct rocker_flow_tbl_entry *match)
 {
 	struct rocker_flow_tbl_entry *found;
 	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
@@ -2308,24 +2393,25 @@
 }
 
 static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
-			       struct rocker_flow_tbl_entry *match,
-			       bool nowait)
+			       enum switchdev_trans trans, int flags,
+			       struct rocker_flow_tbl_entry *match)
 {
 	struct rocker *rocker = rocker_port->rocker;
 	struct rocker_flow_tbl_entry *found;
 	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
-	unsigned long flags;
+	unsigned long lock_flags;
 
 	match->key_crc32 = crc32(~0, &match->key, key_len);
 
-	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
+	spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
 
 	found = rocker_flow_tbl_find(rocker, match);
 
 	if (found) {
 		match->cookie = found->cookie;
-		hash_del(&found->entry);
-		kfree(found);
+		if (trans != SWITCHDEV_TRANS_PREPARE)
+			hash_del(&found->entry);
+		rocker_port_kfree(trans, found);
 		found = match;
 		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
 	} else {
@@ -2334,73 +2420,69 @@
 		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
 	}
 
-	hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
+	if (trans != SWITCHDEV_TRANS_PREPARE)
+		hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
 
-	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
+	spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
 
-	return rocker_cmd_exec(rocker, rocker_port,
-			       rocker_cmd_flow_tbl_add,
-			       found, NULL, NULL, nowait);
+	return rocker_cmd_exec(rocker_port, trans, flags,
+			       rocker_cmd_flow_tbl_add, found, NULL, NULL);
 }
 
 static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
-			       struct rocker_flow_tbl_entry *match,
-			       bool nowait)
+			       enum switchdev_trans trans, int flags,
+			       struct rocker_flow_tbl_entry *match)
 {
 	struct rocker *rocker = rocker_port->rocker;
 	struct rocker_flow_tbl_entry *found;
 	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
-	unsigned long flags;
+	unsigned long lock_flags;
 	int err = 0;
 
 	match->key_crc32 = crc32(~0, &match->key, key_len);
 
-	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
+	spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
 
 	found = rocker_flow_tbl_find(rocker, match);
 
 	if (found) {
-		hash_del(&found->entry);
+		if (trans != SWITCHDEV_TRANS_PREPARE)
+			hash_del(&found->entry);
 		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
 	}
 
-	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
+	spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
 
-	kfree(match);
+	rocker_port_kfree(trans, match);
 
 	if (found) {
-		err = rocker_cmd_exec(rocker, rocker_port,
+		err = rocker_cmd_exec(rocker_port, trans, flags,
 				      rocker_cmd_flow_tbl_del,
-				      found, NULL, NULL, nowait);
-		kfree(found);
+				      found, NULL, NULL);
+		rocker_port_kfree(trans, found);
 	}
 
 	return err;
 }
 
-static gfp_t rocker_op_flags_gfp(int flags)
-{
-	return flags & ROCKER_OP_FLAG_NOWAIT ? GFP_ATOMIC : GFP_KERNEL;
-}
-
 static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
-			      int flags, struct rocker_flow_tbl_entry *entry)
+			      enum switchdev_trans trans, int flags,
+			      struct rocker_flow_tbl_entry *entry)
 {
-	bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
-
 	if (flags & ROCKER_OP_FLAG_REMOVE)
-		return rocker_flow_tbl_del(rocker_port, entry, nowait);
+		return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
 	else
-		return rocker_flow_tbl_add(rocker_port, entry, nowait);
+		return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
 }
 
 static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
-				   int flags, u32 in_pport, u32 in_pport_mask,
+				   enum switchdev_trans trans, int flags,
+				   u32 in_pport, u32 in_pport_mask,
 				   enum rocker_of_dpa_table_id goto_tbl)
 {
 	struct rocker_flow_tbl_entry *entry;
 
-	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
 	if (!entry)
 		return -ENOMEM;
 
@@ -2410,18 +2492,19 @@
 	entry->key.ig_port.in_pport_mask = in_pport_mask;
 	entry->key.ig_port.goto_tbl = goto_tbl;
 
-	return rocker_flow_tbl_do(rocker_port, flags, entry);
+	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
 }
 
 static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
-				int flags, u32 in_pport,
-				__be16 vlan_id, __be16 vlan_id_mask,
+				enum switchdev_trans trans, int flags,
+				u32 in_pport, __be16 vlan_id,
+				__be16 vlan_id_mask,
 				enum rocker_of_dpa_table_id goto_tbl,
 				bool untagged, __be16 new_vlan_id)
 {
 	struct rocker_flow_tbl_entry *entry;
 
-	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
 	if (!entry)
 		return -ENOMEM;
 
@@ -2435,10 +2518,11 @@
 	entry->key.vlan.untagged = untagged;
 	entry->key.vlan.new_vlan_id = new_vlan_id;
 
-	return rocker_flow_tbl_do(rocker_port, flags, entry);
+	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
 }
 
 static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
+				    enum switchdev_trans trans,
 				    u32 in_pport, u32 in_pport_mask,
 				    __be16 eth_type, const u8 *eth_dst,
 				    const u8 *eth_dst_mask, __be16 vlan_id,
@@ -2447,7 +2531,7 @@
 {
 	struct rocker_flow_tbl_entry *entry;
 
-	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
 	if (!entry)
 		return -ENOMEM;
 
@@ -2471,11 +2555,11 @@
 	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
 	entry->key.term_mac.copy_to_cpu = copy_to_cpu;
 
-	return rocker_flow_tbl_do(rocker_port, flags, entry);
+	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
 }
 
 static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
-				  int flags,
+				  enum switchdev_trans trans, int flags,
 				  const u8 *eth_dst, const u8 *eth_dst_mask,
 				  __be16 vlan_id, u32 tunnel_id,
 				  enum rocker_of_dpa_table_id goto_tbl,
@@ -2487,7 +2571,7 @@
 	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
 	bool wild = false;
 
-	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
 	if (!entry)
 		return -ENOMEM;
 
@@ -2500,7 +2584,7 @@
 	if (eth_dst_mask) {
 		entry->key.bridge.has_eth_dst_mask = 1;
 		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
-		if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN))
+		if (!ether_addr_equal(eth_dst_mask, ff_mac))
 			wild = true;
 	}
 
@@ -2525,10 +2609,11 @@
 	entry->key.bridge.group_id = group_id;
 	entry->key.bridge.copy_to_cpu = copy_to_cpu;
 
-	return rocker_flow_tbl_do(rocker_port, flags, entry);
+	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
 }
 
 static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
+					  enum switchdev_trans trans,
 					  __be16 eth_type, __be32 dst,
 					  __be32 dst_mask, u32 priority,
 					  enum rocker_of_dpa_table_id goto_tbl,
@@ -2536,7 +2621,7 @@
 {
 	struct rocker_flow_tbl_entry *entry;
 
-	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
 	if (!entry)
 		return -ENOMEM;
 
@@ -2550,30 +2635,29 @@
 	entry->key_len = offsetof(struct rocker_flow_tbl_key,
 				  ucast_routing.group_id);
 
-	return rocker_flow_tbl_do(rocker_port, flags, entry);
+	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
 }
 
 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
-			       int flags, u32 in_pport,
-			       u32 in_pport_mask,
+			       enum switchdev_trans trans, int flags,
+			       u32 in_pport, u32 in_pport_mask,
 			       const u8 *eth_src, const u8 *eth_src_mask,
 			       const u8 *eth_dst, const u8 *eth_dst_mask,
-			       __be16 eth_type,
-			       __be16 vlan_id, __be16 vlan_id_mask,
-			       u8 ip_proto, u8 ip_proto_mask,
-			       u8 ip_tos, u8 ip_tos_mask,
+			       __be16 eth_type, __be16 vlan_id,
+			       __be16 vlan_id_mask, u8 ip_proto,
+			       u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
 			       u32 group_id)
 {
 	u32 priority;
 	struct rocker_flow_tbl_entry *entry;
 
-	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
 	if (!entry)
 		return -ENOMEM;
 
 	priority = ROCKER_PRIORITY_ACL_NORMAL;
 	if (eth_dst && eth_dst_mask) {
-		if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0)
+		if (ether_addr_equal(eth_dst_mask, mcast_mac))
 			priority = ROCKER_PRIORITY_ACL_DFLT;
 		else if (is_link_local_ether_addr(eth_dst))
 			priority = ROCKER_PRIORITY_ACL_CTRL;
@@ -2602,12 +2686,12 @@
 	entry->key.acl.ip_tos_mask = ip_tos_mask;
 	entry->key.acl.group_id = group_id;
 
-	return rocker_flow_tbl_do(rocker_port, flags, entry);
+	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
 }
 
 static struct rocker_group_tbl_entry *
-rocker_group_tbl_find(struct rocker *rocker,
-		      struct rocker_group_tbl_entry *match)
+rocker_group_tbl_find(const struct rocker *rocker,
+		      const struct rocker_group_tbl_entry *match)
 {
 	struct rocker_group_tbl_entry *found;
 
@@ -2620,34 +2704,36 @@
 	return NULL;
 }
 
-static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry)
+static void rocker_group_tbl_entry_free(enum switchdev_trans trans,
+					struct rocker_group_tbl_entry *entry)
 {
 	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
 	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
 	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
-		kfree(entry->group_ids);
+		rocker_port_kfree(trans, entry->group_ids);
 		break;
 	default:
 		break;
 	}
-	kfree(entry);
+	rocker_port_kfree(trans, entry);
 }
 
 static int rocker_group_tbl_add(struct rocker_port *rocker_port,
-				struct rocker_group_tbl_entry *match,
-				bool nowait)
+				enum switchdev_trans trans, int flags,
+				struct rocker_group_tbl_entry *match)
 {
 	struct rocker *rocker = rocker_port->rocker;
 	struct rocker_group_tbl_entry *found;
-	unsigned long flags;
+	unsigned long lock_flags;
 
-	spin_lock_irqsave(&rocker->group_tbl_lock, flags);
+	spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
 
 	found = rocker_group_tbl_find(rocker, match);
 
 	if (found) {
-		hash_del(&found->entry);
-		rocker_group_tbl_entry_free(found);
+		if (trans != SWITCHDEV_TRANS_PREPARE)
+			hash_del(&found->entry);
+		rocker_group_tbl_entry_free(trans, found);
 		found = match;
 		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
 	} else {
@@ -2655,116 +2741,118 @@
 		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
 	}
 
-	hash_add(rocker->group_tbl, &found->entry, found->group_id);
+	if (trans != SWITCHDEV_TRANS_PREPARE)
+		hash_add(rocker->group_tbl, &found->entry, found->group_id);
 
-	spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
+	spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
 
-	return rocker_cmd_exec(rocker, rocker_port,
-			       rocker_cmd_group_tbl_add,
-			       found, NULL, NULL, nowait);
+	return rocker_cmd_exec(rocker_port, trans, flags,
+			       rocker_cmd_group_tbl_add, found, NULL, NULL);
 }
 
 static int rocker_group_tbl_del(struct rocker_port *rocker_port,
-				struct rocker_group_tbl_entry *match,
-				bool nowait)
+				enum switchdev_trans trans, int flags,
+				struct rocker_group_tbl_entry *match)
 {
 	struct rocker *rocker = rocker_port->rocker;
 	struct rocker_group_tbl_entry *found;
-	unsigned long flags;
+	unsigned long lock_flags;
 	int err = 0;
 
-	spin_lock_irqsave(&rocker->group_tbl_lock, flags);
+	spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
 
 	found = rocker_group_tbl_find(rocker, match);
 
 	if (found) {
-		hash_del(&found->entry);
+		if (trans != SWITCHDEV_TRANS_PREPARE)
+			hash_del(&found->entry);
 		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
 	}
 
-	spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
+	spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
 
-	rocker_group_tbl_entry_free(match);
+	rocker_group_tbl_entry_free(trans, match);
 
 	if (found) {
-		err = rocker_cmd_exec(rocker, rocker_port,
+		err = rocker_cmd_exec(rocker_port, trans, flags,
 				      rocker_cmd_group_tbl_del,
-				      found, NULL, NULL, nowait);
-		rocker_group_tbl_entry_free(found);
+				      found, NULL, NULL);
+		rocker_group_tbl_entry_free(trans, found);
 	}
 
 	return err;
 }
 
 static int rocker_group_tbl_do(struct rocker_port *rocker_port,
-			       int flags, struct rocker_group_tbl_entry *entry)
+			       enum switchdev_trans trans, int flags,
+			       struct rocker_group_tbl_entry *entry)
 {
-	bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
-
 	if (flags & ROCKER_OP_FLAG_REMOVE)
-		return rocker_group_tbl_del(rocker_port, entry, nowait);
+		return rocker_group_tbl_del(rocker_port, trans, flags, entry);
 	else
-		return rocker_group_tbl_add(rocker_port, entry, nowait);
+		return rocker_group_tbl_add(rocker_port, trans, flags, entry);
 }
 
 static int rocker_group_l2_interface(struct rocker_port *rocker_port,
-				     int flags, __be16 vlan_id,
-				     u32 out_pport, int pop_vlan)
+				     enum switchdev_trans trans, int flags,
+				     __be16 vlan_id, u32 out_pport,
+				     int pop_vlan)
 {
 	struct rocker_group_tbl_entry *entry;
 
-	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
 	if (!entry)
 		return -ENOMEM;
 
 	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
 	entry->l2_interface.pop_vlan = pop_vlan;
 
-	return rocker_group_tbl_do(rocker_port, flags, entry);
+	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
 }
 
 static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
+				   enum switchdev_trans trans,
 				   int flags, u8 group_count,
-				   u32 *group_ids, u32 group_id)
+				   const u32 *group_ids, u32 group_id)
 {
 	struct rocker_group_tbl_entry *entry;
 
-	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
 	if (!entry)
 		return -ENOMEM;
 
 	entry->group_id = group_id;
 	entry->group_count = group_count;
 
-	entry->group_ids = kcalloc(group_count, sizeof(u32),
-				   rocker_op_flags_gfp(flags));
+	entry->group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
+					       group_count, sizeof(u32));
 	if (!entry->group_ids) {
-		kfree(entry);
+		rocker_port_kfree(trans, entry);
 		return -ENOMEM;
 	}
 	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
 
-	return rocker_group_tbl_do(rocker_port, flags, entry);
+	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
 }
 
 static int rocker_group_l2_flood(struct rocker_port *rocker_port,
-				 int flags, __be16 vlan_id,
-				 u8 group_count, u32 *group_ids,
-				 u32 group_id)
+				 enum switchdev_trans trans, int flags,
+				 __be16 vlan_id, u8 group_count,
+				 const u32 *group_ids, u32 group_id)
 {
-	return rocker_group_l2_fan_out(rocker_port, flags,
+	return rocker_group_l2_fan_out(rocker_port, trans, flags,
 				       group_count, group_ids,
 				       group_id);
 }
 
 static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
-				   int flags, u32 index, u8 *src_mac,
-				   u8 *dst_mac, __be16 vlan_id,
-				   bool ttl_check, u32 pport)
+				   enum switchdev_trans trans, int flags,
+				   u32 index, const u8 *src_mac, const u8 *dst_mac,
+				   __be16 vlan_id, bool ttl_check, u32 pport)
 {
 	struct rocker_group_tbl_entry *entry;
 
-	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
 	if (!entry)
 		return -ENOMEM;
 
@@ -2777,11 +2865,11 @@
 	entry->l3_unicast.ttl_check = ttl_check;
 	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
 
-	return rocker_group_tbl_do(rocker_port, flags, entry);
+	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
 }
 
 static struct rocker_neigh_tbl_entry *
-	rocker_neigh_tbl_find(struct rocker *rocker, __be32 ip_addr)
+rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
 {
 	struct rocker_neigh_tbl_entry *found;
 
@@ -2794,37 +2882,44 @@
 }
 
 static void _rocker_neigh_add(struct rocker *rocker,
+			      enum switchdev_trans trans,
 			      struct rocker_neigh_tbl_entry *entry)
 {
-	entry->index = rocker->neigh_tbl_next_index++;
+	if (trans != SWITCHDEV_TRANS_COMMIT)
+		entry->index = rocker->neigh_tbl_next_index++;
+	if (trans == SWITCHDEV_TRANS_PREPARE)
+		return;
 	entry->ref_count++;
 	hash_add(rocker->neigh_tbl, &entry->entry,
 		 be32_to_cpu(entry->ip_addr));
 }
 
-static void _rocker_neigh_del(struct rocker *rocker,
+static void _rocker_neigh_del(enum switchdev_trans trans,
 			      struct rocker_neigh_tbl_entry *entry)
 {
+	if (trans == SWITCHDEV_TRANS_PREPARE)
+		return;
 	if (--entry->ref_count == 0) {
 		hash_del(&entry->entry);
-		kfree(entry);
+		rocker_port_kfree(trans, entry);
 	}
 }
 
-static void _rocker_neigh_update(struct rocker *rocker,
-				 struct rocker_neigh_tbl_entry *entry,
-				 u8 *eth_dst, bool ttl_check)
+static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
+				 enum switchdev_trans trans,
+				 const u8 *eth_dst, bool ttl_check)
 {
 	if (eth_dst) {
 		ether_addr_copy(entry->eth_dst, eth_dst);
 		entry->ttl_check = ttl_check;
-	} else {
+	} else if (trans != SWITCHDEV_TRANS_PREPARE) {
 		entry->ref_count++;
 	}
 }
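+
+/* In the SWITCHDEV_TRANS_PREPARE phase the three helpers above skip
+ * the ref-counting and hash-table manipulation; _rocker_neigh_add()
+ * still reserves an index, and since the commit phase reuses the same
+ * entry (via the trans_mem list) it sees the index chosen at prepare
+ * time.
+ */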
 
 static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
-				  int flags, __be32 ip_addr, u8 *eth_dst)
+				  enum switchdev_trans trans,
+				  int flags, __be32 ip_addr, const u8 *eth_dst)
 {
 	struct rocker *rocker = rocker_port->rocker;
 	struct rocker_neigh_tbl_entry *entry;
@@ -2840,7 +2935,7 @@
 	bool removing;
 	int err = 0;
 
-	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
 	if (!entry)
 		return -ENOMEM;
 
@@ -2857,12 +2952,12 @@
 		entry->dev = rocker_port->dev;
 		ether_addr_copy(entry->eth_dst, eth_dst);
 		entry->ttl_check = true;
-		_rocker_neigh_add(rocker, entry);
+		_rocker_neigh_add(rocker, trans, entry);
 	} else if (removing) {
 		memcpy(entry, found, sizeof(*entry));
-		_rocker_neigh_del(rocker, found);
+		_rocker_neigh_del(trans, found);
 	} else if (updating) {
-		_rocker_neigh_update(rocker, found, eth_dst, true);
+		_rocker_neigh_update(found, trans, eth_dst, true);
 		memcpy(entry, found, sizeof(*entry));
 	} else {
 		err = -ENOENT;
@@ -2879,7 +2974,7 @@
 	 * other routes' nexthops.
 	 */
 
-	err = rocker_group_l3_unicast(rocker_port, flags,
+	err = rocker_group_l3_unicast(rocker_port, trans, flags,
 				      entry->index,
 				      rocker_port->dev->dev_addr,
 				      entry->eth_dst,
@@ -2895,7 +2990,7 @@
 
 	if (adding || removing) {
 		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
-		err = rocker_flow_tbl_ucast4_routing(rocker_port,
+		err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
 						     eth_type, ip_addr,
 						     inet_make_mask(32),
 						     priority, goto_tbl,
@@ -2909,13 +3004,13 @@
 
 err_out:
 	if (!adding)
-		kfree(entry);
+		rocker_port_kfree(trans, entry);
 
 	return err;
 }
 
 static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
-				    __be32 ip_addr)
+				    enum switchdev_trans trans, __be32 ip_addr)
 {
 	struct net_device *dev = rocker_port->dev;
 	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
@@ -2933,7 +3028,8 @@
 	 */
 
 	if (n->nud_state & NUD_VALID)
-		err = rocker_port_ipv4_neigh(rocker_port, 0, ip_addr, n->ha);
+		err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
+					     ip_addr, n->ha);
 	else
 		neigh_event_send(n, NULL);
 
@@ -2941,7 +3037,8 @@
 	return err;
 }
 
-static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
+static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
+			       enum switchdev_trans trans, int flags,
 			       __be32 ip_addr, u32 *index)
 {
 	struct rocker *rocker = rocker_port->rocker;
@@ -2954,7 +3051,7 @@
 	bool resolved = true;
 	int err = 0;
 
-	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
 	if (!entry)
 		return -ENOMEM;
 
@@ -2971,13 +3068,13 @@
 	if (adding) {
 		entry->ip_addr = ip_addr;
 		entry->dev = rocker_port->dev;
-		_rocker_neigh_add(rocker, entry);
+		_rocker_neigh_add(rocker, trans, entry);
 		*index = entry->index;
 		resolved = false;
 	} else if (removing) {
-		_rocker_neigh_del(rocker, found);
+		_rocker_neigh_del(trans, found);
 	} else if (updating) {
-		_rocker_neigh_update(rocker, found, NULL, false);
+		_rocker_neigh_update(found, trans, NULL, false);
 		resolved = !is_zero_ether_addr(found->eth_dst);
 	} else {
 		err = -ENOENT;
@@ -2986,7 +3083,7 @@
 	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
 
 	if (!adding)
-		kfree(entry);
+		rocker_port_kfree(trans, entry);
 
 	if (err)
 		return err;
@@ -2994,24 +3091,25 @@
 	/* Resolved means neigh ip_addr is resolved to neigh mac. */
 
 	if (!resolved)
-		err = rocker_port_ipv4_resolve(rocker_port, ip_addr);
+		err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
 
 	return err;
 }
 
 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
+					enum switchdev_trans trans,
 					int flags, __be16 vlan_id)
 {
 	struct rocker_port *p;
-	struct rocker *rocker = rocker_port->rocker;
+	const struct rocker *rocker = rocker_port->rocker;
 	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
 	u32 *group_ids;
 	u8 group_count = 0;
 	int err = 0;
 	int i;
 
-	group_ids = kcalloc(rocker->port_count, sizeof(u32),
-			    rocker_op_flags_gfp(flags));
+	group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
+					rocker->port_count, sizeof(u32));
 	if (!group_ids)
 		return -ENOMEM;
 
@@ -3022,6 +3120,8 @@
 
 	for (i = 0; i < rocker->port_count; i++) {
 		p = rocker->ports[i];
+		if (!p)
+			continue;
 		if (!rocker_port_is_bridged(p))
 			continue;
 		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
@@ -3034,23 +3134,22 @@
 	if (group_count == 0)
 		goto no_ports_in_vlan;
 
-	err = rocker_group_l2_flood(rocker_port, flags, vlan_id,
-				    group_count, group_ids,
-				    group_id);
+	err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
+				    group_count, group_ids, group_id);
 	if (err)
 		netdev_err(rocker_port->dev,
 			   "Error (%d) port VLAN l2 flood group\n", err);
 
 no_ports_in_vlan:
-	kfree(group_ids);
+	rocker_port_kfree(trans, group_ids);
 	return err;
 }
 
 static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
-				      int flags, __be16 vlan_id,
-				      bool pop_vlan)
+				      enum switchdev_trans trans, int flags,
+				      __be16 vlan_id, bool pop_vlan)
 {
-	struct rocker *rocker = rocker_port->rocker;
+	const struct rocker *rocker = rocker_port->rocker;
 	struct rocker_port *p;
 	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
 	u32 out_pport;
@@ -3065,9 +3164,8 @@
 	if (rocker_port->stp_state == BR_STATE_LEARNING ||
 	    rocker_port->stp_state == BR_STATE_FORWARDING) {
 		out_pport = rocker_port->pport;
-		err = rocker_group_l2_interface(rocker_port, flags,
-						vlan_id, out_pport,
-						pop_vlan);
+		err = rocker_group_l2_interface(rocker_port, trans, flags,
+						vlan_id, out_pport, pop_vlan);
 		if (err) {
 			netdev_err(rocker_port->dev,
 				   "Error (%d) port VLAN l2 group for pport %d\n",
@@ -3083,7 +3181,7 @@
 
 	for (i = 0; i < rocker->port_count; i++) {
 		p = rocker->ports[i];
-		if (test_bit(ntohs(vlan_id), p->vlan_bitmap))
+		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
 			ref++;
 	}
 
@@ -3091,9 +3189,8 @@
 		return 0;
 
 	out_pport = 0;
-	err = rocker_group_l2_interface(rocker_port, flags,
-					vlan_id, out_pport,
-					pop_vlan);
+	err = rocker_group_l2_interface(rocker_port, trans, flags,
+					vlan_id, out_pport, pop_vlan);
 	if (err) {
 		netdev_err(rocker_port->dev,
 			   "Error (%d) port VLAN l2 group for CPU port\n", err);
@@ -3149,14 +3246,14 @@
 };
 
 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
-				     int flags, struct rocker_ctrl *ctrl,
-				     __be16 vlan_id)
+				     enum switchdev_trans trans, int flags,
+				     const struct rocker_ctrl *ctrl, __be16 vlan_id)
 {
 	u32 in_pport = rocker_port->pport;
 	u32 in_pport_mask = 0xffffffff;
 	u32 out_pport = 0;
-	u8 *eth_src = NULL;
-	u8 *eth_src_mask = NULL;
+	const u8 *eth_src = NULL;
+	const u8 *eth_src_mask = NULL;
 	__be16 vlan_id_mask = htons(0xffff);
 	u8 ip_proto = 0;
 	u8 ip_proto_mask = 0;
@@ -3165,7 +3262,7 @@
 	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
 	int err;
 
-	err = rocker_flow_tbl_acl(rocker_port, flags,
+	err = rocker_flow_tbl_acl(rocker_port, trans, flags,
 				  in_pport, in_pport_mask,
 				  eth_src, eth_src_mask,
 				  ctrl->eth_dst, ctrl->eth_dst_mask,
@@ -3182,7 +3279,8 @@
 }
 
 static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
-					int flags, struct rocker_ctrl *ctrl,
+					enum switchdev_trans trans, int flags,
+					const struct rocker_ctrl *ctrl,
 					__be16 vlan_id)
 {
 	enum rocker_of_dpa_table_id goto_tbl =
@@ -3194,7 +3292,7 @@
 	if (!rocker_port_is_bridged(rocker_port))
 		return 0;
 
-	err = rocker_flow_tbl_bridge(rocker_port, flags,
+	err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
 				     ctrl->eth_dst, ctrl->eth_dst_mask,
 				     vlan_id, tunnel_id,
 				     goto_tbl, group_id, ctrl->copy_to_cpu);
@@ -3206,8 +3304,8 @@
 }
 
 static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
-				      int flags, struct rocker_ctrl *ctrl,
-				      __be16 vlan_id)
+				      enum switchdev_trans trans, int flags,
+				      const struct rocker_ctrl *ctrl, __be16 vlan_id)
 {
 	u32 in_pport_mask = 0xffffffff;
 	__be16 vlan_id_mask = htons(0xffff);
@@ -3216,7 +3314,7 @@
 	if (ntohs(vlan_id) == 0)
 		vlan_id = rocker_port->internal_vlan_id;
 
-	err = rocker_flow_tbl_term_mac(rocker_port,
+	err = rocker_flow_tbl_term_mac(rocker_port, trans,
 				       rocker_port->pport, in_pport_mask,
 				       ctrl->eth_type, ctrl->eth_dst,
 				       ctrl->eth_dst_mask, vlan_id,
@@ -3229,32 +3327,34 @@
 	return err;
 }
 
-static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, int flags,
-				 struct rocker_ctrl *ctrl, __be16 vlan_id)
+static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
+				 enum switchdev_trans trans, int flags,
+				 const struct rocker_ctrl *ctrl, __be16 vlan_id)
 {
 	if (ctrl->acl)
-		return rocker_port_ctrl_vlan_acl(rocker_port, flags,
+		return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
 						 ctrl, vlan_id);
 	if (ctrl->bridge)
-		return rocker_port_ctrl_vlan_bridge(rocker_port, flags,
+		return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
 						    ctrl, vlan_id);
 
 	if (ctrl->term)
-		return rocker_port_ctrl_vlan_term(rocker_port, flags,
+		return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
 						  ctrl, vlan_id);
 
 	return -EOPNOTSUPP;
 }
 
 static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
-				     int flags, __be16 vlan_id)
+				     enum switchdev_trans trans, int flags,
+				     __be16 vlan_id)
 {
 	int err = 0;
 	int i;
 
 	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
 		if (rocker_port->ctrls[i]) {
-			err = rocker_port_ctrl_vlan(rocker_port, flags,
+			err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
 						    &rocker_ctrls[i], vlan_id);
 			if (err)
 				return err;
@@ -3264,8 +3364,9 @@
 	return err;
 }
 
-static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
-			    struct rocker_ctrl *ctrl)
+static int rocker_port_ctrl(struct rocker_port *rocker_port,
+			    enum switchdev_trans trans, int flags,
+			    const struct rocker_ctrl *ctrl)
 {
 	u16 vid;
 	int err = 0;
@@ -3273,7 +3374,7 @@
 	for (vid = 1; vid < VLAN_N_VID; vid++) {
 		if (!test_bit(vid, rocker_port->vlan_bitmap))
 			continue;
-		err = rocker_port_ctrl_vlan(rocker_port, flags,
+		err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
 					    ctrl, htons(vid));
 		if (err)
 			break;
@@ -3282,8 +3383,8 @@
 	return err;
 }
 
-static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
-			    u16 vid)
+static int rocker_port_vlan(struct rocker_port *rocker_port,
+			    enum switchdev_trans trans, int flags, u16 vid)
 {
 	enum rocker_of_dpa_table_id goto_tbl =
 		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
@@ -3297,50 +3398,57 @@
 
 	internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
 
-	if (adding && test_and_set_bit(ntohs(internal_vlan_id),
-				       rocker_port->vlan_bitmap))
+	if (adding && test_bit(ntohs(internal_vlan_id),
+			       rocker_port->vlan_bitmap))
 			return 0; /* already added */
-	else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id),
-						rocker_port->vlan_bitmap))
+	else if (!adding && !test_bit(ntohs(internal_vlan_id),
+				      rocker_port->vlan_bitmap))
 			return 0; /* already removed */
 
+	change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
+
 	if (adding) {
-		err = rocker_port_ctrl_vlan_add(rocker_port, flags,
+		err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
 						internal_vlan_id);
 		if (err) {
 			netdev_err(rocker_port->dev,
 				   "Error (%d) port ctrl vlan add\n", err);
-			return err;
+			goto err_out;
 		}
 	}
 
-	err = rocker_port_vlan_l2_groups(rocker_port, flags,
+	err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
 					 internal_vlan_id, untagged);
 	if (err) {
 		netdev_err(rocker_port->dev,
 			   "Error (%d) port VLAN l2 groups\n", err);
-		return err;
+		goto err_out;
 	}
 
-	err = rocker_port_vlan_flood_group(rocker_port, flags,
+	err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
 					   internal_vlan_id);
 	if (err) {
 		netdev_err(rocker_port->dev,
 			   "Error (%d) port VLAN l2 flood group\n", err);
-		return err;
+		goto err_out;
 	}
 
-	err = rocker_flow_tbl_vlan(rocker_port, flags,
+	err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
 				   in_pport, vlan_id, vlan_id_mask,
 				   goto_tbl, untagged, internal_vlan_id);
 	if (err)
 		netdev_err(rocker_port->dev,
 			   "Error (%d) port VLAN table\n", err);
 
+err_out:
+	if (trans == SWITCHDEV_TRANS_PREPARE)
+		change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
+
 	return err;
 }
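
The bitmap handling above moves from test_and_set_bit/test_and_clear_bit to a separate test_bit plus change_bit so a PREPARE-phase run can flip the bit back before returning. The idiom in miniature, with hypothetical demo_* names:

	#include <linux/bitops.h>

	static int demo_port_vlan(unsigned long *bitmap, u16 vid,
				  bool adding, bool prepare)
	{
		int err;

		if (adding == !!test_bit(vid, bitmap))
			return 0;	/* already in requested state */
		change_bit(vid, bitmap);

		err = 0;	/* ... program flow/group tables here ... */

		if (prepare)	/* PREPARE must leave no state behind */
			change_bit(vid, bitmap);
		return err;
	}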
 
-static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
+static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
+			      enum switchdev_trans trans, int flags)
 {
 	enum rocker_of_dpa_table_id goto_tbl;
 	u32 in_pport;
@@ -3355,7 +3463,7 @@
 	in_pport_mask = 0xffff0000;
 	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
 
-	err = rocker_flow_tbl_ig_port(rocker_port, flags,
+	err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
 				      in_pport, in_pport_mask,
 				      goto_tbl);
 	if (err)
@@ -3367,7 +3475,8 @@
 
 struct rocker_fdb_learn_work {
 	struct work_struct work;
-	struct net_device *dev;
+	struct rocker_port *rocker_port;
+	enum switchdev_trans trans;
 	int flags;
 	u8 addr[ETH_ALEN];
 	u16 vid;
@@ -3375,27 +3484,28 @@
 
 static void rocker_port_fdb_learn_work(struct work_struct *work)
 {
-	struct rocker_fdb_learn_work *lw =
+	const struct rocker_fdb_learn_work *lw =
 		container_of(work, struct rocker_fdb_learn_work, work);
 	bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
 	bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
-	struct netdev_switch_notifier_fdb_info info;
+	struct switchdev_notifier_fdb_info info;
 
 	info.addr = lw->addr;
 	info.vid = lw->vid;
 
 	if (learned && removing)
-		call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL,
-					     lw->dev, &info.info);
+		call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
+					 lw->rocker_port->dev, &info.info);
 	else if (learned && !removing)
-		call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD,
-					     lw->dev, &info.info);
+		call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
+					 lw->rocker_port->dev, &info.info);
 
-	kfree(work);
+	rocker_port_kfree(lw->trans, work);
 }
 
 static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
-				 int flags, const u8 *addr, __be16 vlan_id)
+				 enum switchdev_trans trans, int flags,
+				 const u8 *addr, __be16 vlan_id)
 {
 	struct rocker_fdb_learn_work *lw;
 	enum rocker_of_dpa_table_id goto_tbl =
@@ -3411,8 +3521,8 @@
 		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
 
 	if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
-		err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL,
-					     vlan_id, tunnel_id, goto_tbl,
+		err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
+					     NULL, vlan_id, tunnel_id, goto_tbl,
 					     group_id, copy_to_cpu);
 		if (err)
 			return err;
@@ -3424,24 +3534,29 @@
 	if (!rocker_port_is_bridged(rocker_port))
 		return 0;
 
-	lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags));
+	lw = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*lw));
 	if (!lw)
 		return -ENOMEM;
 
 	INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
 
-	lw->dev = rocker_port->dev;
+	lw->rocker_port = rocker_port;
+	lw->trans = trans;
 	lw->flags = flags;
 	ether_addr_copy(lw->addr, addr);
 	lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
 
-	schedule_work(&lw->work);
+	if (trans == SWITCHDEV_TRANS_PREPARE)
+		rocker_port_kfree(trans, lw);
+	else
+		schedule_work(&lw->work);
 
 	return 0;
 }
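
rocker_port_fdb_learn defers the switchdev FDB notification to process context via a work item, and in the PREPARE phase the pre-allocated item is released instead of scheduled, since a dry run must have no side effects. A self-contained sketch of the same deferral pattern using plain kzalloc/kfree (hypothetical demo_* names):

	#include <linux/workqueue.h>
	#include <linux/slab.h>
	#include <linux/etherdevice.h>

	struct demo_learn_work {
		struct work_struct work;
		u8 addr[ETH_ALEN];
		u16 vid;
	};

	static void demo_learn_fn(struct work_struct *work)
	{
		struct demo_learn_work *lw =
			container_of(work, struct demo_learn_work, work);

		/* ... deliver the addr/vid notification here ... */
		kfree(lw);	/* the handler owns the allocation */
	}

	static int demo_learn(const u8 *addr, u16 vid)
	{
		struct demo_learn_work *lw = kzalloc(sizeof(*lw), GFP_ATOMIC);

		if (!lw)
			return -ENOMEM;
		INIT_WORK(&lw->work, demo_learn_fn);
		ether_addr_copy(lw->addr, addr);
		lw->vid = vid;
		schedule_work(&lw->work);	/* runs later, may sleep */
		return 0;
	}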
 
 static struct rocker_fdb_tbl_entry *
-rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
+rocker_fdb_tbl_find(const struct rocker *rocker,
+		    const struct rocker_fdb_tbl_entry *match)
 {
 	struct rocker_fdb_tbl_entry *found;
 
@@ -3453,6 +3568,7 @@
 }
 
 static int rocker_port_fdb(struct rocker_port *rocker_port,
+			   enum switchdev_trans trans,
 			   const unsigned char *addr,
 			   __be16 vlan_id, int flags)
 {
@@ -3462,7 +3578,7 @@
 	bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
 	unsigned long lock_flags;
 
-	fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags));
+	fdb = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*fdb));
 	if (!fdb)
 		return -ENOMEM;
 
@@ -3477,32 +3593,34 @@
 	found = rocker_fdb_tbl_find(rocker, fdb);
 
 	if (removing && found) {
-		kfree(fdb);
-		hash_del(&found->entry);
+		rocker_port_kfree(trans, fdb);
+		if (trans != SWITCHDEV_TRANS_PREPARE)
+			hash_del(&found->entry);
 	} else if (!removing && !found) {
-		hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
+		if (trans != SWITCHDEV_TRANS_PREPARE)
+			hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
 	}
 
 	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
 
 	/* Check if adding and already exists, or removing and can't find */
 	if (!found != !removing) {
-		kfree(fdb);
+		rocker_port_kfree(trans, fdb);
 		if (!found && removing)
 			return 0;
 		/* Refreshing existing to update aging timers */
 		flags |= ROCKER_OP_FLAG_REFRESH;
 	}
 
-	return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id);
+	return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
 }
 
-static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
+static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
+				 enum switchdev_trans trans, int flags)
 {
 	struct rocker *rocker = rocker_port->rocker;
 	struct rocker_fdb_tbl_entry *found;
 	unsigned long lock_flags;
-	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
 	struct hlist_node *tmp;
 	int bkt;
 	int err = 0;
@@ -3511,6 +3629,8 @@
 	    rocker_port->stp_state == BR_STATE_FORWARDING)
 		return 0;
 
+	flags |= ROCKER_OP_FLAG_REMOVE;
+
 	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
 
 	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
@@ -3518,12 +3638,13 @@
 			continue;
 		if (!found->learned)
 			continue;
-		err = rocker_port_fdb_learn(rocker_port, flags,
+		err = rocker_port_fdb_learn(rocker_port, trans, flags,
 					    found->key.addr,
 					    found->key.vlan_id);
 		if (err)
 			goto err_out;
-		hash_del(&found->entry);
+		if (trans != SWITCHDEV_TRANS_PREPARE)
+			hash_del(&found->entry);
 	}
 
 err_out:
@@ -3533,7 +3654,8 @@
 }
 
 static int rocker_port_router_mac(struct rocker_port *rocker_port,
-				  int flags, __be16 vlan_id)
+				  enum switchdev_trans trans, int flags,
+				  __be16 vlan_id)
 {
 	u32 in_pport_mask = 0xffffffff;
 	__be16 eth_type;
@@ -3546,7 +3668,7 @@
 		vlan_id = rocker_port->internal_vlan_id;
 
 	eth_type = htons(ETH_P_IP);
-	err = rocker_flow_tbl_term_mac(rocker_port,
+	err = rocker_flow_tbl_term_mac(rocker_port, trans,
 				       rocker_port->pport, in_pport_mask,
 				       eth_type, rocker_port->dev->dev_addr,
 				       dst_mac_mask, vlan_id, vlan_id_mask,
@@ -3555,7 +3677,7 @@
 		return err;
 
 	eth_type = htons(ETH_P_IPV6);
-	err = rocker_flow_tbl_term_mac(rocker_port,
+	err = rocker_flow_tbl_term_mac(rocker_port, trans,
 				       rocker_port->pport, in_pport_mask,
 				       eth_type, rocker_port->dev->dev_addr,
 				       dst_mac_mask, vlan_id, vlan_id_mask,
@@ -3564,13 +3686,13 @@
 	return err;
 }
 
-static int rocker_port_fwding(struct rocker_port *rocker_port)
+static int rocker_port_fwding(struct rocker_port *rocker_port,
+			      enum switchdev_trans trans, int flags)
 {
 	bool pop_vlan;
 	u32 out_pport;
 	__be16 vlan_id;
 	u16 vid;
-	int flags = ROCKER_OP_FLAG_NOWAIT;
 	int err;
 
 	/* Port will be forwarding-enabled if its STP state is LEARNING
@@ -3590,9 +3712,8 @@
 			continue;
 		vlan_id = htons(vid);
 		pop_vlan = rocker_vlan_id_is_internal(vlan_id);
-		err = rocker_group_l2_interface(rocker_port, flags,
-						vlan_id, out_pport,
-						pop_vlan);
+		err = rocker_group_l2_interface(rocker_port, trans, flags,
+						vlan_id, out_pport, pop_vlan);
 		if (err) {
 			netdev_err(rocker_port->dev,
 				   "Error (%d) port VLAN l2 group for pport %d\n",
@@ -3604,13 +3725,21 @@
 	return 0;
 }
 
-static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
+static int rocker_port_stp_update(struct rocker_port *rocker_port,
+				  enum switchdev_trans trans, int flags,
+				  u8 state)
 {
 	bool want[ROCKER_CTRL_MAX] = { 0, };
-	int flags;
+	bool prev_ctrls[ROCKER_CTRL_MAX];
+	u8 prev_state;
 	int err;
 	int i;
 
+	if (trans == SWITCHDEV_TRANS_PREPARE) {
+		memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
+		prev_state = rocker_port->stp_state;
+	}
+
 	if (rocker_port->stp_state == state)
 		return 0;
 
@@ -3638,45 +3767,57 @@
 
 	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
 		if (want[i] != rocker_port->ctrls[i]) {
-			flags = ROCKER_OP_FLAG_NOWAIT |
-				(want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
-			err = rocker_port_ctrl(rocker_port, flags,
+			int ctrl_flags = flags |
+					 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
+			err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
 					       &rocker_ctrls[i]);
 			if (err)
-				return err;
+				goto err_out;
 			rocker_port->ctrls[i] = want[i];
 		}
 	}
 
-	err = rocker_port_fdb_flush(rocker_port);
+	err = rocker_port_fdb_flush(rocker_port, trans, flags);
 	if (err)
-		return err;
+		goto err_out;
 
-	return rocker_port_fwding(rocker_port);
+	err = rocker_port_fwding(rocker_port, trans, flags);
+
+err_out:
+	if (trans == SWITCHDEV_TRANS_PREPARE) {
+		memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
+		rocker_port->stp_state = prev_state;
+	}
+
+	return err;
 }
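
rocker_port_stp_update now snapshots the ctrls[] array and STP state up front and restores them when the call was only the PREPARE phase, so a validated-but-uncommitted transaction cannot leak software state. The save/restore idiom in miniature (hypothetical demo_* names):

	struct demo_port { u8 stp_state; };

	static int demo_program_tables(struct demo_port *p) { return 0; }

	static int demo_stp_update(struct demo_port *p, bool prepare, u8 state)
	{
		u8 prev_state = p->stp_state;
		int err;

		p->stp_state = state;
		err = demo_program_tables(p);	/* only validated on PREPARE */
		if (prepare)
			p->stp_state = prev_state;	/* undo speculation */
		return err;
	}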
 
-static int rocker_port_fwd_enable(struct rocker_port *rocker_port)
+static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
+				  enum switchdev_trans trans, int flags)
 {
 	if (rocker_port_is_bridged(rocker_port))
 		/* bridge STP will enable port */
 		return 0;
 
 	/* port is not bridged, so simulate going to FORWARDING state */
-	return rocker_port_stp_update(rocker_port, BR_STATE_FORWARDING);
+	return rocker_port_stp_update(rocker_port, trans, flags,
+				      BR_STATE_FORWARDING);
 }
 
-static int rocker_port_fwd_disable(struct rocker_port *rocker_port)
+static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
+				   enum switchdev_trans trans, int flags)
 {
 	if (rocker_port_is_bridged(rocker_port))
 		/* bridge STP will disable port */
 		return 0;
 
 	/* port is not bridged, so simulate going to DISABLED state */
-	return rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
+	return rocker_port_stp_update(rocker_port, trans, flags,
+				      BR_STATE_DISABLED);
 }
 
 static struct rocker_internal_vlan_tbl_entry *
-rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
+rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
 {
 	struct rocker_internal_vlan_tbl_entry *found;
 
@@ -3731,8 +3872,9 @@
 	return found->vlan_id;
 }
 
-static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port,
-					     int ifindex)
+static void
+rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
+				 int ifindex)
 {
 	struct rocker *rocker = rocker_port->rocker;
 	struct rocker_internal_vlan_tbl_entry *found;
@@ -3760,11 +3902,12 @@
 	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
 }
 
-static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
-				int dst_len, struct fib_info *fi, u32 tb_id,
-				int flags)
+static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
+				enum switchdev_trans trans, __be32 dst,
+				int dst_len, const struct fib_info *fi,
+				u32 tb_id, int flags)
 {
-	struct fib_nh *nh;
+	const struct fib_nh *nh;
 	__be16 eth_type = htons(ETH_P_IP);
 	__be32 dst_mask = inet_make_mask(dst_len);
 	__be16 internal_vlan_id = rocker_port->internal_vlan_id;
@@ -3784,7 +3927,7 @@
 	has_gw = !!nh->nh_gw;
 
 	if (has_gw && nh_on_port) {
-		err = rocker_port_ipv4_nh(rocker_port, flags,
+		err = rocker_port_ipv4_nh(rocker_port, trans, flags,
 					  nh->nh_gw, &index);
 		if (err)
 			return err;
@@ -3795,7 +3938,7 @@
 		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
 	}
 
-	err = rocker_flow_tbl_ucast4_routing(rocker_port, eth_type, dst,
+	err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
 					     dst_mask, priority, goto_tbl,
 					     group_id, flags);
 	if (err)
@@ -3834,7 +3977,7 @@
 		goto err_request_rx_irq;
 	}
 
-	err = rocker_port_fwd_enable(rocker_port);
+	err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
 	if (err)
 		goto err_fwd_enable;
 
@@ -3861,7 +4004,8 @@
 	rocker_port_set_enable(rocker_port, false);
 	napi_disable(&rocker_port->napi_rx);
 	napi_disable(&rocker_port->napi_tx);
-	rocker_port_fwd_disable(rocker_port);
+	rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE,
+				ROCKER_OP_FLAG_NOWAIT);
 	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
 	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
 	rocker_port_dma_rings_fini(rocker_port);
@@ -3869,12 +4013,12 @@
 	return 0;
 }
 
-static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
-				       struct rocker_desc_info *desc_info)
+static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
+				       const struct rocker_desc_info *desc_info)
 {
-	struct rocker *rocker = rocker_port->rocker;
+	const struct rocker *rocker = rocker_port->rocker;
 	struct pci_dev *pdev = rocker->pdev;
-	struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
+	const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
 	struct rocker_tlv *attr;
 	int rem;
 
@@ -3882,7 +4026,7 @@
 	if (!attrs[ROCKER_TLV_TX_FRAGS])
 		return;
 	rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
-		struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
+		const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
 		dma_addr_t dma_handle;
 		size_t len;
 
@@ -3899,11 +4043,11 @@
 	}
 }
 
-static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port,
+static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
 				       struct rocker_desc_info *desc_info,
 				       char *buf, size_t buf_len)
 {
-	struct rocker *rocker = rocker_port->rocker;
+	const struct rocker *rocker = rocker_port->rocker;
 	struct pci_dev *pdev = rocker->pdev;
 	dma_addr_t dma_handle;
 	struct rocker_tlv *frag;
@@ -4008,187 +4152,6 @@
 	return 0;
 }
 
-static int rocker_port_vlan_rx_add_vid(struct net_device *dev,
-				       __be16 proto, u16 vid)
-{
-	struct rocker_port *rocker_port = netdev_priv(dev);
-	int err;
-
-	err = rocker_port_vlan(rocker_port, 0, vid);
-	if (err)
-		return err;
-
-	return rocker_port_router_mac(rocker_port, 0, htons(vid));
-}
-
-static int rocker_port_vlan_rx_kill_vid(struct net_device *dev,
-					__be16 proto, u16 vid)
-{
-	struct rocker_port *rocker_port = netdev_priv(dev);
-	int err;
-
-	err = rocker_port_router_mac(rocker_port, ROCKER_OP_FLAG_REMOVE,
-				     htons(vid));
-	if (err)
-		return err;
-
-	return rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, vid);
-}
-
-static int rocker_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
-			       struct net_device *dev,
-			       const unsigned char *addr, u16 vid,
-			       u16 nlm_flags)
-{
-	struct rocker_port *rocker_port = netdev_priv(dev);
-	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
-	int flags = 0;
-
-	if (!rocker_port_is_bridged(rocker_port))
-		return -EINVAL;
-
-	return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
-}
-
-static int rocker_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
-			       struct net_device *dev,
-			       const unsigned char *addr, u16 vid)
-{
-	struct rocker_port *rocker_port = netdev_priv(dev);
-	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
-	int flags = ROCKER_OP_FLAG_REMOVE;
-
-	if (!rocker_port_is_bridged(rocker_port))
-		return -EINVAL;
-
-	return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
-}
-
-static int rocker_fdb_fill_info(struct sk_buff *skb,
-				struct rocker_port *rocker_port,
-				const unsigned char *addr, u16 vid,
-				u32 portid, u32 seq, int type,
-				unsigned int flags)
-{
-	struct nlmsghdr *nlh;
-	struct ndmsg *ndm;
-
-	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
-	if (!nlh)
-		return -EMSGSIZE;
-
-	ndm = nlmsg_data(nlh);
-	ndm->ndm_family	 = AF_BRIDGE;
-	ndm->ndm_pad1    = 0;
-	ndm->ndm_pad2    = 0;
-	ndm->ndm_flags	 = NTF_SELF;
-	ndm->ndm_type	 = 0;
-	ndm->ndm_ifindex = rocker_port->dev->ifindex;
-	ndm->ndm_state   = NUD_REACHABLE;
-
-	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
-		goto nla_put_failure;
-
-	if (vid && nla_put_u16(skb, NDA_VLAN, vid))
-		goto nla_put_failure;
-
-	nlmsg_end(skb, nlh);
-	return 0;
-
-nla_put_failure:
-	nlmsg_cancel(skb, nlh);
-	return -EMSGSIZE;
-}
-
-static int rocker_port_fdb_dump(struct sk_buff *skb,
-				struct netlink_callback *cb,
-				struct net_device *dev,
-				struct net_device *filter_dev,
-				int idx)
-{
-	struct rocker_port *rocker_port = netdev_priv(dev);
-	struct rocker *rocker = rocker_port->rocker;
-	struct rocker_fdb_tbl_entry *found;
-	struct hlist_node *tmp;
-	int bkt;
-	unsigned long lock_flags;
-	const unsigned char *addr;
-	u16 vid;
-	int err;
-
-	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
-	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
-		if (found->key.pport != rocker_port->pport)
-			continue;
-		if (idx < cb->args[0])
-			goto skip;
-		addr = found->key.addr;
-		vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id);
-		err = rocker_fdb_fill_info(skb, rocker_port, addr, vid,
-					   NETLINK_CB(cb->skb).portid,
-					   cb->nlh->nlmsg_seq,
-					   RTM_NEWNEIGH, NLM_F_MULTI);
-		if (err < 0)
-			break;
-skip:
-		++idx;
-	}
-	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
-	return idx;
-}
-
-static int rocker_port_bridge_setlink(struct net_device *dev,
-				      struct nlmsghdr *nlh, u16 flags)
-{
-	struct rocker_port *rocker_port = netdev_priv(dev);
-	struct nlattr *protinfo;
-	struct nlattr *attr;
-	int err;
-
-	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
-				   IFLA_PROTINFO);
-	if (protinfo) {
-		attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING);
-		if (attr) {
-			if (nla_len(attr) < sizeof(u8))
-				return -EINVAL;
-
-			if (nla_get_u8(attr))
-				rocker_port->brport_flags |= BR_LEARNING;
-			else
-				rocker_port->brport_flags &= ~BR_LEARNING;
-			err = rocker_port_set_learning(rocker_port);
-			if (err)
-				return err;
-		}
-		attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING_SYNC);
-		if (attr) {
-			if (nla_len(attr) < sizeof(u8))
-				return -EINVAL;
-
-			if (nla_get_u8(attr))
-				rocker_port->brport_flags |= BR_LEARNING_SYNC;
-			else
-				rocker_port->brport_flags &= ~BR_LEARNING_SYNC;
-		}
-	}
-
-	return 0;
-}
-
-static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-				      struct net_device *dev,
-				      u32 filter_mask, int nlflags)
-{
-	struct rocker_port *rocker_port = netdev_priv(dev);
-	u16 mode = BRIDGE_MODE_UNDEF;
-	u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
-
-	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
-				       rocker_port->brport_flags, mask,
-				       nlflags);
-}
-
 static int rocker_port_get_phys_port_name(struct net_device *dev,
 					  char *buf, size_t len)
 {
@@ -4196,10 +4159,10 @@
 	struct port_name name = { .buf = buf, .len = len };
 	int err;
 
-	err = rocker_cmd_exec(rocker_port->rocker, rocker_port,
+	err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
 			      rocker_cmd_get_port_settings_prep, NULL,
 			      rocker_cmd_get_port_settings_phys_name_proc,
-			      &name, false);
+			      &name);
 
 	return err ? -EOPNOTSUPP : 0;
 }
@@ -4209,13 +4172,12 @@
 	.ndo_stop			= rocker_port_stop,
 	.ndo_start_xmit			= rocker_port_xmit,
 	.ndo_set_mac_address		= rocker_port_set_mac_address,
-	.ndo_vlan_rx_add_vid		= rocker_port_vlan_rx_add_vid,
-	.ndo_vlan_rx_kill_vid		= rocker_port_vlan_rx_kill_vid,
-	.ndo_fdb_add			= rocker_port_fdb_add,
-	.ndo_fdb_del			= rocker_port_fdb_del,
-	.ndo_fdb_dump			= rocker_port_fdb_dump,
-	.ndo_bridge_setlink		= rocker_port_bridge_setlink,
-	.ndo_bridge_getlink		= rocker_port_bridge_getlink,
+	.ndo_bridge_getlink		= switchdev_port_bridge_getlink,
+	.ndo_bridge_setlink		= switchdev_port_bridge_setlink,
+	.ndo_bridge_dellink		= switchdev_port_bridge_dellink,
+	.ndo_fdb_add			= switchdev_port_fdb_add,
+	.ndo_fdb_del			= switchdev_port_fdb_del,
+	.ndo_fdb_dump			= switchdev_port_fdb_dump,
 	.ndo_get_phys_port_name		= rocker_port_get_phys_port_name,
 };
 
@@ -4223,54 +4185,326 @@
  * swdev interface
  ********************/
 
-static int rocker_port_swdev_parent_id_get(struct net_device *dev,
-					   struct netdev_phys_item_id *psid)
+static int rocker_port_attr_get(struct net_device *dev,
+				struct switchdev_attr *attr)
 {
-	struct rocker_port *rocker_port = netdev_priv(dev);
-	struct rocker *rocker = rocker_port->rocker;
+	const struct rocker_port *rocker_port = netdev_priv(dev);
+	const struct rocker *rocker = rocker_port->rocker;
 
-	psid->id_len = sizeof(rocker->hw.id);
-	memcpy(&psid->id, &rocker->hw.id, psid->id_len);
+	switch (attr->id) {
+	case SWITCHDEV_ATTR_PORT_PARENT_ID:
+		attr->u.ppid.id_len = sizeof(rocker->hw.id);
+		memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
+		break;
+	case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
+		attr->u.brport_flags = rocker_port->brport_flags;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
 	return 0;
 }
 
-static int rocker_port_swdev_port_stp_update(struct net_device *dev, u8 state)
+static void rocker_port_trans_abort(const struct rocker_port *rocker_port)
 {
-	struct rocker_port *rocker_port = netdev_priv(dev);
+	struct list_head *mem, *tmp;
 
-	return rocker_port_stp_update(rocker_port, state);
+	list_for_each_safe(mem, tmp, &rocker_port->trans_mem) {
+		list_del(mem);
+		kfree(mem);
+	}
 }
 
-static int rocker_port_swdev_fib_ipv4_add(struct net_device *dev,
-					  __be32 dst, int dst_len,
-					  struct fib_info *fi,
-					  u8 tos, u8 type,
-					  u32 nlflags, u32 tb_id)
+static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
+					enum switchdev_trans trans,
+					unsigned long brport_flags)
+{
+	unsigned long orig_flags;
+	int err = 0;
+
+	orig_flags = rocker_port->brport_flags;
+	rocker_port->brport_flags = brport_flags;
+	if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
+		err = rocker_port_set_learning(rocker_port, trans);
+
+	if (trans == SWITCHDEV_TRANS_PREPARE)
+		rocker_port->brport_flags = orig_flags;
+
+	return err;
+}
+
+static int rocker_port_attr_set(struct net_device *dev,
+				struct switchdev_attr *attr)
 {
 	struct rocker_port *rocker_port = netdev_priv(dev);
+	int err = 0;
+
+	switch (attr->trans) {
+	case SWITCHDEV_TRANS_PREPARE:
+		BUG_ON(!list_empty(&rocker_port->trans_mem));
+		break;
+	case SWITCHDEV_TRANS_ABORT:
+		rocker_port_trans_abort(rocker_port);
+		return 0;
+	default:
+		break;
+	}
+
+	switch (attr->id) {
+	case SWITCHDEV_ATTR_PORT_STP_STATE:
+		err = rocker_port_stp_update(rocker_port, attr->trans,
+					     ROCKER_OP_FLAG_NOWAIT,
+					     attr->u.stp_state);
+		break;
+	case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
+		err = rocker_port_brport_flags_set(rocker_port, attr->trans,
+						   attr->u.brport_flags);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
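
The trans switch at the top of rocker_port_attr_set (and of rocker_port_obj_add below) implements the driver half of the switchdev two-phase protocol. Roughly what the net/switchdev core does on the other side, simplified and with error handling trimmed:

	static int demo_attr_set(struct net_device *dev,
				 struct switchdev_attr *attr)
	{
		const struct switchdev_ops *ops = dev->switchdev_ops;
		int err;

		attr->trans = SWITCHDEV_TRANS_PREPARE;
		err = ops->switchdev_port_attr_set(dev, attr);
		if (err) {
			/* driver must discard anything it reserved */
			attr->trans = SWITCHDEV_TRANS_ABORT;
			ops->switchdev_port_attr_set(dev, attr);
			return err;
		}
		/* prepare succeeded, so the commit may not fail */
		attr->trans = SWITCHDEV_TRANS_COMMIT;
		return ops->switchdev_port_attr_set(dev, attr);
	}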
+
+static int rocker_port_vlan_add(struct rocker_port *rocker_port,
+				enum switchdev_trans trans, u16 vid, u16 flags)
+{
+	int err;
+
+	/* XXX deal with flags for PVID and untagged */
+
+	err = rocker_port_vlan(rocker_port, trans, 0, vid);
+	if (err)
+		return err;
+
+	err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
+	if (err)
+		rocker_port_vlan(rocker_port, trans,
+				 ROCKER_OP_FLAG_REMOVE, vid);
+
+	return err;
+}
+
+static int rocker_port_vlans_add(struct rocker_port *rocker_port,
+				 enum switchdev_trans trans,
+				 const struct switchdev_obj_vlan *vlan)
+{
+	u16 vid;
+	int err;
+
+	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+		err = rocker_port_vlan_add(rocker_port, trans,
+					   vid, vlan->flags);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
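
rocker_port_vlans_add expands a range object into per-VID adds. For reference, a bridge command such as "bridge vlan add vid 100-102 dev sw1p1 self" reaches the driver as roughly the following object (field names per the structures used above; flag value illustrative):

	struct switchdev_obj obj = {
		.id = SWITCHDEV_OBJ_PORT_VLAN,
		.u.vlan = {
			.flags	   = 0,		/* BRIDGE_VLAN_INFO_* bits */
			.vid_begin = 100,
			.vid_end   = 102,
		},
	};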
+
+static int rocker_port_fdb_add(struct rocker_port *rocker_port,
+			       enum switchdev_trans trans,
+			       const struct switchdev_obj_fdb *fdb)
+{
+	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
 	int flags = 0;
 
-	return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
-				    fi, tb_id, flags);
+	if (!rocker_port_is_bridged(rocker_port))
+		return -EINVAL;
+
+	return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
 }
 
-static int rocker_port_swdev_fib_ipv4_del(struct net_device *dev,
-					  __be32 dst, int dst_len,
-					  struct fib_info *fi,
-					  u8 tos, u8 type, u32 tb_id)
+static int rocker_port_obj_add(struct net_device *dev,
+			       struct switchdev_obj *obj)
 {
 	struct rocker_port *rocker_port = netdev_priv(dev);
-	int flags = ROCKER_OP_FLAG_REMOVE;
+	const struct switchdev_obj_ipv4_fib *fib4;
+	int err = 0;
 
-	return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
-				    fi, tb_id, flags);
+	switch (obj->trans) {
+	case SWITCHDEV_TRANS_PREPARE:
+		BUG_ON(!list_empty(&rocker_port->trans_mem));
+		break;
+	case SWITCHDEV_TRANS_ABORT:
+		rocker_port_trans_abort(rocker_port);
+		return 0;
+	default:
+		break;
+	}
+
+	switch (obj->id) {
+	case SWITCHDEV_OBJ_PORT_VLAN:
+		err = rocker_port_vlans_add(rocker_port, obj->trans,
+					    &obj->u.vlan);
+		break;
+	case SWITCHDEV_OBJ_IPV4_FIB:
+		fib4 = &obj->u.ipv4_fib;
+		err = rocker_port_fib_ipv4(rocker_port, obj->trans,
+					   htonl(fib4->dst), fib4->dst_len,
+					   fib4->fi, fib4->tb_id, 0);
+		break;
+	case SWITCHDEV_OBJ_PORT_FDB:
+		err = rocker_port_fdb_add(rocker_port, obj->trans, &obj->u.fdb);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
 }
 
-static const struct swdev_ops rocker_port_swdev_ops = {
-	.swdev_parent_id_get		= rocker_port_swdev_parent_id_get,
-	.swdev_port_stp_update		= rocker_port_swdev_port_stp_update,
-	.swdev_fib_ipv4_add		= rocker_port_swdev_fib_ipv4_add,
-	.swdev_fib_ipv4_del		= rocker_port_swdev_fib_ipv4_del,
+static int rocker_port_vlan_del(struct rocker_port *rocker_port,
+				u16 vid, u16 flags)
+{
+	int err;
+
+	err = rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE,
+				     ROCKER_OP_FLAG_REMOVE, htons(vid));
+	if (err)
+		return err;
+
+	return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE,
+				ROCKER_OP_FLAG_REMOVE, vid);
+}
+
+static int rocker_port_vlans_del(struct rocker_port *rocker_port,
+				 const struct switchdev_obj_vlan *vlan)
+{
+	u16 vid;
+	int err;
+
+	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+		err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int rocker_port_fdb_del(struct rocker_port *rocker_port,
+			       enum switchdev_trans trans,
+			       const struct switchdev_obj_fdb *fdb)
+{
+	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
+	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
+
+	if (!rocker_port_is_bridged(rocker_port))
+		return -EINVAL;
+
+	return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
+}
+
+static int rocker_port_obj_del(struct net_device *dev,
+			       struct switchdev_obj *obj)
+{
+	struct rocker_port *rocker_port = netdev_priv(dev);
+	const struct switchdev_obj_ipv4_fib *fib4;
+	int err = 0;
+
+	switch (obj->id) {
+	case SWITCHDEV_OBJ_PORT_VLAN:
+		err = rocker_port_vlans_del(rocker_port, &obj->u.vlan);
+		break;
+	case SWITCHDEV_OBJ_IPV4_FIB:
+		fib4 = &obj->u.ipv4_fib;
+		err = rocker_port_fib_ipv4(rocker_port, SWITCHDEV_TRANS_NONE,
+					   htonl(fib4->dst), fib4->dst_len,
+					   fib4->fi, fib4->tb_id,
+					   ROCKER_OP_FLAG_REMOVE);
+		break;
+	case SWITCHDEV_OBJ_PORT_FDB:
+		err = rocker_port_fdb_del(rocker_port, obj->trans, &obj->u.fdb);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
+				struct switchdev_obj *obj)
+{
+	struct rocker *rocker = rocker_port->rocker;
+	struct switchdev_obj_fdb *fdb = &obj->u.fdb;
+	struct rocker_fdb_tbl_entry *found;
+	struct hlist_node *tmp;
+	unsigned long lock_flags;
+	int bkt;
+	int err = 0;
+
+	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
+	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
+		if (found->key.pport != rocker_port->pport)
+			continue;
+		fdb->addr = found->key.addr;
+		fdb->vid = rocker_port_vlan_to_vid(rocker_port,
+						   found->key.vlan_id);
+		err = obj->cb(rocker_port->dev, obj);
+		if (err)
+			break;
+	}
+	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
+
+	return err;
+}
+
+static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
+				 struct switchdev_obj *obj)
+{
+	struct switchdev_obj_vlan *vlan = &obj->u.vlan;
+	u16 vid;
+	int err = 0;
+
+	for (vid = 1; vid < VLAN_N_VID; vid++) {
+		if (!test_bit(vid, rocker_port->vlan_bitmap))
+			continue;
+		vlan->flags = 0;
+		if (rocker_vlan_id_is_internal(htons(vid)))
+			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+		vlan->vid_begin = vlan->vid_end = vid;
+		err = obj->cb(rocker_port->dev, obj);
+		if (err)
+			break;
+	}
+
+	return err;
+}
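
Both dump helpers above drive a caller-supplied callback once per entry rather than writing netlink directly; the driver fills obj->u.vlan (or obj->u.fdb) and invokes obj->cb for each hit. A consumer might look like this (hypothetical demo_vlan_cb):

	static int demo_vlan_cb(struct net_device *dev,
				struct switchdev_obj *obj)
	{
		pr_info("%s: vid %u-%u flags 0x%x\n", dev->name,
			obj->u.vlan.vid_begin, obj->u.vlan.vid_end,
			obj->u.vlan.flags);
		return 0;	/* nonzero would stop the walk */
	}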
+
+static int rocker_port_obj_dump(struct net_device *dev,
+				struct switchdev_obj *obj)
+{
+	const struct rocker_port *rocker_port = netdev_priv(dev);
+	int err = 0;
+
+	switch (obj->id) {
+	case SWITCHDEV_OBJ_PORT_FDB:
+		err = rocker_port_fdb_dump(rocker_port, obj);
+		break;
+	case SWITCHDEV_OBJ_PORT_VLAN:
+		err = rocker_port_vlan_dump(rocker_port, obj);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static const struct switchdev_ops rocker_port_switchdev_ops = {
+	.switchdev_port_attr_get	= rocker_port_attr_get,
+	.switchdev_port_attr_set	= rocker_port_attr_set,
+	.switchdev_port_obj_add		= rocker_port_obj_add,
+	.switchdev_port_obj_del		= rocker_port_obj_del,
+	.switchdev_port_obj_dump	= rocker_port_obj_dump,
 };
 
 /********************
@@ -4334,8 +4568,7 @@
 }
 
 static int
-rocker_cmd_get_port_stats_prep(struct rocker *rocker,
-			       struct rocker_port *rocker_port,
+rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
 			       struct rocker_desc_info *desc_info,
 			       void *priv)
 {
@@ -4359,14 +4592,13 @@
 }
 
 static int
-rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
-				       struct rocker_port *rocker_port,
-				       struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
+				       const struct rocker_desc_info *desc_info,
 				       void *priv)
 {
-	struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
-	struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
-	struct rocker_tlv *pattr;
+	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+	const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
+	const struct rocker_tlv *pattr;
 	u32 pport;
 	u64 *data = priv;
 	int i;
@@ -4400,10 +4632,10 @@
 static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
 					     void *priv)
 {
-	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
 			       rocker_cmd_get_port_stats_prep, NULL,
 			       rocker_cmd_get_port_stats_ethtool_proc,
-			       priv, false);
+			       priv);
 }
 
 static void rocker_port_get_stats(struct net_device *dev,
@@ -4417,8 +4649,6 @@
 		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
 			data[i] = 0;
 	}
-
-	return;
 }
 
 static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
@@ -4453,8 +4683,8 @@
 static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
 {
 	struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
-	struct rocker *rocker = rocker_port->rocker;
-	struct rocker_desc_info *desc_info;
+	const struct rocker *rocker = rocker_port->rocker;
+	const struct rocker_desc_info *desc_info;
 	u32 credits = 0;
 	int err;
 
@@ -4472,8 +4702,9 @@
 		if (err == 0) {
 			rocker_port->dev->stats.tx_packets++;
 			rocker_port->dev->stats.tx_bytes += skb->len;
-		} else
+		} else {
 			rocker_port->dev->stats.tx_errors++;
+		}
 
 		dev_kfree_skb_any(skb);
 		credits++;
@@ -4488,11 +4719,11 @@
 	return 0;
 }
 
-static int rocker_port_rx_proc(struct rocker *rocker,
-			       struct rocker_port *rocker_port,
+static int rocker_port_rx_proc(const struct rocker *rocker,
+			       const struct rocker_port *rocker_port,
 			       struct rocker_desc_info *desc_info)
 {
-	struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
 	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
 	size_t rx_len;
 
@@ -4514,7 +4745,7 @@
 
 	netif_receive_skb(skb);
 
-	return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
+	return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
 }
 
 static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
@@ -4525,7 +4756,7 @@
 static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
 {
 	struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
-	struct rocker *rocker = rocker_port->rocker;
+	const struct rocker *rocker = rocker_port->rocker;
 	struct rocker_desc_info *desc_info;
 	u32 credits = 0;
 	int err;
@@ -4565,9 +4796,9 @@
  * PCI driver ops
  *****************/
 
-static void rocker_carrier_init(struct rocker_port *rocker_port)
+static void rocker_carrier_init(const struct rocker_port *rocker_port)
 {
-	struct rocker *rocker = rocker_port->rocker;
+	const struct rocker *rocker = rocker_port->rocker;
 	u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
 	bool link_up;
 
@@ -4578,23 +4809,26 @@
 		netif_carrier_off(rocker_port->dev);
 }
 
-static void rocker_remove_ports(struct rocker *rocker)
+static void rocker_remove_ports(const struct rocker *rocker)
 {
 	struct rocker_port *rocker_port;
 	int i;
 
 	for (i = 0; i < rocker->port_count; i++) {
 		rocker_port = rocker->ports[i];
-		rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE);
+		if (!rocker_port)
+			continue;
+		rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
+				   ROCKER_OP_FLAG_REMOVE);
 		unregister_netdev(rocker_port->dev);
 	}
 	kfree(rocker->ports);
 }
 
-static void rocker_port_dev_addr_init(struct rocker *rocker,
-				      struct rocker_port *rocker_port)
+static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
 {
-	struct pci_dev *pdev = rocker->pdev;
+	const struct rocker *rocker = rocker_port->rocker;
+	const struct pci_dev *pdev = rocker->pdev;
 	int err;
 
 	err = rocker_cmd_get_port_settings_macaddr(rocker_port,
@@ -4607,9 +4841,10 @@
 
 static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
 {
-	struct pci_dev *pdev = rocker->pdev;
+	const struct pci_dev *pdev = rocker->pdev;
 	struct rocker_port *rocker_port;
 	struct net_device *dev;
+	u16 untagged_vid = 0;
 	int err;
 
 	dev = alloc_etherdev(sizeof(struct rocker_port));
@@ -4621,20 +4856,19 @@
 	rocker_port->port_number = port_number;
 	rocker_port->pport = port_number + 1;
 	rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
+	INIT_LIST_HEAD(&rocker_port->trans_mem);
 
-	rocker_port_dev_addr_init(rocker, rocker_port);
+	rocker_port_dev_addr_init(rocker_port);
 	dev->netdev_ops = &rocker_port_netdev_ops;
 	dev->ethtool_ops = &rocker_port_ethtool_ops;
-	dev->swdev_ops = &rocker_port_swdev_ops;
+	dev->switchdev_ops = &rocker_port_switchdev_ops;
 	netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
 		       NAPI_POLL_WEIGHT);
 	netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
 		       NAPI_POLL_WEIGHT);
 	rocker_carrier_init(rocker_port);
 
-	dev->features |= NETIF_F_NETNS_LOCAL |
-			 NETIF_F_HW_VLAN_CTAG_FILTER |
-			 NETIF_F_HW_SWITCH_OFFLOAD;
+	dev->features |= NETIF_F_NETNS_LOCAL;
 
 	err = register_netdev(dev);
 	if (err) {
@@ -4643,18 +4877,29 @@
 	}
 	rocker->ports[port_number] = rocker_port;
 
-	rocker_port_set_learning(rocker_port);
+	rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
 
-	rocker_port->internal_vlan_id =
-		rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
-	err = rocker_port_ig_tbl(rocker_port, 0);
+	err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
 	if (err) {
 		dev_err(&pdev->dev, "install ig port table failed\n");
 		goto err_port_ig_tbl;
 	}
 
+	rocker_port->internal_vlan_id =
+		rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
+
+	err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
+				   untagged_vid, 0);
+	if (err) {
+		netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
+		goto err_untagged_vlan;
+	}
+
 	return 0;
 
+err_untagged_vlan:
+	rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
+			   ROCKER_OP_FLAG_REMOVE);
 err_port_ig_tbl:
 	unregister_netdev(dev);
 err_register_netdev:
@@ -4669,7 +4914,7 @@
 	int err;
 
 	alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
-	rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
+	rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
 	if (!rocker->ports)
 		return -ENOMEM;
 	for (i = 0; i < rocker->port_count; i++) {
@@ -4718,7 +4963,7 @@
 	return err;
 }
 
-static void rocker_msix_fini(struct rocker *rocker)
+static void rocker_msix_fini(const struct rocker *rocker)
 {
 	pci_disable_msix(rocker->pdev);
 	kfree(rocker->msix_entries);
@@ -4884,7 +5129,7 @@
  * Net device notifier event handler
  ************************************/
 
-static bool rocker_port_dev_check(struct net_device *dev)
+static bool rocker_port_dev_check(const struct net_device *dev)
 {
 	return dev->netdev_ops == &rocker_port_netdev_ops;
 }
@@ -4892,45 +5137,55 @@
 static int rocker_port_bridge_join(struct rocker_port *rocker_port,
 				   struct net_device *bridge)
 {
+	u16 untagged_vid = 0;
 	int err;
 
+	/* Port is joining bridge, so the internal VLAN for the
+	 * port is going to change to the bridge internal VLAN.
+	 * Let's remove untagged VLAN (vid=0) from port and
+	 * re-add once internal VLAN has changed.
+	 */
+
+	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
+	if (err)
+		return err;
+
 	rocker_port_internal_vlan_id_put(rocker_port,
 					 rocker_port->dev->ifindex);
+	rocker_port->internal_vlan_id =
+		rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
 
 	rocker_port->bridge_dev = bridge;
 
-	/* Use bridge internal VLAN ID for untagged pkts */
-	err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
-	if (err)
-		return err;
-	rocker_port->internal_vlan_id =
-		rocker_port_internal_vlan_id_get(rocker_port,
-						 bridge->ifindex);
-	return rocker_port_vlan(rocker_port, 0, 0);
+	return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
+				    untagged_vid, 0);
 }
 
 static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
 {
+	u16 untagged_vid = 0;
 	int err;
 
+	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
+	if (err)
+		return err;
+
 	rocker_port_internal_vlan_id_put(rocker_port,
 					 rocker_port->bridge_dev->ifindex);
-
-	rocker_port->bridge_dev = NULL;
-
-	/* Use port internal VLAN ID for untagged pkts */
-	err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
-	if (err)
-		return err;
 	rocker_port->internal_vlan_id =
 		rocker_port_internal_vlan_id_get(rocker_port,
 						 rocker_port->dev->ifindex);
-	err = rocker_port_vlan(rocker_port, 0, 0);
+
+	rocker_port->bridge_dev = NULL;
+
+	err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
+				   untagged_vid, 0);
 	if (err)
 		return err;
 
 	if (rocker_port->dev->flags & IFF_UP)
-		err = rocker_port_fwd_enable(rocker_port);
+		err = rocker_port_fwd_enable(rocker_port,
+					     SWITCHDEV_TRANS_NONE, 0);
 
 	return err;
 }
@@ -4989,10 +5244,12 @@
 static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
 {
 	struct rocker_port *rocker_port = netdev_priv(dev);
-	int flags = (n->nud_state & NUD_VALID) ? 0 : ROCKER_OP_FLAG_REMOVE;
+	int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
+		    ROCKER_OP_FLAG_NOWAIT;
 	__be32 ip_addr = *(__be32 *)n->primary_key;
 
-	return rocker_port_ipv4_neigh(rocker_port, flags, ip_addr, n->ha);
+	return rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
+				      flags, ip_addr, n->ha);
 }
 
 static int rocker_netevent_event(struct notifier_block *unused,
@@ -5040,7 +5297,7 @@
 	return 0;
 
 err_pci_register_driver:
-	unregister_netdevice_notifier(&rocker_netevent_nb);
+	unregister_netevent_notifier(&rocker_netevent_nb);
 	unregister_netdevice_notifier(&rocker_netdevice_nb);
 	return err;
 }
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index a4e9591..c61fbf9 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -65,9 +65,9 @@
 #define ROCKER_TEST_DMA_CTRL		0x0034
 
 /* Rocker test register ctrl */
-#define ROCKER_TEST_DMA_CTRL_CLEAR	(1 << 0)
-#define ROCKER_TEST_DMA_CTRL_FILL	(1 << 1)
-#define ROCKER_TEST_DMA_CTRL_INVERT	(1 << 2)
+#define ROCKER_TEST_DMA_CTRL_CLEAR	BIT(0)
+#define ROCKER_TEST_DMA_CTRL_FILL	BIT(1)
+#define ROCKER_TEST_DMA_CTRL_INVERT	BIT(2)
 
 /* Rocker DMA ring register offsets */
 #define ROCKER_DMA_DESC_ADDR(x)		(0x1000 + (x) * 32)  /* 8-byte */
@@ -79,7 +79,7 @@
 #define ROCKER_DMA_DESC_RES1(x)		(0x101c + (x) * 32)
 
 /* Rocker dma ctrl register bits */
-#define ROCKER_DMA_DESC_CTRL_RESET	(1 << 0)
+#define ROCKER_DMA_DESC_CTRL_RESET	BIT(0)
 
 /* Rocker DMA ring types */
 enum rocker_dma_type {
@@ -111,7 +111,7 @@
 	u16 comp_err;
 };
 
-#define ROCKER_DMA_DESC_COMP_ERR_GEN	(1 << 15)
+#define ROCKER_DMA_DESC_COMP_ERR_GEN	BIT(15)
 
 /* Rocker DMA TLV struct */
 struct rocker_tlv {
@@ -237,14 +237,14 @@
 	ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1,
 };
 
-#define ROCKER_RX_FLAGS_IPV4			(1 << 0)
-#define ROCKER_RX_FLAGS_IPV6			(1 << 1)
-#define ROCKER_RX_FLAGS_CSUM_CALC		(1 << 2)
-#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD		(1 << 3)
-#define ROCKER_RX_FLAGS_IP_FRAG			(1 << 4)
-#define ROCKER_RX_FLAGS_TCP			(1 << 5)
-#define ROCKER_RX_FLAGS_UDP			(1 << 6)
-#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD	(1 << 7)
+#define ROCKER_RX_FLAGS_IPV4			BIT(0)
+#define ROCKER_RX_FLAGS_IPV6			BIT(1)
+#define ROCKER_RX_FLAGS_CSUM_CALC		BIT(2)
+#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD		BIT(3)
+#define ROCKER_RX_FLAGS_IP_FRAG			BIT(4)
+#define ROCKER_RX_FLAGS_TCP			BIT(5)
+#define ROCKER_RX_FLAGS_UDP			BIT(6)
+#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD	BIT(7)
 
 enum {
 	ROCKER_TLV_TX_UNSPEC,
@@ -460,6 +460,6 @@
 #define ROCKER_SWITCH_ID		0x0320 /* 8-byte */
 
 /* Rocker control bits */
-#define ROCKER_CONTROL_RESET		(1 << 0)
+#define ROCKER_CONTROL_RESET		BIT(0)
 
 #endif
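
The rocker.h hunks above mechanically replace open-coded shifts with the BIT() helper from <linux/bitops.h>. The two spellings define the same mask; BIT(n) expands to (1UL << (n)), so wide masks stay well-defined and the form is the kernel's canonical one. For example (demo names made up):

	#include <linux/bitops.h>

	#define DEMO_FLAG_OLD	(1 << 3)	/* previous spelling */
	#define DEMO_FLAG_NEW	BIT(3)		/* same bit, as 1UL << 3 */
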
diff --git a/drivers/net/ethernet/seeq/Kconfig b/drivers/net/ethernet/seeq/Kconfig
index 11f168e..69c62d8 100644
--- a/drivers/net/ethernet/seeq/Kconfig
+++ b/drivers/net/ethernet/seeq/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on HAS_IOMEM
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 0889212..4dd92b7 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -36,3 +36,12 @@
 	  This enables support for the SFC9000 I/O Virtualization
 	  features, allowing accelerated network performance in
 	  virtualized environments.
+config SFC_MCDI_LOGGING
+	bool "Solarflare SFC9000/SFC9100-family MCDI logging support"
+	depends on SFC
+	default y
+	---help---
+	  This enables support for tracing of MCDI (Management-Controller-to-
+	  Driver-Interface) commands and responses, allowing debugging of
+	  driver/firmware interaction.  Tracing is enabled at run time via
+	  the sysfs file 'mcdi_logging' under the PCI device.
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 3a83c0d..ce8470fe7 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -3,6 +3,6 @@
 			   tenxpress.o txc43128_phy.o falcon_boards.o \
 			   mcdi.o mcdi_port.o mcdi_mon.o ptp.o
 sfc-$(CONFIG_SFC_MTD)	+= mtd.o
-sfc-$(CONFIG_SFC_SRIOV)	+= siena_sriov.o
+sfc-$(CONFIG_SFC_SRIOV)	+= sriov.o siena_sriov.o ef10_sriov.o
 
 obj-$(CONFIG_SFC)	+= sfc.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index fbb6cfa..8476434 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -15,6 +15,7 @@
 #include "nic.h"
 #include "workarounds.h"
 #include "selftest.h"
+#include "ef10_sriov.h"
 #include <linux/in.h>
 #include <linux/jhash.h>
 #include <linux/wait.h>
@@ -30,6 +31,9 @@
 
 /* The reserved RSS context value */
 #define EFX_EF10_RSS_CONTEXT_INVALID	0xffffffff
+/* The maximum size of a shared RSS context */
+/* TODO: this should really be from the mcdi protocol export */
+#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
 
 /* The filter table(s) are managed by firmware and we have write-only
  * access.  When removing filters we must identify them to the
@@ -77,7 +81,6 @@
 /* An arbitrary search limit for the software hash table */
 #define EFX_EF10_FILTER_SEARCH_LIMIT 200
 
-static void efx_ef10_rx_push_rss_config(struct efx_nic *efx);
 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
 static void efx_ef10_filter_table_remove(struct efx_nic *efx);
 
@@ -92,9 +95,50 @@
 
 static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
 {
-	return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
+	int bar;
+
+	bar = efx->type->mem_bar;
+	return resource_size(&efx->pci_dev->resource[bar]);
 }
 
+static int efx_ef10_get_pf_index(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	size_t outlen;
+	int rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
+			  sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < sizeof(outbuf))
+		return -EIO;
+
+	nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
+	return 0;
+}
+
+#ifdef CONFIG_SFC_SRIOV
+static int efx_ef10_get_vf_index(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	size_t outlen;
+	int rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
+			  sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < sizeof(outbuf))
+		return -EIO;
+
+	nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
+	return 0;
+}
+#endif
+
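efx_ef10_get_pf_index and efx_ef10_get_vf_index above follow the standard MCDI shape: a stack response buffer sized from the protocol header, the RPC itself, then a length check before any field is read. Since MC_CMD_GET_FUNCTION_INFO carries both indices in one response, a combined helper is possible in principle; a sketch of that, not what the patch does, and assuming the VF field is meaningful for the caller:

	static int demo_get_fn_indices(struct efx_nic *efx)
	{
		MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
		struct efx_ef10_nic_data *nic_data = efx->nic_data;
		size_t outlen;
		int rc;

		rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0,
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < sizeof(outbuf))
			return -EIO;	/* truncated reply from firmware */

		nic_data->pf_index = MCDI_DWORD(outbuf,
						GET_FUNCTION_INFO_OUT_PF);
		nic_data->vf_index = MCDI_DWORD(outbuf,
						GET_FUNCTION_INFO_OUT_VF);
		return 0;
	}
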
 static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
 {
 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
@@ -117,6 +161,13 @@
 	nic_data->datapath_caps =
 		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
 
+	/* record the DPCPU firmware IDs to determine VEB vswitching support.
+	 */
+	nic_data->rx_dpcpu_fw_id =
+		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
+	nic_data->tx_dpcpu_fw_id =
+		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
+
 	if (!(nic_data->datapath_caps &
 	      (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
 		netif_err(efx, drv, efx->net_dev,
@@ -147,7 +198,7 @@
 	return rc > 0 ? rc : -ERANGE;
 }
 
-static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
+static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
 {
 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
 	size_t outlen;
@@ -167,9 +218,66 @@
 	return 0;
 }
 
+static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
+	size_t outlen;
+	int num_addrs, rc;
+
+	MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
+		       EVB_PORT_ID_ASSIGNED);
+	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
+			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
+		return -EIO;
+
+	num_addrs = MCDI_DWORD(outbuf,
+			       VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);
+
+	WARN_ON(num_addrs != 1);
+
+	ether_addr_copy(mac_address,
+			MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));
+
+	return 0;
+}
+
+static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
+					       struct device_attribute *attr,
+					       char *buf)
+{
+	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+	return sprintf(buf, "%d\n",
+		       ((efx->mcdi->fn_flags) &
+			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
+		       ? 1 : 0);
+}
+
+static ssize_t efx_ef10_show_primary_flag(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+	return sprintf(buf, "%d\n",
+		       ((efx->mcdi->fn_flags) &
+			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
+		       ? 1 : 0);
+}
+
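+/* Expose the MCDI function flags as read-only sysfs attributes so that
+ * userspace can tell which function has link control and which is the
+ * primary function, e.g. (device address is illustrative):
+ *
+ *   cat /sys/bus/pci/devices/0000:03:00.0/primary_flag
+ */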
+static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
+		   NULL);
+static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
+
 static int efx_ef10_probe(struct efx_nic *efx)
 {
 	struct efx_ef10_nic_data *nic_data;
+	struct net_device *net_dev = efx->net_dev;
 	int i, rc;
 
 	/* We can have one VI for each 8K region.  However, until we
@@ -178,7 +286,7 @@
 	efx->max_channels =
 		min_t(unsigned int,
 		      EFX_MAX_CHANNELS,
-		      resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
+		      efx_ef10_mem_map_size(efx) /
 		      (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
 	if (WARN_ON(efx->max_channels == 0))
 		return -EIO;
@@ -188,6 +296,9 @@
 		return -ENOMEM;
 	efx->nic_data = nic_data;
 
+	/* we assume later that we can copy from this buffer in dwords */
+	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);
+
 	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
 				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
 	if (rc)
@@ -209,6 +320,8 @@
 
 	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 
+	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+
 	/* In case we're recovering from a crash (kexec), we want to
 	 * cancel any outstanding request by the previous user of this
 	 * function.  We send a special message using the least
@@ -230,45 +343,85 @@
 	if (rc)
 		goto fail3;
 
+	rc = device_create_file(&efx->pci_dev->dev,
+				&dev_attr_link_control_flag);
+	if (rc)
+		goto fail3;
+
+	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+	if (rc)
+		goto fail4;
+
+	rc = efx_ef10_get_pf_index(efx);
+	if (rc)
+		goto fail5;
+
 	rc = efx_ef10_init_datapath_caps(efx);
 	if (rc < 0)
-		goto fail3;
+		goto fail5;
 
 	efx->rx_packet_len_offset =
 		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
 
 	rc = efx_mcdi_port_get_number(efx);
 	if (rc < 0)
-		goto fail3;
+		goto fail5;
 	efx->port_num = rc;
+	net_dev->dev_port = rc;
 
-	rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
+	rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
 	if (rc)
-		goto fail3;
+		goto fail5;
 
 	rc = efx_ef10_get_sysclk_freq(efx);
 	if (rc < 0)
-		goto fail3;
+		goto fail5;
 	efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
 
-	/* Check whether firmware supports bug 35388 workaround */
+	/* Check whether firmware supports bug 35388 workaround.
+	 * First try to enable it, then if we get EPERM, just
+	 * ask if it's already enabled.
+	 */
 	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
-	if (rc == 0)
+	if (rc == 0) {
 		nic_data->workaround_35388 = true;
-	else if (rc != -ENOSYS && rc != -ENOENT)
-		goto fail3;
+	} else if (rc == -EPERM) {
+		unsigned int enabled;
+
+		rc = efx_mcdi_get_workarounds(efx, NULL, &enabled);
+		if (rc)
+			goto fail5;
+		nic_data->workaround_35388 = enabled &
+			MC_CMD_GET_WORKAROUNDS_OUT_BUG35388;
+	} else if (rc != -ENOSYS && rc != -ENOENT) {
+		goto fail5;
+	}
 	netif_dbg(efx, probe, efx->net_dev,
 		  "workaround for bug 35388 is %sabled\n",
 		  nic_data->workaround_35388 ? "en" : "dis");
 
 	rc = efx_mcdi_mon_probe(efx);
-	if (rc)
-		goto fail3;
+	if (rc && rc != -EPERM)
+		goto fail5;
 
 	efx_ptp_probe(efx, NULL);
 
+#ifdef CONFIG_SFC_SRIOV
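+	/* A VF derives its port ID from the parent PF's MAC address,
+	 * while a PF uses its own permanent address.
+	 */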
+	if (efx->pci_dev->physfn && !efx->pci_dev->is_physfn) {
+		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+
+		efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
+	} else
+#endif
+		ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);
+
 	return 0;
 
+fail5:
+	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+fail4:
+	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
 fail3:
 	efx_mcdi_fini(efx);
 fail2:
@@ -281,7 +434,7 @@
 
 static int efx_ef10_free_vis(struct efx_nic *efx)
 {
-	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+	MCDI_DECLARE_BUF_ERR(outbuf);
 	size_t outlen;
 	int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
 				    outbuf, sizeof(outbuf), &outlen);
@@ -352,9 +505,9 @@
 static int efx_ef10_link_piobufs(struct efx_nic *efx)
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
-	MCDI_DECLARE_BUF(inbuf,
-			 max(MC_CMD_LINK_PIOBUF_IN_LEN,
-			     MC_CMD_UNLINK_PIOBUF_IN_LEN));
+	_MCDI_DECLARE_BUF(inbuf,
+			  max(MC_CMD_LINK_PIOBUF_IN_LEN,
+			      MC_CMD_UNLINK_PIOBUF_IN_LEN));
 	struct efx_channel *channel;
 	struct efx_tx_queue *tx_queue;
 	unsigned int offset, index;
@@ -363,6 +516,8 @@
 	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
 	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
 
+	memset(inbuf, 0, sizeof(inbuf));
+
 	/* Link a buffer to each VI in the write-combining mapping */
 	for (index = 0; index < nic_data->n_piobufs; ++index) {
 		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
@@ -475,6 +630,25 @@
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 	int rc;
 
+#ifdef CONFIG_SFC_SRIOV
+	struct efx_ef10_nic_data *nic_data_pf;
+	struct pci_dev *pci_dev_pf;
+	struct efx_nic *efx_pf;
+	struct ef10_vf *vf;
+
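+	/* On VF removal, clear the back-pointer held in the parent PF's
+	 * per-VF state so the PF does not reference a dead device.
+	 */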
+	if (efx->pci_dev->is_virtfn) {
+		pci_dev_pf = efx->pci_dev->physfn;
+		if (pci_dev_pf) {
+			efx_pf = pci_get_drvdata(pci_dev_pf);
+			nic_data_pf = efx_pf->nic_data;
+			vf = nic_data_pf->vf + nic_data->vf_index;
+			vf->efx = NULL;
+		} else
+			netif_info(efx, drv, efx->net_dev,
+				   "Could not get the PF id from VF\n");
+	}
+#endif
+
 	efx_ptp_remove(efx);
 
 	efx_mcdi_mon_remove(efx);
@@ -490,11 +664,78 @@
 	if (!nic_data->must_restore_piobufs)
 		efx_ef10_free_piobufs(efx);
 
+	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
+
 	efx_mcdi_fini(efx);
 	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
 	kfree(nic_data);
 }
 
+static int efx_ef10_probe_pf(struct efx_nic *efx)
+{
+	return efx_ef10_probe(efx);
+}
+
+#ifdef CONFIG_SFC_SRIOV
+static int efx_ef10_probe_vf(struct efx_nic *efx)
+{
+	int rc;
+	struct pci_dev *pci_dev_pf;
+
+	/* If the parent PF has no VF data structure, it doesn't know about this
+	 * VF so fail probe.  The VF needs to be re-created.  This can happen
+	 * if the PF driver is unloaded while the VF is assigned to a guest.
+	 */
+	pci_dev_pf = efx->pci_dev->physfn;
+	if (pci_dev_pf) {
+		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+		struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;
+
+		if (!nic_data_pf->vf) {
+			netif_info(efx, drv, efx->net_dev,
+				   "The VF cannot link to its parent PF; "
+				   "please destroy and re-create the VF\n");
+			return -EBUSY;
+		}
+	}
+
+	rc = efx_ef10_probe(efx);
+	if (rc)
+		return rc;
+
+	rc = efx_ef10_get_vf_index(efx);
+	if (rc)
+		goto fail;
+
+	if (efx->pci_dev->is_virtfn) {
+		if (efx->pci_dev->physfn) {
+			struct efx_nic *efx_pf =
+				pci_get_drvdata(efx->pci_dev->physfn);
+			struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
+			struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+			nic_data_p->vf[nic_data->vf_index].efx = efx;
+			nic_data_p->vf[nic_data->vf_index].pci_dev =
+				efx->pci_dev;
+		} else
+			netif_info(efx, drv, efx->net_dev,
+				   "Could not get the PF id from VF\n");
+	}
+
+	return 0;
+
+fail:
+	efx_ef10_remove(efx);
+	return rc;
+}
+#else
+static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
+{
+	return 0;
+}
+#endif
+
 static int efx_ef10_alloc_vis(struct efx_nic *efx,
 			      unsigned int min_vis, unsigned int max_vis)
 {
@@ -687,7 +928,9 @@
 		nic_data->must_restore_piobufs = false;
 	}
 
-	efx_ef10_rx_push_rss_config(efx);
+	/* don't fail init if RSS setup doesn't work */
+	efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+
 	return 0;
 }
 
@@ -702,6 +945,14 @@
 	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 }
 
+static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
+{
+	if (reason == RESET_TYPE_MC_FAILURE)
+		return RESET_TYPE_DATAPATH;
+
+	return efx_mcdi_map_reset_reason(reason);
+}
+
 static int efx_ef10_map_reset_flags(u32 *flags)
 {
 	enum {
@@ -760,93 +1011,112 @@
 	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
 
 static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
-	EF10_DMA_STAT(tx_bytes, TX_BYTES),
-	EF10_DMA_STAT(tx_packets, TX_PKTS),
-	EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
-	EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
-	EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
-	EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
-	EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
-	EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
-	EF10_DMA_STAT(tx_64, TX_64_PKTS),
-	EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
-	EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
-	EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
-	EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
-	EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
-	EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
-	EF10_DMA_STAT(rx_bytes, RX_BYTES),
-	EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
-	EF10_OTHER_STAT(rx_good_bytes),
-	EF10_OTHER_STAT(rx_bad_bytes),
-	EF10_DMA_STAT(rx_packets, RX_PKTS),
-	EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
-	EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
-	EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
-	EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
-	EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
-	EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
-	EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
-	EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
-	EF10_DMA_STAT(rx_64, RX_64_PKTS),
-	EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
-	EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
-	EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
-	EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
-	EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
-	EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
-	EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
-	EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
-	EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
-	EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
-	EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
-	EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+	EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
+	EF10_DMA_STAT(port_tx_packets, TX_PKTS),
+	EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
+	EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
+	EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
+	EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
+	EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
+	EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
+	EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
+	EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
+	EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
+	EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
+	EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
+	EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+	EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+	EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
+	EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
+	EF10_OTHER_STAT(port_rx_good_bytes),
+	EF10_OTHER_STAT(port_rx_bad_bytes),
+	EF10_DMA_STAT(port_rx_packets, RX_PKTS),
+	EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
+	EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
+	EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
+	EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
+	EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
+	EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
+	EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
+	EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
+	EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
+	EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
+	EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
+	EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
+	EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
+	EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+	EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+	EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
+	EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
+	EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
+	EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
+	EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
+	EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
 	GENERIC_SW_STAT(rx_nodesc_trunc),
 	GENERIC_SW_STAT(rx_noskb_drops),
-	EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
-	EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
-	EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
-	EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
-	EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
-	EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
-	EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
-	EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
-	EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
-	EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
-	EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
-	EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
+	EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
+	EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
+	EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
+	EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
+	EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
+	EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
+	EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
+	EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
+	EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
+	EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
+	EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
+	EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
+	EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
+	EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
+	EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
+	EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
+	EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
+	EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
+	EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
+	EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
+	EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
+	EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
+	EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
+	EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
+	EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
+	EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
+	EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
+	EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
+	EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
+	EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
 };
 
-#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) |		\
-			       (1ULL << EF10_STAT_tx_packets) |		\
-			       (1ULL << EF10_STAT_tx_pause) |		\
-			       (1ULL << EF10_STAT_tx_unicast) |		\
-			       (1ULL << EF10_STAT_tx_multicast) |	\
-			       (1ULL << EF10_STAT_tx_broadcast) |	\
-			       (1ULL << EF10_STAT_rx_bytes) |		\
-			       (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
-			       (1ULL << EF10_STAT_rx_good_bytes) |	\
-			       (1ULL << EF10_STAT_rx_bad_bytes) |	\
-			       (1ULL << EF10_STAT_rx_packets) |		\
-			       (1ULL << EF10_STAT_rx_good) |		\
-			       (1ULL << EF10_STAT_rx_bad) |		\
-			       (1ULL << EF10_STAT_rx_pause) |		\
-			       (1ULL << EF10_STAT_rx_control) |		\
-			       (1ULL << EF10_STAT_rx_unicast) |		\
-			       (1ULL << EF10_STAT_rx_multicast) |	\
-			       (1ULL << EF10_STAT_rx_broadcast) |	\
-			       (1ULL << EF10_STAT_rx_lt64) |		\
-			       (1ULL << EF10_STAT_rx_64) |		\
-			       (1ULL << EF10_STAT_rx_65_to_127) |	\
-			       (1ULL << EF10_STAT_rx_128_to_255) |	\
-			       (1ULL << EF10_STAT_rx_256_to_511) |	\
-			       (1ULL << EF10_STAT_rx_512_to_1023) |	\
-			       (1ULL << EF10_STAT_rx_1024_to_15xx) |	\
-			       (1ULL << EF10_STAT_rx_15xx_to_jumbo) |	\
-			       (1ULL << EF10_STAT_rx_gtjumbo) |		\
-			       (1ULL << EF10_STAT_rx_bad_gtjumbo) |	\
-			       (1ULL << EF10_STAT_rx_overflow) |	\
-			       (1ULL << EF10_STAT_rx_nodesc_drops) |	\
+#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) |	\
+			       (1ULL << EF10_STAT_port_tx_packets) |	\
+			       (1ULL << EF10_STAT_port_tx_pause) |	\
+			       (1ULL << EF10_STAT_port_tx_unicast) |	\
+			       (1ULL << EF10_STAT_port_tx_multicast) |	\
+			       (1ULL << EF10_STAT_port_tx_broadcast) |	\
+			       (1ULL << EF10_STAT_port_rx_bytes) |	\
+			       (1ULL <<                                 \
+				EF10_STAT_port_rx_bytes_minus_good_bytes) | \
+			       (1ULL << EF10_STAT_port_rx_good_bytes) |	\
+			       (1ULL << EF10_STAT_port_rx_bad_bytes) |	\
+			       (1ULL << EF10_STAT_port_rx_packets) |	\
+			       (1ULL << EF10_STAT_port_rx_good) |	\
+			       (1ULL << EF10_STAT_port_rx_bad) |	\
+			       (1ULL << EF10_STAT_port_rx_pause) |	\
+			       (1ULL << EF10_STAT_port_rx_control) |	\
+			       (1ULL << EF10_STAT_port_rx_unicast) |	\
+			       (1ULL << EF10_STAT_port_rx_multicast) |	\
+			       (1ULL << EF10_STAT_port_rx_broadcast) |	\
+			       (1ULL << EF10_STAT_port_rx_lt64) |	\
+			       (1ULL << EF10_STAT_port_rx_64) |		\
+			       (1ULL << EF10_STAT_port_rx_65_to_127) |	\
+			       (1ULL << EF10_STAT_port_rx_128_to_255) |	\
+			       (1ULL << EF10_STAT_port_rx_256_to_511) |	\
+			       (1ULL << EF10_STAT_port_rx_512_to_1023) |\
+			       (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
+			       (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
+			       (1ULL << EF10_STAT_port_rx_gtjumbo) |	\
+			       (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
+			       (1ULL << EF10_STAT_port_rx_overflow) |	\
+			       (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
 			       (1ULL << GENERIC_STAT_rx_nodesc_trunc) |	\
 			       (1ULL << GENERIC_STAT_rx_noskb_drops))
 
@@ -854,39 +1124,39 @@
  * switchable port we do not expose these because they might not
  * include all the packets they should.
  */
-#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) |	\
-				 (1ULL << EF10_STAT_tx_lt64) |		\
-				 (1ULL << EF10_STAT_tx_64) |		\
-				 (1ULL << EF10_STAT_tx_65_to_127) |	\
-				 (1ULL << EF10_STAT_tx_128_to_255) |	\
-				 (1ULL << EF10_STAT_tx_256_to_511) |	\
-				 (1ULL << EF10_STAT_tx_512_to_1023) |	\
-				 (1ULL << EF10_STAT_tx_1024_to_15xx) |	\
-				 (1ULL << EF10_STAT_tx_15xx_to_jumbo))
+#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) |	\
+				 (1ULL << EF10_STAT_port_tx_lt64) |	\
+				 (1ULL << EF10_STAT_port_tx_64) |	\
+				 (1ULL << EF10_STAT_port_tx_65_to_127) |\
+				 (1ULL << EF10_STAT_port_tx_128_to_255) |\
+				 (1ULL << EF10_STAT_port_tx_256_to_511) |\
+				 (1ULL << EF10_STAT_port_tx_512_to_1023) |\
+				 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
+				 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))
 
 /* These statistics are only provided by the 40G MAC.  For a 10G/40G
  * switchable port we do expose these because the errors will otherwise
  * be silent.
  */
-#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) |	\
-				  (1ULL << EF10_STAT_rx_length_error))
+#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
+				  (1ULL << EF10_STAT_port_rx_length_error))
 
 /* These statistics are only provided if the firmware supports the
  * capability PM_AND_RXDP_COUNTERS.
  */
 #define HUNT_PM_AND_RXDP_STAT_MASK (					\
-	(1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) |			\
-	(1ULL << EF10_STAT_rx_pm_discard_bb_overflow) |			\
-	(1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) |			\
-	(1ULL << EF10_STAT_rx_pm_discard_vfifo_full) |			\
-	(1ULL << EF10_STAT_rx_pm_trunc_qbb) |				\
-	(1ULL << EF10_STAT_rx_pm_discard_qbb) |				\
-	(1ULL << EF10_STAT_rx_pm_discard_mapping) |			\
-	(1ULL << EF10_STAT_rx_dp_q_disabled_packets) |			\
-	(1ULL << EF10_STAT_rx_dp_di_dropped_packets) |			\
-	(1ULL << EF10_STAT_rx_dp_streaming_packets) |			\
-	(1ULL << EF10_STAT_rx_dp_hlb_fetch) |				\
-	(1ULL << EF10_STAT_rx_dp_hlb_wait))
+	(1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) |		\
+	(1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) |		\
+	(1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) |		\
+	(1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) |		\
+	(1ULL << EF10_STAT_port_rx_pm_trunc_qbb) |			\
+	(1ULL << EF10_STAT_port_rx_pm_discard_qbb) |			\
+	(1ULL << EF10_STAT_port_rx_pm_discard_mapping) |		\
+	(1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) |		\
+	(1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) |		\
+	(1ULL << EF10_STAT_port_rx_dp_streaming_packets) |		\
+	(1ULL << EF10_STAT_port_rx_dp_hlb_fetch) |			\
+	(1ULL << EF10_STAT_port_rx_dp_hlb_wait))
 
 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
 {
@@ -894,6 +1164,10 @@
 	u32 port_caps = efx_mcdi_phy_get_caps(efx);
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 
+	if (!(efx->mcdi->fn_flags &
+	      1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
+		return 0;
+
 	if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
 		raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
 	else
@@ -908,13 +1182,28 @@
 
 static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
 {
-	u64 raw_mask = efx_ef10_raw_stat_mask(efx);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	u64 raw_mask[2];
+
+	raw_mask[0] = efx_ef10_raw_stat_mask(efx);
+
+	/* Only show vadaptor stats when EVB capability is present */
+	if (nic_data->datapath_caps &
+	    (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
+		raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
+		raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
+	} else {
+		raw_mask[1] = 0;
+	}
 
 #if BITS_PER_LONG == 64
-	mask[0] = raw_mask;
+	mask[0] = raw_mask[0];
+	mask[1] = raw_mask[1];
 #else
-	mask[0] = raw_mask & 0xffffffff;
-	mask[1] = raw_mask >> 32;
+	mask[0] = raw_mask[0] & 0xffffffff;
+	mask[1] = raw_mask[0] >> 32;
+	mask[2] = raw_mask[1] & 0xffffffff;
+	mask[3] = raw_mask[1] >> 32;
 #endif
 }
 
@@ -927,7 +1216,51 @@
 				      mask, names);
 }
 
-static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
+static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
+					   struct rtnl_link_stats64 *core_stats)
+{
+	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	u64 *stats = nic_data->stats;
+	size_t stats_count = 0, index;
+
+	efx_ef10_get_stat_mask(efx, mask);
+
+	if (full_stats) {
+		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
+			if (efx_ef10_stat_desc[index].name) {
+				*full_stats++ = stats[index];
+				++stats_count;
+			}
+		}
+	}
+
+	if (core_stats) {
+		core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
+					 stats[EF10_STAT_rx_multicast] +
+					 stats[EF10_STAT_rx_broadcast];
+		core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
+					 stats[EF10_STAT_tx_multicast] +
+					 stats[EF10_STAT_tx_broadcast];
+		core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
+				       stats[EF10_STAT_rx_multicast_bytes] +
+				       stats[EF10_STAT_rx_broadcast_bytes];
+		core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
+				       stats[EF10_STAT_tx_multicast_bytes] +
+				       stats[EF10_STAT_tx_broadcast_bytes];
+		core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
+					 stats[GENERIC_STAT_rx_noskb_drops];
+		core_stats->multicast = stats[EF10_STAT_rx_multicast];
+		core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
+		core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
+		core_stats->rx_errors = core_stats->rx_crc_errors;
+		core_stats->tx_errors = stats[EF10_STAT_tx_bad];
+	}
+
+	return stats_count;
+}
+
+static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
@@ -952,67 +1285,114 @@
 		return -EAGAIN;
 
 	/* Update derived statistics */
-	efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]);
-	stats[EF10_STAT_rx_good_bytes] =
-		stats[EF10_STAT_rx_bytes] -
-		stats[EF10_STAT_rx_bytes_minus_good_bytes];
-	efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
-			     stats[EF10_STAT_rx_bytes_minus_good_bytes]);
+	efx_nic_fix_nodesc_drop_stat(efx,
+				     &stats[EF10_STAT_port_rx_nodesc_drops]);
+	stats[EF10_STAT_port_rx_good_bytes] =
+		stats[EF10_STAT_port_rx_bytes] -
+		stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
+	efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
+			     stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
 	efx_update_sw_stats(efx, stats);
 	return 0;
 }
 
 
-static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
-				    struct rtnl_link_stats64 *core_stats)
+static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
+				       struct rtnl_link_stats64 *core_stats)
 {
-	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
-	struct efx_ef10_nic_data *nic_data = efx->nic_data;
-	u64 *stats = nic_data->stats;
-	size_t stats_count = 0, index;
 	int retry;
 
-	efx_ef10_get_stat_mask(efx, mask);
-
 	/* If we're unlucky enough to read statistics during the DMA, wait
 	 * up to 10ms for it to finish (typically takes <500us)
 	 */
 	for (retry = 0; retry < 100; ++retry) {
-		if (efx_ef10_try_update_nic_stats(efx) == 0)
+		if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
 			break;
 		udelay(100);
 	}
 
-	if (full_stats) {
-		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
-			if (efx_ef10_stat_desc[index].name) {
-				*full_stats++ = stats[index];
-				++stats_count;
-			}
-		}
+	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
+}
+
+static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+	__le64 generation_start, generation_end;
+	u64 *stats = nic_data->stats;
+	u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
+	struct efx_buffer stats_buf;
+	__le64 *dma_stats;
+	int rc;
+
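+	/* MC_CMD_MAC_STATS sleeps, so it cannot be issued while holding
+	 * the stats lock; every exit path below retakes the lock before
+	 * returning.
+	 */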
+	spin_unlock_bh(&efx->stats_lock);
+
+	if (in_interrupt()) {
+		/* We cannot issue the blocking MCDI request from atomic
+		 * context, so just update the software stats and return
+		 * so the caller can continue.
+		 */
+		spin_lock_bh(&efx->stats_lock);
+		efx_update_sw_stats(efx, stats);
+		return 0;
 	}
 
-	if (core_stats) {
-		core_stats->rx_packets = stats[EF10_STAT_rx_packets];
-		core_stats->tx_packets = stats[EF10_STAT_tx_packets];
-		core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
-		core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
-		core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops] +
-					 stats[GENERIC_STAT_rx_nodesc_trunc] +
-					 stats[GENERIC_STAT_rx_noskb_drops];
-		core_stats->multicast = stats[EF10_STAT_rx_multicast];
-		core_stats->rx_length_errors =
-			stats[EF10_STAT_rx_gtjumbo] +
-			stats[EF10_STAT_rx_length_error];
-		core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
-		core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
-		core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
-		core_stats->rx_errors = (core_stats->rx_length_errors +
-					 core_stats->rx_crc_errors +
-					 core_stats->rx_frame_errors);
+	efx_ef10_get_stat_mask(efx, mask);
+
+	rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
+	if (rc) {
+		spin_lock_bh(&efx->stats_lock);
+		return rc;
 	}
 
-	return stats_count;
+	dma_stats = stats_buf.addr;
+	dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+
+	MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
+	MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
+			      MAC_STATS_IN_DMA, 1);
+	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+				NULL, 0, NULL);
+	spin_lock_bh(&efx->stats_lock);
+	if (rc) {
+		/* Expect ENOENT if DMA queues have not been set up */
+		if (rc != -ENOENT || atomic_read(&efx->active_queues))
+			efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
+					       sizeof(inbuf), NULL, 0, rc);
+		goto out;
+	}
+
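+	/* The MC writes GENERATION_START before the stats and
+	 * GENERATION_END after them.  Reading END first and START last,
+	 * with barriers in between, lets us detect a stats DMA that
+	 * raced with this read.
+	 */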
+	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+	if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
+		WARN_ON_ONCE(1);
+		goto out;
+	}
+	rmb();
+	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
+			     stats, stats_buf.addr, false);
+	rmb();
+	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+	if (generation_end != generation_start) {
+		rc = -EAGAIN;
+		goto out;
+	}
+
+	efx_update_sw_stats(efx, stats);
+out:
+	efx_nic_free_buffer(efx, &stats_buf);
+	return rc;
+}
+
+static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
+				       struct rtnl_link_stats64 *core_stats)
+{
+	if (efx_ef10_try_update_nic_stats_vf(efx))
+		return 0;
+
+	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
 }
 
 static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
@@ -1044,6 +1424,14 @@
 	}
 }
 
+static void efx_ef10_get_wol_vf(struct efx_nic *efx,
+				struct ethtool_wolinfo *wol) {}
+
+static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
+{
+	return -EOPNOTSUPP;
+}
+
 static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
 {
 	wol->supported = 0;
@@ -1123,13 +1511,17 @@
 	/* All our allocations have been reset */
 	efx_ef10_reset_mc_allocations(efx);
 
+	/* Driver-created vswitches and vports must be re-created */
+	nic_data->must_probe_vswitching = true;
+	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+
 	/* The datapath firmware might have been changed */
 	nic_data->must_check_datapath_caps = true;
 
 	/* MAC statistics have been cleared on the NIC; clear the local
 	 * statistic that we update with efx_update_diff_stat().
 	 */
-	nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
+	nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
 
 	return -EIO;
 }
@@ -1232,16 +1624,17 @@
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
 						       EFX_BUF_SIZE));
-	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
 	bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
 	size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
 	struct efx_channel *channel = tx_queue->channel;
 	struct efx_nic *efx = tx_queue->efx;
-	size_t inlen, outlen;
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	size_t inlen;
 	dma_addr_t dma_addr;
 	efx_qword_t *txd;
 	int rc;
 	int i;
+
+	BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
 
 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
@@ -1251,7 +1644,7 @@
 			      INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
 			      INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
-	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
 
 	dma_addr = tx_queue->txd.buf.dma_addr;
 
@@ -1266,7 +1659,7 @@
 	inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
-			  outbuf, sizeof(outbuf), &outlen);
+			  NULL, 0, NULL);
 	if (rc)
 		goto fail;
 
@@ -1299,7 +1692,7 @@
 static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
-	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
+	MCDI_DECLARE_BUF_ERR(outbuf);
 	struct efx_nic *efx = tx_queue->efx;
 	size_t outlen;
 	int rc;
@@ -1378,19 +1771,33 @@
 	}
 }
 
-static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
+static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
+				      bool exclusive, unsigned *context_size)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
 	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 	size_t outlen;
 	int rc;
+	u32 alloc_type = exclusive ?
+				MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
+				MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
+	unsigned rss_spread = exclusive ?
+				efx->rss_spread :
+				min(rounddown_pow_of_two(efx->rss_spread),
+				    EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
+
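+	/* With a spread of 1 there is nothing to hash across, so the
+	 * shared case needs no RSS context at all.
+	 */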
+	if (!exclusive && rss_spread == 1) {
+		*context = EFX_EF10_RSS_CONTEXT_INVALID;
+		if (context_size)
+			*context_size = 1;
+		return 0;
+	}
 
 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
-		       EVB_PORT_ID_ASSIGNED);
-	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
-		       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
-	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
-		       EFX_MAX_CHANNELS);
+		       nic_data->vport_id);
+	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
+	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
 		outbuf, sizeof(outbuf), &outlen);
@@ -1402,6 +1809,9 @@
 
 	*context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
 
+	if (context_size)
+		*context_size = rss_spread;
+
 	return 0;
 }
 
@@ -1418,7 +1828,8 @@
 	WARN_ON(rc != 0);
 }
 
-static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
+static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
+				       const u32 *rx_indir_table)
 {
 	MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
 	MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
@@ -1432,7 +1843,7 @@
 	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
 		MCDI_PTR(tablebuf,
 			 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
-				(u8) efx->rx_indir_table[i];
+				(u8) rx_indir_table[i];
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
 			  sizeof(tablebuf), NULL, 0, NULL);
@@ -1460,27 +1871,119 @@
 	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 }
 
-static void efx_ef10_rx_push_rss_config(struct efx_nic *efx)
+static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
+					      unsigned *context_size)
+{
+	u32 new_rx_rss_context;
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
+					    false, context_size);
+
+	if (rc != 0)
+		return rc;
+
+	nic_data->rx_rss_context = new_rx_rss_context;
+	nic_data->rx_rss_context_exclusive = false;
+	efx_set_default_rx_indir_table(efx);
+	return 0;
+}
+
+static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
+						 const u32 *rx_indir_table)
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 	int rc;
+	u32 new_rx_rss_context;
 
-	netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n");
-
-	if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
-		rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
-		if (rc != 0)
-			goto fail;
+	if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID ||
+	    !nic_data->rx_rss_context_exclusive) {
+		rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
+						true, NULL);
+		if (rc == -EOPNOTSUPP)
+			return rc;
+		else if (rc != 0)
+			goto fail1;
+	} else {
+		new_rx_rss_context = nic_data->rx_rss_context;
 	}
 
-	rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
+	rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context,
+					 rx_indir_table);
 	if (rc != 0)
-		goto fail;
+		goto fail2;
 
-	return;
+	if (nic_data->rx_rss_context != new_rx_rss_context)
+		efx_ef10_rx_free_indir_table(efx);
+	nic_data->rx_rss_context = new_rx_rss_context;
+	nic_data->rx_rss_context_exclusive = true;
+	if (rx_indir_table != efx->rx_indir_table)
+		memcpy(efx->rx_indir_table, rx_indir_table,
+		       sizeof(efx->rx_indir_table));
+	return 0;
 
-fail:
+fail2:
+	if (new_rx_rss_context != nic_data->rx_rss_context)
+		efx_ef10_free_rss_context(efx, new_rx_rss_context);
+fail1:
 	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
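+/* Prefer an exclusive RSS context.  If the firmware has run out of them
+ * (-ENOBUFS) and the configuration did not come from the user, fall back
+ * to the shared default context, warning if the fallback loses RSS
+ * spread or a custom indirection table.
+ */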
+static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
+					  const u32 *rx_indir_table)
+{
+	int rc;
+
+	if (efx->rss_spread == 1)
+		return 0;
+
+	rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table);
+
+	if (rc == -ENOBUFS && !user) {
+		unsigned context_size;
+		bool mismatch = false;
+		size_t i;
+
+		for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch;
+		     i++)
+			mismatch = rx_indir_table[i] !=
+				ethtool_rxfh_indir_default(i, efx->rss_spread);
+
+		rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
+		if (rc == 0) {
+			if (context_size != efx->rss_spread)
+				netif_warn(efx, probe, efx->net_dev,
+					   "Could not allocate an exclusive RSS"
+					   " context; allocated a shared one of"
+					   " different size."
+					   " Wanted %u, got %u.\n",
+					   efx->rss_spread, context_size);
+			else if (mismatch)
+				netif_warn(efx, probe, efx->net_dev,
+					   "Could not allocate an exclusive RSS"
+					   " context; allocated a shared one but"
+					   " could not apply custom"
+					   " indirection.\n");
+			else
+				netif_info(efx, probe, efx->net_dev,
+					   "Could not allocate an exclusive RSS"
+					   " context; allocated a shared one.\n");
+		}
+	}
+	return rc;
+}
+
+static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
+					  const u32 *rx_indir_table
+					  __attribute__ ((unused)))
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+	if (user)
+		return -EOPNOTSUPP;
+	if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
+		return 0;
+	return efx_ef10_rx_push_shared_rss_config(efx, NULL);
 }
 
 static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
@@ -1496,14 +1999,15 @@
 	MCDI_DECLARE_BUF(inbuf,
 			 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
 						EFX_BUF_SIZE));
-	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
 	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
 	size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
 	struct efx_nic *efx = rx_queue->efx;
-	size_t inlen, outlen;
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	size_t inlen;
 	dma_addr_t dma_addr;
 	int rc;
 	int i;
+
+	BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
 
 	rx_queue->scatter_n = 0;
 	rx_queue->scatter_len = 0;
@@ -1517,7 +2021,7 @@
 			      INIT_RXQ_IN_FLAG_PREFIX, 1,
 			      INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
-	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
 
 	dma_addr = rx_queue->rxd.buf.dma_addr;
 
@@ -1532,7 +2036,7 @@
 	inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
-			  outbuf, sizeof(outbuf), &outlen);
+			  NULL, 0, NULL);
 	if (rc)
 		netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
 			    efx_rx_queue_index(rx_queue));
@@ -1541,7 +2045,7 @@
 static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
-	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
+	MCDI_DECLARE_BUF_ERR(outbuf);
 	struct efx_nic *efx = rx_queue->efx;
 	size_t outlen;
 	int rc;
@@ -1703,7 +2207,7 @@
 static void efx_ef10_ev_fini(struct efx_channel *channel)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
-	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
+	MCDI_DECLARE_BUF_ERR(outbuf);
 	struct efx_nic *efx = channel->efx;
 	size_t outlen;
 	int rc;
@@ -2286,11 +2790,12 @@
 			       match_fields);
 	}
 
-	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
 		       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
 		       MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
 		       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
 		       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
@@ -3055,6 +3560,9 @@
 	return rc;
 }
 
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
 static void efx_ef10_filter_table_restore(struct efx_nic *efx)
 {
 	struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3064,9 +3572,14 @@
 	bool failed = false;
 	int rc;
 
+	WARN_ON(!rwsem_is_locked(&efx->filter_sem));
+
 	if (!nic_data->must_restore_filters)
 		return;
 
+	if (!table)
+		return;
+
 	spin_lock_bh(&efx->filter_lock);
 
 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
@@ -3102,6 +3615,7 @@
 		nic_data->must_restore_filters = false;
 }
 
+/* Caller must hold efx->filter_sem for write */
 static void efx_ef10_filter_table_remove(struct efx_nic *efx)
 {
 	struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3110,6 +3624,10 @@
 	unsigned int filter_idx;
 	int rc;
 
+	efx->filter_state = NULL;
+	if (!table)
+		return;
+
 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
 		spec = efx_ef10_filter_entry_spec(table, filter_idx);
 		if (!spec)
@@ -3135,6 +3653,9 @@
 	kfree(table);
 }
 
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
 static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
 {
 	struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3149,6 +3670,9 @@
 	if (!efx_dev_registered(efx))
 		return;
 
+	if (!table)
+		return;
+
 	/* Mark old filters that may need to be removed */
 	spin_lock_bh(&efx->filter_lock);
 	n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
@@ -3280,6 +3804,78 @@
 	WARN_ON(remove_failed);
 }
 
+static int efx_ef10_set_mac_address(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	bool was_enabled = efx->port_enabled;
+	int rc;
+
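+	/* The filter table is tied to the MAC address, so quiesce the
+	 * datapath and drop the filters around the MCDI call, then
+	 * rebuild them and restart.
+	 */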
+	efx_device_detach_sync(efx);
+	efx_net_stop(efx->net_dev);
+	down_write(&efx->filter_sem);
+	efx_ef10_filter_table_remove(efx);
+
+	ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
+			efx->net_dev->dev_addr);
+	MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
+		       nic_data->vport_id);
+	rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
+			  sizeof(inbuf), NULL, 0, NULL);
+
+	efx_ef10_filter_table_probe(efx);
+	up_write(&efx->filter_sem);
+	if (was_enabled)
+		efx_net_open(efx->net_dev);
+	netif_device_attach(efx->net_dev);
+
+#if !defined(CONFIG_SFC_SRIOV)
+	if (rc == -EPERM)
+		netif_err(efx, drv, efx->net_dev,
+			  "Cannot change MAC address; use sfboot to enable mac-spoofing"
+			  " on this interface\n");
+#else
+	if (rc == -EPERM) {
+		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+
+		/* Switch to PF and change MAC address on vport */
+		if (efx->pci_dev->is_virtfn && pci_dev_pf) {
+			struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+
+			if (!efx_ef10_sriov_set_vf_mac(efx_pf,
+						       nic_data->vf_index,
+						       efx->net_dev->dev_addr))
+				return 0;
+		}
+		netif_err(efx, drv, efx->net_dev,
+			  "Cannot change MAC address; use sfboot to enable mac-spoofing"
+			  " on this interface\n");
+	} else if (efx->pci_dev->is_virtfn) {
+		/* Successfully changed by VF (with MAC spoofing), so update the
+		 * parent PF if possible.
+		 */
+		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+
+		if (pci_dev_pf) {
+			struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+			struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
+			unsigned int i;
+
+			for (i = 0; i < efx_pf->vf_count; ++i) {
+				struct ef10_vf *vf = nic_data->vf + i;
+
+				if (vf->efx == efx) {
+					ether_addr_copy(vf->mac,
+							efx->net_dev->dev_addr);
+					return 0;
+				}
+			}
+		}
+	}
+#endif
+	return rc;
+}
+
 static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
 {
 	efx_ef10_filter_sync_rx_mode(efx);
@@ -3287,6 +3883,13 @@
 	return efx_mcdi_set_mac(efx);
 }
 
+static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
+{
+	efx_ef10_filter_sync_rx_mode(efx);
+
+	return 0;
+}
+
 static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
@@ -3494,6 +4097,9 @@
 	_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
 }
 
+static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
+					    u32 host_time) {}
+
 static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
 					   bool temp)
 {
@@ -3571,6 +4177,12 @@
 	return 0;
 }
 
+static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
+					 struct hwtstamp_config *init)
+{
+	return -EOPNOTSUPP;
+}
+
 static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
 				      struct hwtstamp_config *init)
 {
@@ -3607,14 +4219,16 @@
 	}
 }
 
-const struct efx_nic_type efx_hunt_a0_nic_type = {
+const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
+	.is_vf = true,
+	.mem_bar = EFX_MEM_VF_BAR,
 	.mem_map_size = efx_ef10_mem_map_size,
-	.probe = efx_ef10_probe,
+	.probe = efx_ef10_probe_vf,
 	.remove = efx_ef10_remove,
 	.dimension_resources = efx_ef10_dimension_resources,
 	.init = efx_ef10_init_nic,
 	.fini = efx_port_dummy_op_void,
-	.map_reset_reason = efx_mcdi_map_reset_reason,
+	.map_reset_reason = efx_ef10_map_reset_reason,
 	.map_reset_flags = efx_ef10_map_reset_flags,
 	.reset = efx_ef10_reset,
 	.probe_port = efx_mcdi_port_probe,
@@ -3623,7 +4237,109 @@
 	.prepare_flr = efx_ef10_prepare_flr,
 	.finish_flr = efx_port_dummy_op_void,
 	.describe_stats = efx_ef10_describe_stats,
-	.update_stats = efx_ef10_update_stats,
+	.update_stats = efx_ef10_update_stats_vf,
+	.start_stats = efx_port_dummy_op_void,
+	.pull_stats = efx_port_dummy_op_void,
+	.stop_stats = efx_port_dummy_op_void,
+	.set_id_led = efx_mcdi_set_id_led,
+	.push_irq_moderation = efx_ef10_push_irq_moderation,
+	.reconfigure_mac = efx_ef10_mac_reconfigure_vf,
+	.check_mac_fault = efx_mcdi_mac_check_fault,
+	.reconfigure_port = efx_mcdi_port_reconfigure,
+	.get_wol = efx_ef10_get_wol_vf,
+	.set_wol = efx_ef10_set_wol_vf,
+	.resume_wol = efx_port_dummy_op_void,
+	.mcdi_request = efx_ef10_mcdi_request,
+	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
+	.mcdi_read_response = efx_ef10_mcdi_read_response,
+	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+	.irq_enable_master = efx_port_dummy_op_void,
+	.irq_test_generate = efx_ef10_irq_test_generate,
+	.irq_disable_non_ev = efx_port_dummy_op_void,
+	.irq_handle_msi = efx_ef10_msi_interrupt,
+	.irq_handle_legacy = efx_ef10_legacy_interrupt,
+	.tx_probe = efx_ef10_tx_probe,
+	.tx_init = efx_ef10_tx_init,
+	.tx_remove = efx_ef10_tx_remove,
+	.tx_write = efx_ef10_tx_write,
+	.rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
+	.rx_probe = efx_ef10_rx_probe,
+	.rx_init = efx_ef10_rx_init,
+	.rx_remove = efx_ef10_rx_remove,
+	.rx_write = efx_ef10_rx_write,
+	.rx_defer_refill = efx_ef10_rx_defer_refill,
+	.ev_probe = efx_ef10_ev_probe,
+	.ev_init = efx_ef10_ev_init,
+	.ev_fini = efx_ef10_ev_fini,
+	.ev_remove = efx_ef10_ev_remove,
+	.ev_process = efx_ef10_ev_process,
+	.ev_read_ack = efx_ef10_ev_read_ack,
+	.ev_test_generate = efx_ef10_ev_test_generate,
+	.filter_table_probe = efx_ef10_filter_table_probe,
+	.filter_table_restore = efx_ef10_filter_table_restore,
+	.filter_table_remove = efx_ef10_filter_table_remove,
+	.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
+	.filter_insert = efx_ef10_filter_insert,
+	.filter_remove_safe = efx_ef10_filter_remove_safe,
+	.filter_get_safe = efx_ef10_filter_get_safe,
+	.filter_clear_rx = efx_ef10_filter_clear_rx,
+	.filter_count_rx_used = efx_ef10_filter_count_rx_used,
+	.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
+	.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+	.filter_rfs_insert = efx_ef10_filter_rfs_insert,
+	.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+	.mtd_probe = efx_port_dummy_op_int,
+#endif
+	.ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
+	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
+#ifdef CONFIG_SFC_SRIOV
+	.vswitching_probe = efx_ef10_vswitching_probe_vf,
+	.vswitching_restore = efx_ef10_vswitching_restore_vf,
+	.vswitching_remove = efx_ef10_vswitching_remove_vf,
+	.sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id,
+#endif
+	.get_mac_address = efx_ef10_get_mac_address_vf,
+	.set_mac_address = efx_ef10_set_mac_address,
+
+	.revision = EFX_REV_HUNT_A0,
+	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
+	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
+	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+	.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
+	.can_rx_scatter = true,
+	.always_rx_scatter = true,
+	.max_interrupt_mode = EFX_INT_MODE_MSIX,
+	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
+	.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+			     NETIF_F_RXHASH | NETIF_F_NTUPLE),
+	.mcdi_max_ver = 2,
+	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+	.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
+			    1 << HWTSTAMP_FILTER_ALL,
+};
+
+const struct efx_nic_type efx_hunt_a0_nic_type = {
+	.is_vf = false,
+	.mem_bar = EFX_MEM_BAR,
+	.mem_map_size = efx_ef10_mem_map_size,
+	.probe = efx_ef10_probe_pf,
+	.remove = efx_ef10_remove,
+	.dimension_resources = efx_ef10_dimension_resources,
+	.init = efx_ef10_init_nic,
+	.fini = efx_port_dummy_op_void,
+	.map_reset_reason = efx_ef10_map_reset_reason,
+	.map_reset_flags = efx_ef10_map_reset_flags,
+	.reset = efx_ef10_reset,
+	.probe_port = efx_mcdi_port_probe,
+	.remove_port = efx_mcdi_port_remove,
+	.fini_dmaq = efx_ef10_fini_dmaq,
+	.prepare_flr = efx_ef10_prepare_flr,
+	.finish_flr = efx_port_dummy_op_void,
+	.describe_stats = efx_ef10_describe_stats,
+	.update_stats = efx_ef10_update_stats_pf,
 	.start_stats = efx_mcdi_mac_start_stats,
 	.pull_stats = efx_mcdi_mac_pull_stats,
 	.stop_stats = efx_mcdi_mac_stop_stats,
@@ -3650,7 +4366,7 @@
 	.tx_init = efx_ef10_tx_init,
 	.tx_remove = efx_ef10_tx_remove,
 	.tx_write = efx_ef10_tx_write,
-	.rx_push_rss_config = efx_ef10_rx_push_rss_config,
+	.rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
 	.rx_probe = efx_ef10_rx_probe,
 	.rx_init = efx_ef10_rx_init,
 	.rx_remove = efx_ef10_rx_remove,
@@ -3689,11 +4405,24 @@
 	.ptp_write_host_time = efx_ef10_ptp_write_host_time,
 	.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
 	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
+#ifdef CONFIG_SFC_SRIOV
+	.sriov_configure = efx_ef10_sriov_configure,
 	.sriov_init = efx_ef10_sriov_init,
 	.sriov_fini = efx_ef10_sriov_fini,
-	.sriov_mac_address_changed = efx_ef10_sriov_mac_address_changed,
 	.sriov_wanted = efx_ef10_sriov_wanted,
 	.sriov_reset = efx_ef10_sriov_reset,
+	.sriov_flr = efx_ef10_sriov_flr,
+	.sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
+	.sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
+	.sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
+	.sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
+	.sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
+	.vswitching_probe = efx_ef10_vswitching_probe_pf,
+	.vswitching_restore = efx_ef10_vswitching_restore_pf,
+	.vswitching_remove = efx_ef10_vswitching_remove_pf,
+#endif
+	.get_mac_address = efx_ef10_get_mac_address_pf,
+	.set_mac_address = efx_ef10_set_mac_address,
 
 	.revision = EFX_REV_HUNT_A0,
 	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
new file mode 100644
index 0000000..6c9b6e4
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -0,0 +1,783 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "net_driver.h"
+#include "ef10_sriov.h"
+#include "efx.h"
+#include "nic.h"
+#include "mcdi_pcol.h"
+
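+/* Thin MCDI wrappers around the vswitch, vport and vadaptor objects that
+ * make up the EF10 virtual switching model, plus the per-VF vswitching
+ * setup built on top of them.
+ */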
+static int efx_ef10_evb_port_assign(struct efx_nic *efx, unsigned int port_id,
+				    unsigned int vf_fn)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_EVB_PORT_ASSIGN_IN_LEN);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+	MCDI_SET_DWORD(inbuf, EVB_PORT_ASSIGN_IN_PORT_ID, port_id);
+	MCDI_POPULATE_DWORD_2(inbuf, EVB_PORT_ASSIGN_IN_FUNCTION,
+			      EVB_PORT_ASSIGN_IN_PF, nic_data->pf_index,
+			      EVB_PORT_ASSIGN_IN_VF, vf_fn);
+
+	return efx_mcdi_rpc(efx, MC_CMD_EVB_PORT_ASSIGN, inbuf, sizeof(inbuf),
+			    NULL, 0, NULL);
+}
+
+static int efx_ef10_vport_add_mac(struct efx_nic *efx,
+				  unsigned int port_id, u8 *mac)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
+
+	MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
+	ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
+
+	return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
+			    sizeof(inbuf), NULL, 0, NULL);
+}
+
+static int efx_ef10_vport_del_mac(struct efx_nic *efx,
+				  unsigned int port_id, u8 *mac)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
+
+	MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
+	ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
+
+	return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
+			    sizeof(inbuf), NULL, 0, NULL);
+}
+
+static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id,
+				  unsigned int vswitch_type)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_ALLOC_IN_LEN);
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+	MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_TYPE, vswitch_type);
+	MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 2);
+	MCDI_POPULATE_DWORD_1(inbuf, VSWITCH_ALLOC_IN_FLAGS,
+			      VSWITCH_ALLOC_IN_FLAG_AUTO_PORT, 0);
+
+	/* Quietly try to allocate 2 VLAN tags */
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VSWITCH_ALLOC, inbuf, sizeof(inbuf),
+				NULL, 0, NULL);
+
+	/* If 2 VLAN tags are too many, retry with a single VLAN tag */
+	if (rc == -EPROTO) {
+		MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 1);
+		rc = efx_mcdi_rpc(efx, MC_CMD_VSWITCH_ALLOC, inbuf,
+				  sizeof(inbuf), NULL, 0, NULL);
+	} else if (rc) {
+		efx_mcdi_display_error(efx, MC_CMD_VSWITCH_ALLOC,
+				       MC_CMD_VSWITCH_ALLOC_IN_LEN,
+				       NULL, 0, rc);
+	}
+	return rc;
+}
+
+static int efx_ef10_vswitch_free(struct efx_nic *efx, unsigned int port_id)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_FREE_IN_LEN);
+
+	MCDI_SET_DWORD(inbuf, VSWITCH_FREE_IN_UPSTREAM_PORT_ID, port_id);
+
+	return efx_mcdi_rpc(efx, MC_CMD_VSWITCH_FREE, inbuf, sizeof(inbuf),
+			    NULL, 0, NULL);
+}
+
+static int efx_ef10_vport_alloc(struct efx_nic *efx,
+				unsigned int port_id_in,
+				unsigned int vport_type,
+				u16 vlan,
+				unsigned int *port_id_out)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ALLOC_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_ALLOC_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	EFX_WARN_ON_PARANOID(!port_id_out);
+
+	MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_UPSTREAM_PORT_ID, port_id_in);
+	MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_TYPE, vport_type);
+	MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_NUM_VLAN_TAGS,
+		       (vlan != EFX_EF10_NO_VLAN));
+	MCDI_POPULATE_DWORD_1(inbuf, VPORT_ALLOC_IN_FLAGS,
+			      VPORT_ALLOC_IN_FLAG_AUTO_PORT, 0);
+	if (vlan != EFX_EF10_NO_VLAN)
+		MCDI_POPULATE_DWORD_1(inbuf, VPORT_ALLOC_IN_VLAN_TAGS,
+				      VPORT_ALLOC_IN_VLAN_TAG_0, vlan);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_ALLOC, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_VPORT_ALLOC_OUT_LEN)
+		return -EIO;
+
+	*port_id_out = MCDI_DWORD(outbuf, VPORT_ALLOC_OUT_VPORT_ID);
+	return 0;
+}
+
+static int efx_ef10_vport_free(struct efx_nic *efx, unsigned int port_id)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_FREE_IN_LEN);
+
+	MCDI_SET_DWORD(inbuf, VPORT_FREE_IN_VPORT_ID, port_id);
+
+	return efx_mcdi_rpc(efx, MC_CMD_VPORT_FREE, inbuf, sizeof(inbuf),
+			    NULL, 0, NULL);
+}
+
+static int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
+
+	MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
+			    NULL, 0, NULL);
+}
+
+static int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
+
+	MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
+	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
+			    NULL, 0, NULL);
+}
+
+static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	int i;
+
+	if (!nic_data->vf)
+		return;
+
+	for (i = 0; i < efx->vf_count; i++) {
+		struct ef10_vf *vf = nic_data->vf + i;
+
+		/* If the VF is assigned to a guest, do not free its vport */
+		if (vf->pci_dev &&
+		    vf->pci_dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+			continue;
+
+		if (vf->vport_assigned) {
+			efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, i);
+			vf->vport_assigned = 0;
+		}
+
+		if (!is_zero_ether_addr(vf->mac)) {
+			efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac);
+			eth_zero_addr(vf->mac);
+		}
+
+		if (vf->vport_id) {
+			efx_ef10_vport_free(efx, vf->vport_id);
+			vf->vport_id = 0;
+		}
+
+		vf->efx = NULL;
+	}
+}
+
+static void efx_ef10_sriov_free_vf_vswitching(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+	efx_ef10_sriov_free_vf_vports(efx);
+	kfree(nic_data->vf);
+	nic_data->vf = NULL;
+}
+
+static int efx_ef10_sriov_assign_vf_vport(struct efx_nic *efx,
+					  unsigned int vf_i)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	struct ef10_vf *vf = nic_data->vf + vf_i;
+	int rc;
+
+	if (WARN_ON_ONCE(!nic_data->vf))
+		return -EOPNOTSUPP;
+
+	rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
+				  MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
+				  vf->vlan, &vf->vport_id);
+	if (rc)
+		return rc;
+
+	rc = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
+	if (rc) {
+		eth_zero_addr(vf->mac);
+		return rc;
+	}
+
+	rc = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
+	if (rc)
+		return rc;
+
+	vf->vport_assigned = 1;
+	return 0;
+}
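+
+/* Bring-up order matters: the vport must exist before a MAC address can
+ * be attached to it, and the EVB port is bound to the VF only once the
+ * vport is fully configured.  efx_ef10_sriov_free_vf_vports() runs the
+ * same steps in reverse.
+ */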
+
+static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	unsigned int i;
+	int rc;
+
+	nic_data->vf = kcalloc(efx->vf_count, sizeof(struct ef10_vf),
+			       GFP_KERNEL);
+	if (!nic_data->vf)
+		return -ENOMEM;
+
+	for (i = 0; i < efx->vf_count; i++) {
+		eth_random_addr(nic_data->vf[i].mac);
+		nic_data->vf[i].efx = NULL;
+		nic_data->vf[i].vlan = EFX_EF10_NO_VLAN;
+
+		rc = efx_ef10_sriov_assign_vf_vport(efx, i);
+		if (rc)
+			goto fail;
+	}
+
+	return 0;
+fail:
+	efx_ef10_sriov_free_vf_vports(efx);
+	kfree(nic_data->vf);
+	nic_data->vf = NULL;
+	return rc;
+}
+
+static int efx_ef10_sriov_restore_vf_vswitching(struct efx_nic *efx)
+{
+	unsigned int i;
+	int rc;
+
+	for (i = 0; i < efx->vf_count; i++) {
+		rc = efx_ef10_sriov_assign_vf_vport(efx, i);
+		if (rc)
+			goto fail;
+	}
+
+	return 0;
+fail:
+	efx_ef10_sriov_free_vf_vswitching(efx);
+	return rc;
+}
+
+/* On top of the default firmware vswitch setup, create a VEB vswitch and
+ * an expansion vport for use by this PCI function (the PF).
+ */
+int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	struct net_device *net_dev = efx->net_dev;
+	int rc;
+
+	if (pci_sriov_get_totalvfs(efx->pci_dev) <= 0) {
+		/* vswitch not needed as we have no VFs */
+		efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+		return 0;
+	}
+
+	rc = efx_ef10_vswitch_alloc(efx, EVB_PORT_ID_ASSIGNED,
+				    MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB);
+	if (rc)
+		goto fail1;
+
+	rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
+				  MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
+				  EFX_EF10_NO_VLAN, &nic_data->vport_id);
+	if (rc)
+		goto fail2;
+
+	rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, net_dev->dev_addr);
+	if (rc)
+		goto fail3;
+	ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr);
+
+	rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+	if (rc)
+		goto fail4;
+
+	return 0;
+fail4:
+	efx_ef10_vport_del_mac(efx, nic_data->vport_id, nic_data->vport_mac);
+	eth_zero_addr(nic_data->vport_mac);
+fail3:
+	efx_ef10_vport_free(efx, nic_data->vport_id);
+	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+fail2:
+	efx_ef10_vswitch_free(efx, EVB_PORT_ID_ASSIGNED);
+fail1:
+	return rc;
+}
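+
+/* The fail1..fail4 labels above unwind in strict reverse order of
+ * allocation and leave vport_id at EVB_PORT_ID_ASSIGNED, which the
+ * remove path uses to detect that no vswitch was ever created.
+ */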
+
+int efx_ef10_vswitching_probe_vf(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+	return efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+}
+
+int efx_ef10_vswitching_restore_pf(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	int rc;
+
+	if (!nic_data->must_probe_vswitching)
+		return 0;
+
+	rc = efx_ef10_vswitching_probe_pf(efx);
+	if (rc)
+		goto fail;
+
+	rc = efx_ef10_sriov_restore_vf_vswitching(efx);
+	if (rc)
+		goto fail;
+
+	nic_data->must_probe_vswitching = false;
+fail:
+	return rc;
+}
+
+int efx_ef10_vswitching_restore_vf(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	int rc;
+
+	if (!nic_data->must_probe_vswitching)
+		return 0;
+
+	rc = efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
+	if (rc)
+		return rc;
+
+	nic_data->must_probe_vswitching = false;
+	return 0;
+}
+
+void efx_ef10_vswitching_remove_pf(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+	efx_ef10_sriov_free_vf_vswitching(efx);
+
+	efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+
+	if (nic_data->vport_id == EVB_PORT_ID_ASSIGNED)
+		return; /* No vswitch was ever created */
+
+	if (!is_zero_ether_addr(nic_data->vport_mac)) {
+		efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+				       nic_data->vport_mac);
+		eth_zero_addr(nic_data->vport_mac);
+	}
+	efx_ef10_vport_free(efx, nic_data->vport_id);
+	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+
+	/* Only free the vswitch if no VFs are assigned */
+	if (!pci_vfs_assigned(efx->pci_dev))
+		efx_ef10_vswitch_free(efx, nic_data->vport_id);
+}
+
+void efx_ef10_vswitching_remove_vf(struct efx_nic *efx)
+{
+	efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
+}
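+
+/* A VF only manages its own vadaptor; the vport and vswitch behind
+ * EVB_PORT_ID_ASSIGNED are created and torn down by the parent PF.
+ */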
+
+static int efx_ef10_pci_sriov_enable(struct efx_nic *efx, int num_vfs)
+{
+	struct pci_dev *dev = efx->pci_dev;
+	int rc;
+
+	efx->vf_count = num_vfs;
+
+	rc = efx_ef10_sriov_alloc_vf_vswitching(efx);
+	if (rc)
+		goto fail1;
+
+	rc = pci_enable_sriov(dev, num_vfs);
+	if (rc)
+		goto fail2;
+
+	return 0;
+fail2:
+	efx_ef10_sriov_free_vf_vswitching(efx);
+fail1:
+	efx->vf_count = 0;
+	netif_err(efx, probe, efx->net_dev,
+		  "Failed to enable SRIOV VFs\n");
+	return rc;
+}
+
+static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
+{
+	struct pci_dev *dev = efx->pci_dev;
+	unsigned int vfs_assigned = pci_vfs_assigned(dev);
+
+	if (vfs_assigned && !force) {
+		netif_info(efx, drv, efx->net_dev,
+			   "VFs are assigned to guests; please detach them before disabling SR-IOV\n");
+		return -EBUSY;
+	}
+
+	if (!vfs_assigned)
+		pci_disable_sriov(dev);
+
+	efx_ef10_sriov_free_vf_vswitching(efx);
+	efx->vf_count = 0;
+	return 0;
+}
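+
+/* With force=true (used from efx_ef10_sriov_fini()) SR-IOV state is torn
+ * down even though VFs may still be assigned to guests; the vports of
+ * assigned VFs are preserved by efx_ef10_sriov_free_vf_vports() and
+ * pci_disable_sriov() is skipped so those VFs keep their resources.
+ */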
+
+int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs)
+{
+	if (num_vfs == 0)
+		return efx_ef10_pci_sriov_disable(efx, false);
+	else
+		return efx_ef10_pci_sriov_enable(efx, num_vfs);
+}
+
+int efx_ef10_sriov_init(struct efx_nic *efx)
+{
+	return 0;
+}
+
+void efx_ef10_sriov_fini(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	unsigned int i;
+	int rc;
+
+	if (!nic_data->vf) {
+		/* Remove any unassigned orphaned VFs */
+		if (pci_num_vf(efx->pci_dev) && !pci_vfs_assigned(efx->pci_dev))
+			pci_disable_sriov(efx->pci_dev);
+		return;
+	}
+
+	/* Remove any VFs in the host */
+	for (i = 0; i < efx->vf_count; ++i) {
+		struct efx_nic *vf_efx = nic_data->vf[i].efx;
+
+		if (vf_efx)
+			vf_efx->pci_dev->driver->remove(vf_efx->pci_dev);
+	}
+
+	rc = efx_ef10_pci_sriov_disable(efx, true);
+	if (rc)
+		netif_dbg(efx, drv, efx->net_dev,
+			  "Disabling SRIOV was not successful, rc=%d\n", rc);
+	else
+		netif_dbg(efx, drv, efx->net_dev, "SRIOV disabled\n");
+}
+
+static int efx_ef10_vport_del_vf_mac(struct efx_nic *efx, unsigned int port_id,
+				     u8 *mac)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
+	MCDI_DECLARE_BUF_ERR(outbuf);
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
+	ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
+			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+
+	return rc;
+}
+
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	struct ef10_vf *vf;
+	int rc;
+
+	if (!nic_data->vf)
+		return -EOPNOTSUPP;
+
+	if (vf_i >= efx->vf_count)
+		return -EINVAL;
+	vf = nic_data->vf + vf_i;
+
+	if (vf->efx) {
+		efx_device_detach_sync(vf->efx);
+		efx_net_stop(vf->efx->net_dev);
+
+		down_write(&vf->efx->filter_sem);
+		vf->efx->type->filter_table_remove(vf->efx);
+
+		rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
+		if (rc) {
+			up_write(&vf->efx->filter_sem);
+			return rc;
+		}
+	}
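+
+	/* With the VF's datapath quiesced, the MAC change is made from the
+	 * PF side: unbind the EVB port, swap the address on the vport, then
+	 * rebind and bring the VF's datapath back up.
+	 */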
+
+	rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
+	if (rc)
+		return rc;
+
+	if (!is_zero_ether_addr(vf->mac)) {
+		rc = efx_ef10_vport_del_vf_mac(efx, vf->vport_id, vf->mac);
+		if (rc)
+			return rc;
+	}
+
+	if (!is_zero_ether_addr(mac)) {
+		rc = efx_ef10_vport_add_mac(efx, vf->vport_id, mac);
+		if (rc) {
+			eth_zero_addr(vf->mac);
+			goto fail;
+		}
+		if (vf->efx)
+			ether_addr_copy(vf->efx->net_dev->dev_addr, mac);
+	}
+
+	ether_addr_copy(vf->mac, mac);
+
+	rc = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
+	if (rc)
+		goto fail;
+
+	if (vf->efx) {
+		/* VF cannot use the vport_id that the PF created */
+		rc = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
+		if (rc) {
+			up_write(&vf->efx->filter_sem);
+			return rc;
+		}
+		vf->efx->type->filter_table_probe(vf->efx);
+		up_write(&vf->efx->filter_sem);
+		efx_net_open(vf->efx->net_dev);
+		netif_device_attach(vf->efx->net_dev);
+	}
+
+	return 0;
+
+fail:
+	eth_zero_addr(vf->mac);
+	return rc;
+}
+
+int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
+			       u8 qos)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	struct ef10_vf *vf;
+	u16 old_vlan, new_vlan;
+	int rc = 0, rc2 = 0;
+
+	if (vf_i >= efx->vf_count)
+		return -EINVAL;
+	if (qos != 0)
+		return -EINVAL;
+
+	vf = nic_data->vf + vf_i;
+
+	new_vlan = (vlan == 0) ? EFX_EF10_NO_VLAN : vlan;
+	if (new_vlan == vf->vlan)
+		return 0;
+
+	if (vf->efx) {
+		efx_device_detach_sync(vf->efx);
+		efx_net_stop(vf->efx->net_dev);
+
+		down_write(&vf->efx->filter_sem);
+		vf->efx->type->filter_table_remove(vf->efx);
+
+		rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
+		if (rc)
+			goto restore_filters;
+	}
+
+	if (vf->vport_assigned) {
+		rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
+		if (rc) {
+			netif_warn(efx, drv, efx->net_dev,
+				   "Failed to change vlan on VF %d.\n", vf_i);
+			netif_warn(efx, drv, efx->net_dev,
+				   "This is likely because the VF is bound to a driver in a VM.\n");
+			netif_warn(efx, drv, efx->net_dev,
+				   "Please unload the driver in the VM.\n");
+			goto restore_vadaptor;
+		}
+		vf->vport_assigned = 0;
+	}
+
+	if (!is_zero_ether_addr(vf->mac)) {
+		rc = efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac);
+		if (rc)
+			goto restore_evb_port;
+	}
+
+	if (vf->vport_id) {
+		rc = efx_ef10_vport_free(efx, vf->vport_id);
+		if (rc)
+			goto restore_mac;
+		vf->vport_id = 0;
+	}
+
+	/* Do the actual vlan change */
+	old_vlan = vf->vlan;
+	vf->vlan = new_vlan;
+
+	/* Restore everything in reverse order */
+	rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
+				  MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
+				  vf->vlan, &vf->vport_id);
+	if (rc)
+		goto reset_nic;
+
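+	/* The restore_* labels below deliberately fall through: entering at
+	 * any point replays the remaining bring-up steps, so a failure
+	 * part-way through teardown restores as much VF state as possible.
+	 */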
+restore_mac:
+	if (!is_zero_ether_addr(vf->mac)) {
+		rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
+		if (rc2) {
+			eth_zero_addr(vf->mac);
+			goto reset_nic;
+		}
+	}
+
+restore_evb_port:
+	rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
+	if (rc2)
+		goto reset_nic;
+	else
+		vf->vport_assigned = 1;
+
+restore_vadaptor:
+	if (vf->efx) {
+		rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
+		if (rc2)
+			goto reset_nic;
+	}
+
+restore_filters:
+	if (vf->efx) {
+		rc2 = vf->efx->type->filter_table_probe(vf->efx);
+		if (rc2)
+			goto reset_nic;
+
+		up_write(&vf->efx->filter_sem);
+
+		rc2 = efx_net_open(vf->efx->net_dev);
+		if (rc2)
+			goto reset_nic;
+
+		netif_device_attach(vf->efx->net_dev);
+	}
+	return rc;
+
+reset_nic:
+	if (vf->efx) {
+		up_write(&vf->efx->filter_sem);
+		netif_err(efx, drv, efx->net_dev,
+			  "Failed to restore VF - scheduling reset.\n");
+		efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH);
+	} else {
+		netif_err(efx, drv, efx->net_dev,
+			  "Failed to restore the VF and cannot reset the VF - VF is not functional.\n");
+		netif_err(efx, drv, efx->net_dev,
+			  "Please reload the driver attached to the VF.\n");
+	}
+
+	return rc ? rc : rc2;
+}
+
+int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
+				   bool spoofchk)
+{
+	return spoofchk ? -EOPNOTSUPP : 0;
+}
+
+int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i,
+				     int link_state)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+	BUILD_BUG_ON(IFLA_VF_LINK_STATE_AUTO !=
+		     MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO);
+	BUILD_BUG_ON(IFLA_VF_LINK_STATE_ENABLE !=
+		     MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP);
+	BUILD_BUG_ON(IFLA_VF_LINK_STATE_DISABLE !=
+		     MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN);
+	MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
+			      LINK_STATE_MODE_IN_FUNCTION_PF,
+			      nic_data->pf_index,
+			      LINK_STATE_MODE_IN_FUNCTION_VF, vf_i);
+	MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE, link_state);
+	return efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
+			    NULL, 0, NULL); /* don't care what old mode was */
+}
+
+int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
+				 struct ifla_vf_info *ivf)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_MODE_OUT_LEN);
+
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	struct ef10_vf *vf;
+	size_t outlen;
+	int rc;
+
+	if (vf_i >= efx->vf_count)
+		return -EINVAL;
+
+	if (!nic_data->vf)
+		return -EOPNOTSUPP;
+
+	vf = nic_data->vf + vf_i;
+
+	ivf->vf = vf_i;
+	ivf->min_tx_rate = 0;
+	ivf->max_tx_rate = 0;
+	ether_addr_copy(ivf->mac, vf->mac);
+	ivf->vlan = (vf->vlan == EFX_EF10_NO_VLAN) ? 0 : vf->vlan;
+	ivf->qos = 0;
+
+	MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
+			      LINK_STATE_MODE_IN_FUNCTION_PF,
+			      nic_data->pf_index,
+			      LINK_STATE_MODE_IN_FUNCTION_VF, vf_i);
+	MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE,
+		       MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE);
+	rc = efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_LINK_STATE_MODE_OUT_LEN)
+		return -EIO;
+	ivf->linkstate = MCDI_DWORD(outbuf, LINK_STATE_MODE_OUT_OLD_MODE);
+
+	return 0;
+}
+
+int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx,
+				    struct netdev_phys_item_id *ppid)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+	if (!is_valid_ether_addr(nic_data->port_id))
+		return -EOPNOTSUPP;
+
+	ppid->id_len = ETH_ALEN;
+	memcpy(ppid->id, nic_data->port_id, ppid->id_len);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
new file mode 100644
index 0000000..db4ef53
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -0,0 +1,69 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF10_SRIOV_H
+#define EF10_SRIOV_H
+
+#include "net_driver.h"
+
+/**
+ * struct ef10_vf - PF's store of VF data
+ * @efx: efx_nic struct for the current VF
+ * @pci_dev: the pci_dev struct for the VF, retained while the VF is assigned
+ * @vport_id: vport ID for the VF
+ * @vport_assigned: record whether the vport is currently assigned to the VF
+ * @mac: MAC address for the VF, zero when address is removed from the vport
+ * @vlan: Default VLAN for the VF or %EFX_EF10_NO_VLAN
+ */
+struct ef10_vf {
+	struct efx_nic *efx;
+	struct pci_dev *pci_dev;
+	unsigned int vport_id;
+	unsigned int vport_assigned;
+	u8 mac[ETH_ALEN];
+	u16 vlan;
+#define EFX_EF10_NO_VLAN       0
+};
+
+static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx)
+{
+	return false;
+}
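+
+/* EF10 VFs are enabled through the PCI core's sriov_configure hook (the
+ * sriov_numvfs sysfs file) rather than a module parameter, so
+ * sriov_wanted never requests VFs at probe time.
+ */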
+
+int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs);
+int efx_ef10_sriov_init(struct efx_nic *efx);
+static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
+void efx_ef10_sriov_fini(struct efx_nic *efx);
+static inline void efx_ef10_sriov_flr(struct efx_nic *efx, unsigned vf_i) {}
+
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
+
+int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
+			       u16 vlan, u8 qos);
+
+int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
+				   bool spoofchk);
+
+int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
+				 struct ifla_vf_info *ivf);
+
+int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i,
+				     int link_state);
+
+int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx,
+				    struct netdev_phys_item_id *ppid);
+
+int efx_ef10_vswitching_probe_pf(struct efx_nic *efx);
+int efx_ef10_vswitching_probe_vf(struct efx_nic *efx);
+int efx_ef10_vswitching_restore_pf(struct efx_nic *efx);
+int efx_ef10_vswitching_restore_vf(struct efx_nic *efx);
+void efx_ef10_vswitching_remove_pf(struct efx_nic *efx);
+void efx_ef10_vswitching_remove_vf(struct efx_nic *efx);
+
+#endif /* EF10_SRIOV_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 65944dd..804b9ad 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -26,6 +26,7 @@
 #include "efx.h"
 #include "nic.h"
 #include "selftest.h"
+#include "sriov.h"
 
 #include "mcdi.h"
 #include "workarounds.h"
@@ -76,6 +77,7 @@
 	[RESET_TYPE_RECOVER_OR_ALL]     = "RECOVER_OR_ALL",
 	[RESET_TYPE_WORLD]              = "WORLD",
 	[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+	[RESET_TYPE_DATAPATH]           = "DATAPATH",
 	[RESET_TYPE_MC_BIST]		= "MC_BIST",
 	[RESET_TYPE_DISABLE]            = "DISABLE",
 	[RESET_TYPE_TX_WATCHDOG]        = "TX_WATCHDOG",
@@ -948,6 +950,16 @@
 
 static void efx_fini_port(struct efx_nic *efx);
 
+/* We assume that efx->type->reconfigure_mac will always try to sync RX
+ * filters and therefore needs to read-lock the filter table against freeing
+ */
+void efx_mac_reconfigure(struct efx_nic *efx)
+{
+	down_read(&efx->filter_sem);
+	efx->type->reconfigure_mac(efx);
+	up_read(&efx->filter_sem);
+}
+
 /* Push loopback/power/transmit disable settings to the PHY, and reconfigure
  * the MAC appropriately. All other PHY configuration changes are pushed
  * through phy_op->set_settings(), and pushed asynchronously to the MAC
@@ -1001,7 +1013,7 @@
 
 	mutex_lock(&efx->mac_lock);
 	if (efx->port_enabled)
-		efx->type->reconfigure_mac(efx);
+		efx_mac_reconfigure(efx);
 	mutex_unlock(&efx->mac_lock);
 }
 
@@ -1041,11 +1053,11 @@
 
 	/* Reconfigure the MAC before creating dma queues (required for
 	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
-	efx->type->reconfigure_mac(efx);
+	efx_mac_reconfigure(efx);
 
 	/* Ensure the PHY advertises the correct flow control settings */
 	rc = efx->phy_op->reconfigure(efx);
-	if (rc)
+	if (rc && rc != -EPERM)
 		goto fail2;
 
 	mutex_unlock(&efx->mac_lock);
@@ -1067,7 +1079,7 @@
 	efx->port_enabled = true;
 
 	/* Ensure MAC ingress/egress is enabled */
-	efx->type->reconfigure_mac(efx);
+	efx_mac_reconfigure(efx);
 
 	mutex_unlock(&efx->mac_lock);
 }
@@ -1200,10 +1212,12 @@
 	struct pci_dev *pci_dev = efx->pci_dev;
 	dma_addr_t dma_mask = efx->type->max_dma_mask;
 	unsigned int mem_map_size = efx->type->mem_map_size(efx);
-	int rc;
+	int rc, bar;
 
 	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
 
+	bar = efx->type->mem_bar;
+
 	rc = pci_enable_device(pci_dev);
 	if (rc) {
 		netif_err(efx, probe, efx->net_dev,
@@ -1234,8 +1248,8 @@
 	netif_dbg(efx, probe, efx->net_dev,
 		  "using DMA mask %llx\n", (unsigned long long) dma_mask);
 
-	efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
-	rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
+	efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
+	rc = pci_request_region(pci_dev, bar, "sfc");
 	if (rc) {
 		netif_err(efx, probe, efx->net_dev,
 			  "request for memory BAR failed\n");
@@ -1258,7 +1272,7 @@
 	return 0;
 
  fail4:
-	pci_release_region(efx->pci_dev, EFX_MEM_BAR);
+	pci_release_region(efx->pci_dev, bar);
  fail3:
 	efx->membase_phys = 0;
  fail2:
@@ -1269,6 +1283,8 @@
 
 static void efx_fini_io(struct efx_nic *efx)
 {
+	int bar;
+
 	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
 
 	if (efx->membase) {
@@ -1277,11 +1293,23 @@
 	}
 
 	if (efx->membase_phys) {
-		pci_release_region(efx->pci_dev, EFX_MEM_BAR);
+		bar = efx->type->mem_bar;
+		pci_release_region(efx->pci_dev, bar);
 		efx->membase_phys = 0;
 	}
 
-	pci_disable_device(efx->pci_dev);
+	/* Don't disable bus-mastering if VFs are assigned */
+	if (!pci_vfs_assigned(efx->pci_dev))
+		pci_disable_device(efx->pci_dev);
+}
+
+void efx_set_default_rx_indir_table(struct efx_nic *efx)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+		efx->rx_indir_table[i] =
+			ethtool_rxfh_indir_default(i, efx->rss_spread);
 }
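+
+/* ethtool_rxfh_indir_default() spreads flows round-robin across
+ * rss_spread channels, matching the default a user would get from
+ * "ethtool -X <dev> equal <n>".
+ */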
 
 static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
@@ -1314,15 +1342,19 @@
 	/* If RSS is requested for the PF *and* VFs then we can't write RSS
 	 * table entries that are inaccessible to VFs
 	 */
-	if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
-	    count > efx_vf_size(efx)) {
-		netif_warn(efx, probe, efx->net_dev,
-			   "Reducing number of RSS channels from %u to %u for "
-			   "VF support. Increase vf-msix-limit to use more "
-			   "channels on the PF.\n",
-			   count, efx_vf_size(efx));
-		count = efx_vf_size(efx);
+#ifdef CONFIG_SFC_SRIOV
+	if (efx->type->sriov_wanted) {
+		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
+		    count > efx_vf_size(efx)) {
+			netif_warn(efx, probe, efx->net_dev,
+				   "Reducing number of RSS channels from %u to %u for "
+				   "VF support. Increase vf-msix-limit to use more "
+				   "channels on the PF.\n",
+				   count, efx_vf_size(efx));
+			count = efx_vf_size(efx);
+		}
 	}
+#endif
 
 	return count;
 }
@@ -1426,10 +1458,15 @@
 	}
 
 	/* RSS might be usable on VFs even if it is disabled on the PF */
-
-	efx->rss_spread = ((efx->n_rx_channels > 1 ||
-			    !efx->type->sriov_wanted(efx)) ?
-			   efx->n_rx_channels : efx_vf_size(efx));
+#ifdef CONFIG_SFC_SRIOV
+	if (efx->type->sriov_wanted) {
+		efx->rss_spread = ((efx->n_rx_channels > 1 ||
+				    !efx->type->sriov_wanted(efx)) ?
+				   efx->n_rx_channels : efx_vf_size(efx));
+		return 0;
+	}
+#endif
+	efx->rss_spread = efx->n_rx_channels;
 
 	return 0;
 }
@@ -1593,7 +1630,6 @@
 
 static int efx_probe_nic(struct efx_nic *efx)
 {
-	size_t i;
 	int rc;
 
 	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
@@ -1616,10 +1652,9 @@
 		goto fail2;
 
 	if (efx->n_channels > 1)
-		netdev_rss_key_fill(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
-	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
-		efx->rx_indir_table[i] =
-			ethtool_rxfh_indir_default(i, efx->rss_spread);
+		netdev_rss_key_fill(&efx->rx_hash_key,
+				    sizeof(efx->rx_hash_key));
+	efx_set_default_rx_indir_table(efx);
 
 	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
 	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
@@ -1650,10 +1685,11 @@
 	int rc;
 
 	spin_lock_init(&efx->filter_lock);
-
+	init_rwsem(&efx->filter_sem);
+	down_write(&efx->filter_sem);
 	rc = efx->type->filter_table_probe(efx);
 	if (rc)
-		return rc;
+		goto out_unlock;
 
 #ifdef CONFIG_RFS_ACCEL
 	if (efx->type->offload_features & NETIF_F_NTUPLE) {
@@ -1662,12 +1698,14 @@
 					   GFP_KERNEL);
 		if (!efx->rps_flow_id) {
 			efx->type->filter_table_remove(efx);
-			return -ENOMEM;
+			rc = -ENOMEM;
+			goto out_unlock;
 		}
 	}
 #endif
-
-	return 0;
+out_unlock:
+	up_write(&efx->filter_sem);
+	return rc;
 }
 
 static void efx_remove_filters(struct efx_nic *efx)
@@ -1675,12 +1713,16 @@
 #ifdef CONFIG_RFS_ACCEL
 	kfree(efx->rps_flow_id);
 #endif
+	down_write(&efx->filter_sem);
 	efx->type->filter_table_remove(efx);
+	up_write(&efx->filter_sem);
 }
 
 static void efx_restore_filters(struct efx_nic *efx)
 {
+	down_read(&efx->filter_sem);
 	efx->type->filter_table_restore(efx);
+	up_read(&efx->filter_sem);
 }
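+
+/* Lock order is mac_lock then filter_sem: efx_mac_reconfigure() runs
+ * with mac_lock held and takes filter_sem for read, while the probe and
+ * remove paths above take filter_sem for write without mac_lock.
+ */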
 
 /**************************************************************************
@@ -1712,21 +1754,33 @@
 	}
 	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
 
+#ifdef CONFIG_SFC_SRIOV
+	rc = efx->type->vswitching_probe(efx);
+	if (rc) /* not fatal; the PF will still work fine */
+		netif_warn(efx, probe, efx->net_dev,
+			   "failed to set up vswitching rc=%d; VFs may not function\n",
+			   rc);
+#endif
+
 	rc = efx_probe_filters(efx);
 	if (rc) {
 		netif_err(efx, probe, efx->net_dev,
 			  "failed to create filter tables\n");
-		goto fail3;
+		goto fail4;
 	}
 
 	rc = efx_probe_channels(efx);
 	if (rc)
-		goto fail4;
+		goto fail5;
 
 	return 0;
 
- fail4:
+ fail5:
 	efx_remove_filters(efx);
+ fail4:
+#ifdef CONFIG_SFC_SRIOV
+	efx->type->vswitching_remove(efx);
+#endif
  fail3:
 	efx_remove_port(efx);
  fail2:
@@ -1816,6 +1870,9 @@
 {
 	efx_remove_channels(efx);
 	efx_remove_filters(efx);
+#ifdef CONFIG_SFC_SRIOV
+	efx->type->vswitching_remove(efx);
+#endif
 	efx_remove_port(efx);
 	efx_remove_nic(efx);
 }
@@ -2059,7 +2116,7 @@
  *************************************************************************/
 
 /* Context: process, rtnl_lock() held. */
-static int efx_net_open(struct net_device *net_dev)
+int efx_net_open(struct net_device *net_dev)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 	int rc;
@@ -2088,7 +2145,7 @@
  * Note that the kernel will ignore our return code; this method
  * should really be a void.
  */
-static int efx_net_stop(struct net_device *net_dev)
+int efx_net_stop(struct net_device *net_dev)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 
@@ -2146,7 +2203,7 @@
 
 	mutex_lock(&efx->mac_lock);
 	net_dev->mtu = new_mtu;
-	efx->type->reconfigure_mac(efx);
+	efx_mac_reconfigure(efx);
 	mutex_unlock(&efx->mac_lock);
 
 	efx_start_all(efx);
@@ -2159,6 +2216,8 @@
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct sockaddr *addr = data;
 	u8 *new_addr = addr->sa_data;
+	u8 old_addr[ETH_ALEN];
+	int rc;
 
 	if (!is_valid_ether_addr(new_addr)) {
 		netif_err(efx, drv, efx->net_dev,
@@ -2167,12 +2226,20 @@
 		return -EADDRNOTAVAIL;
 	}
 
+	/* save old address */
+	ether_addr_copy(old_addr, net_dev->dev_addr);
 	ether_addr_copy(net_dev->dev_addr, new_addr);
-	efx->type->sriov_mac_address_changed(efx);
+	if (efx->type->set_mac_address) {
+		rc = efx->type->set_mac_address(efx);
+		if (rc) {
+			ether_addr_copy(net_dev->dev_addr, old_addr);
+			return rc;
+		}
+	}
 
 	/* Reconfigure the MAC */
 	mutex_lock(&efx->mac_lock);
-	efx->type->reconfigure_mac(efx);
+	efx_mac_reconfigure(efx);
 	mutex_unlock(&efx->mac_lock);
 
 	return 0;
@@ -2199,7 +2266,7 @@
 	return 0;
 }
 
-static const struct net_device_ops efx_farch_netdev_ops = {
+static const struct net_device_ops efx_netdev_ops = {
 	.ndo_open		= efx_net_open,
 	.ndo_stop		= efx_net_stop,
 	.ndo_get_stats64	= efx_net_stats,
@@ -2212,10 +2279,12 @@
 	.ndo_set_rx_mode	= efx_set_rx_mode,
 	.ndo_set_features	= efx_set_features,
 #ifdef CONFIG_SFC_SRIOV
-	.ndo_set_vf_mac		= efx_siena_sriov_set_vf_mac,
-	.ndo_set_vf_vlan	= efx_siena_sriov_set_vf_vlan,
-	.ndo_set_vf_spoofchk	= efx_siena_sriov_set_vf_spoofchk,
-	.ndo_get_vf_config	= efx_siena_sriov_get_vf_config,
+	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
+	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
+	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
+	.ndo_get_vf_config	= efx_sriov_get_vf_config,
+	.ndo_set_vf_link_state  = efx_sriov_set_vf_link_state,
+	.ndo_get_phys_port_id   = efx_sriov_get_phys_port_id,
 #endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = efx_netpoll,
@@ -2229,29 +2298,6 @@
 #endif
 };
 
-static const struct net_device_ops efx_ef10_netdev_ops = {
-	.ndo_open		= efx_net_open,
-	.ndo_stop		= efx_net_stop,
-	.ndo_get_stats64	= efx_net_stats,
-	.ndo_tx_timeout		= efx_watchdog,
-	.ndo_start_xmit		= efx_hard_start_xmit,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_do_ioctl		= efx_ioctl,
-	.ndo_change_mtu		= efx_change_mtu,
-	.ndo_set_mac_address	= efx_set_mac_address,
-	.ndo_set_rx_mode	= efx_set_rx_mode,
-	.ndo_set_features	= efx_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= efx_netpoll,
-#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
-	.ndo_busy_poll		= efx_busy_poll,
-#endif
-#ifdef CONFIG_RFS_ACCEL
-	.ndo_rx_flow_steer	= efx_filter_rfs,
-#endif
-};
-
 static void efx_update_name(struct efx_nic *efx)
 {
 	strcpy(efx->name, efx->net_dev->name);
@@ -2264,8 +2310,7 @@
 {
 	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
 
-	if ((net_dev->netdev_ops == &efx_farch_netdev_ops ||
-	     net_dev->netdev_ops == &efx_ef10_netdev_ops) &&
+	if ((net_dev->netdev_ops == &efx_netdev_ops) &&
 	    event == NETDEV_CHANGENAME)
 		efx_update_name(netdev_priv(net_dev));
 
@@ -2284,6 +2329,28 @@
 }
 static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
 
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
+}
+static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	bool enable = count > 0 && *buf != '0';
+
+	mcdi->logging_enabled = enable;
+	return count;
+}
+static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
+#endif
+
 static int efx_register_netdev(struct efx_nic *efx)
 {
 	struct net_device *net_dev = efx->net_dev;
@@ -2292,12 +2359,9 @@
 
 	net_dev->watchdog_timeo = 5 * HZ;
 	net_dev->irq = efx->pci_dev->irq;
-	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
-		net_dev->netdev_ops = &efx_ef10_netdev_ops;
+	net_dev->netdev_ops = &efx_netdev_ops;
+	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
 		net_dev->priv_flags |= IFF_UNICAST_FLT;
-	} else {
-		net_dev->netdev_ops = &efx_farch_netdev_ops;
-	}
 	net_dev->ethtool_ops = &efx_ethtool_ops;
 	net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
 
@@ -2344,9 +2408,21 @@
 			  "failed to init net dev attributes\n");
 		goto fail_registered;
 	}
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+	if (rc) {
+		netif_err(efx, drv, efx->net_dev,
+			  "failed to init mcdi_logging attribute\n");
+		goto fail_attr_mcdi_logging;
+	}
+#endif
 
 	return 0;
 
+#ifdef CONFIG_SFC_MCDI_LOGGING
+fail_attr_mcdi_logging:
+	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+#endif
 fail_registered:
 	rtnl_lock();
 	efx_dissociate(efx);
@@ -2365,13 +2441,14 @@
 
 	BUG_ON(netdev_priv(efx->net_dev) != efx);
 
-	strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
-	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
-
-	rtnl_lock();
-	unregister_netdevice(efx->net_dev);
-	efx->state = STATE_UNINIT;
-	rtnl_unlock();
+	if (efx_dev_registered(efx)) {
+		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+#ifdef CONFIG_SFC_MCDI_LOGGING
+		device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+#endif
+		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+		unregister_netdev(efx->net_dev);
+	}
 }
 
 /**************************************************************************
@@ -2393,7 +2470,8 @@
 	efx_disable_interrupts(efx);
 
 	mutex_lock(&efx->mac_lock);
-	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
+	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+	    method != RESET_TYPE_DATAPATH)
 		efx->phy_op->fini(efx);
 	efx->type->fini(efx);
 }
@@ -2422,11 +2500,13 @@
 	if (!ok)
 		goto fail;
 
-	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
+	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+	    method != RESET_TYPE_DATAPATH) {
 		rc = efx->phy_op->init(efx);
 		if (rc)
 			goto fail;
-		if (efx->phy_op->reconfigure(efx))
+		rc = efx->phy_op->reconfigure(efx);
+		if (rc && rc != -EPERM)
 			netif_err(efx, drv, efx->net_dev,
 				  "could not restore PHY settings\n");
 	}
@@ -2434,8 +2514,20 @@
 	rc = efx_enable_interrupts(efx);
 	if (rc)
 		goto fail;
+
+#ifdef CONFIG_SFC_SRIOV
+	rc = efx->type->vswitching_restore(efx);
+	if (rc) /* not fatal; the PF will still work fine */
+		netif_warn(efx, probe, efx->net_dev,
+			   "failed to restore vswitching rc=%d; VFs may not function\n",
+			   rc);
+#endif
+
+	/* efx_restore_filters() itself takes filter_sem for read */
 	efx_restore_filters(efx);
-	efx->type->sriov_reset(efx);
+	if (efx->type->sriov_reset)
+		efx->type->sriov_reset(efx);
 
 	mutex_unlock(&efx->mac_lock);
 
@@ -2605,6 +2697,7 @@
 	case RESET_TYPE_WORLD:
 	case RESET_TYPE_DISABLE:
 	case RESET_TYPE_RECOVER_OR_DISABLE:
+	case RESET_TYPE_DATAPATH:
 	case RESET_TYPE_MC_BIST:
 	case RESET_TYPE_MCDI_TIMEOUT:
 		method = type;
@@ -2655,6 +2748,8 @@
 	 .driver_data = (unsigned long) &siena_a0_nic_type},
 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),  /* SFC9120 PF */
 	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903),  /* SFC9120 VF */
+	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923),  /* SFC9140 PF */
 	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
 	{0}			/* end of list */
@@ -2809,7 +2904,8 @@
 }
 
 /* Final NIC shutdown
- * This is called only at module unload (or hotplug removal).
+ * This is called only at module unload (or hotplug removal).  A PF can call
+ * this on its VFs to ensure they are unbound first.
  */
 static void efx_pci_remove(struct pci_dev *pci_dev)
 {
@@ -2824,9 +2920,12 @@
 	efx_dissociate(efx);
 	dev_close(efx->net_dev);
 	efx_disable_interrupts(efx);
+	efx->state = STATE_UNINIT;
 	rtnl_unlock();
 
-	efx->type->sriov_fini(efx);
+	if (efx->type->sriov_fini)
+		efx->type->sriov_fini(efx);
+
 	efx_unregister_netdev(efx);
 
 	efx_mtd_remove(efx);
@@ -3008,7 +3107,8 @@
 	netif_info(efx, probe, efx->net_dev,
 		   "Solarflare NIC detected\n");
 
-	efx_probe_vpd_strings(efx);
+	if (!efx->type->is_vf)
+		efx_probe_vpd_strings(efx);
 
 	/* Set up basic I/O (BAR mappings etc) */
 	rc = efx_init_io(efx);
@@ -3023,10 +3123,12 @@
 	if (rc)
 		goto fail4;
 
-	rc = efx->type->sriov_init(efx);
-	if (rc)
-		netif_err(efx, probe, efx->net_dev,
-			  "SR-IOV can't be enabled rc %d\n", rc);
+	if (efx->type->sriov_init) {
+		rc = efx->type->sriov_init(efx);
+		if (rc)
+			netif_err(efx, probe, efx->net_dev,
+				  "SR-IOV can't be enabled rc %d\n", rc);
+	}
 
 	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
 
@@ -3058,6 +3160,26 @@
 	return rc;
 }
 
+/* efx_pci_sriov_configure returns the actual number of Virtual Functions
+ * enabled on success
+ */
+#ifdef CONFIG_SFC_SRIOV
+static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+	int rc;
+	struct efx_nic *efx = pci_get_drvdata(dev);
+
+	if (!efx->type->sriov_configure)
+		return -EOPNOTSUPP;
+
+	rc = efx->type->sriov_configure(efx, num_vfs);
+	if (rc)
+		return rc;
+	return num_vfs;
+}
+#endif
+
 static int efx_pm_freeze(struct device *dev)
 {
 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
@@ -3280,6 +3402,9 @@
 	.remove		= efx_pci_remove,
 	.driver.pm	= &efx_pm_ops,
 	.err_handler	= &efx_err_handlers,
+#ifdef CONFIG_SFC_SRIOV
+	.sriov_configure = efx_pci_sriov_configure,
+#endif
 };
 
 /**************************************************************************
@@ -3302,9 +3427,11 @@
 	if (rc)
 		goto err_notifier;
 
+#ifdef CONFIG_SFC_SRIOV
 	rc = efx_init_sriov();
 	if (rc)
 		goto err_sriov;
+#endif
 
 	reset_workqueue = create_singlethread_workqueue("sfc_reset");
 	if (!reset_workqueue) {
@@ -3321,8 +3448,10 @@
  err_pci:
 	destroy_workqueue(reset_workqueue);
  err_reset:
+#ifdef CONFIG_SFC_SRIOV
 	efx_fini_sriov();
  err_sriov:
+#endif
 	unregister_netdevice_notifier(&efx_netdev_notifier);
  err_notifier:
 	return rc;
@@ -3334,7 +3463,9 @@
 
 	pci_unregister_driver(&efx_pci_driver);
 	destroy_workqueue(reset_workqueue);
+#ifdef CONFIG_SFC_SRIOV
 	efx_fini_sriov();
+#endif
 	unregister_netdevice_notifier(&efx_netdev_notifier);
 
 }
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 2587c58..acb1e07 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -15,7 +15,12 @@
 #include "filter.h"
 
 /* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
+/* All VFs use BAR 0/1 for memory */
 #define EFX_MEM_BAR 2
+#define EFX_MEM_VF_BAR 0
+
+int efx_net_open(struct net_device *net_dev);
+int efx_net_stop(struct net_device *net_dev);
 
 /* TX */
 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
@@ -32,6 +37,7 @@
 extern unsigned int efx_piobuf_size;
 
 /* RX */
+void efx_set_default_rx_indir_table(struct efx_nic *efx);
 void efx_rx_config_page_split(struct efx_nic *efx);
 int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
@@ -71,6 +77,8 @@
 
 /* Filters */
 
+void efx_mac_reconfigure(struct efx_nic *efx);
+
 /**
  * efx_filter_insert_filter - add or replace a filter
  * @efx: NIC in which to insert the filter
@@ -220,6 +228,13 @@
 static inline void efx_mtd_remove(struct efx_nic *efx) {}
 #endif
 
+#ifdef CONFIG_SFC_SRIOV
+static inline unsigned int efx_vf_size(struct efx_nic *efx)
+{
+	return 1 << efx->vi_scale;
+}
+#endif
+
 static inline void efx_schedule_channel(struct efx_channel *channel)
 {
 	netif_vdbg(channel->efx, intr, channel->efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index d1dbb5f..c94f562 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -143,6 +143,7 @@
  * @RESET_TYPE_WORLD: Reset as much as possible
  * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
  * unsuccessful.
+ * @RESET_TYPE_DATAPATH: Reset datapath only.
  * @RESET_TYPE_MC_BIST: MC entering BIST mode.
  * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
  * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
@@ -159,6 +160,7 @@
 	RESET_TYPE_ALL,
 	RESET_TYPE_WORLD,
 	RESET_TYPE_RECOVER_OR_DISABLE,
+	RESET_TYPE_DATAPATH,
 	RESET_TYPE_MC_BIST,
 	RESET_TYPE_DISABLE,
 	RESET_TYPE_MAX_METHOD,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 4835bc0..0347976 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -734,7 +734,7 @@
 	/* Reconfigure the MAC. The PHY *may* generate a link state change event
 	 * if the user just changed the advertised capabilities, but there's no
 	 * harm doing this twice */
-	efx->type->reconfigure_mac(efx);
+	efx_mac_reconfigure(efx);
 
 out:
 	mutex_unlock(&efx->mac_lock);
@@ -1109,9 +1109,8 @@
 		return -EOPNOTSUPP;
 	if (!indir)
 		return 0;
-	memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
-	efx->type->rx_push_rss_config(efx);
-	return 0;
+
+	return efx->type->rx_push_rss_config(efx, true, indir);
 }
 
 static int efx_ethtool_get_ts_info(struct net_device *net_dev,
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index f166c8e..80e69af 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -477,16 +477,29 @@
  *
  **************************************************************************
  */
+static int dummy_rx_push_rss_config(struct efx_nic *efx, bool user,
+				    const u32 *rx_indir_table)
+{
+	(void) efx;
+	(void) user;
+	(void) rx_indir_table;
+	return -ENOSYS;
+}
 
-static void falcon_b0_rx_push_rss_config(struct efx_nic *efx)
+static int falcon_b0_rx_push_rss_config(struct efx_nic *efx, bool user,
+					const u32 *rx_indir_table)
 {
 	efx_oword_t temp;
 
+	(void) user;
 	/* Set hash key for IPv4 */
 	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
 	efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
 
+	memcpy(efx->rx_indir_table, rx_indir_table,
+	       sizeof(efx->rx_indir_table));
 	efx_farch_rx_push_indir_table(efx);
+	return 0;
 }
 
 /**************************************************************************
@@ -2507,7 +2520,7 @@
 	falcon_init_rx_cfg(efx);
 
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
-		falcon_b0_rx_push_rss_config(efx);
+		falcon_b0_rx_push_rss_config(efx, false, efx->rx_indir_table);
 
 		/* Set destination of both TX and RX Flush events */
 		EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
@@ -2687,6 +2700,8 @@
  */
 
 const struct efx_nic_type falcon_a1_nic_type = {
+	.is_vf = false,
+	.mem_bar = EFX_MEM_BAR,
 	.mem_map_size = falcon_a1_mem_map_size,
 	.probe = falcon_probe_nic,
 	.remove = falcon_remove_nic,
@@ -2729,7 +2744,7 @@
 	.tx_init = efx_farch_tx_init,
 	.tx_remove = efx_farch_tx_remove,
 	.tx_write = efx_farch_tx_write,
-	.rx_push_rss_config = efx_port_dummy_op_void,
+	.rx_push_rss_config = dummy_rx_push_rss_config,
 	.rx_probe = efx_farch_rx_probe,
 	.rx_init = efx_farch_rx_init,
 	.rx_remove = efx_farch_rx_remove,
@@ -2766,11 +2781,6 @@
 	.mtd_write = falcon_mtd_write,
 	.mtd_sync = falcon_mtd_sync,
 #endif
-	.sriov_init = efx_falcon_sriov_init,
-	.sriov_fini = efx_falcon_sriov_fini,
-	.sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
-	.sriov_wanted = efx_falcon_sriov_wanted,
-	.sriov_reset = efx_falcon_sriov_reset,
 
 	.revision = EFX_REV_FALCON_A1,
 	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
@@ -2788,6 +2798,8 @@
 };
 
 const struct efx_nic_type falcon_b0_nic_type = {
+	.is_vf = false,
+	.mem_bar = EFX_MEM_BAR,
 	.mem_map_size = falcon_b0_mem_map_size,
 	.probe = falcon_probe_nic,
 	.remove = falcon_remove_nic,
@@ -2867,11 +2879,6 @@
 	.mtd_write = falcon_mtd_write,
 	.mtd_sync = falcon_mtd_sync,
 #endif
-	.sriov_init = efx_falcon_sriov_init,
-	.sriov_fini = efx_falcon_sriov_fini,
-	.sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
-	.sriov_wanted = efx_falcon_sriov_wanted,
-	.sriov_reset = efx_falcon_sriov_reset,
 
 	.revision = EFX_REV_FALCON_B0,
 	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index bb89e96..f08266f 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -20,6 +20,8 @@
 #include "efx.h"
 #include "nic.h"
 #include "farch_regs.h"
+#include "sriov.h"
+#include "siena_sriov.h"
 #include "io.h"
 #include "workarounds.h"
 
@@ -1198,13 +1200,17 @@
 		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
 			   channel->channel, ev_sub_data);
 		efx_farch_handle_tx_flush_done(efx, event);
+#ifdef CONFIG_SFC_SRIOV
 		efx_siena_sriov_tx_flush_done(efx, event);
+#endif
 		break;
 	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
 		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
 			   channel->channel, ev_sub_data);
 		efx_farch_handle_rx_flush_done(efx, event);
+#ifdef CONFIG_SFC_SRIOV
 		efx_siena_sriov_rx_flush_done(efx, event);
+#endif
 		break;
 	case FSE_AZ_EVQ_INIT_DONE_EV:
 		netif_dbg(efx, hw, efx->net_dev,
@@ -1242,8 +1248,11 @@
 				  " RX Q %d is disabled.\n", ev_sub_data,
 				  ev_sub_data);
 			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
-		} else
+		}
+#ifdef CONFIG_SFC_SRIOV
+		else
 			efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
+#endif
 		break;
 	case FSE_BZ_TX_DSC_ERROR_EV:
 		if (ev_sub_data < EFX_VI_BASE) {
@@ -1252,8 +1261,11 @@
 				  " TX Q %d is disabled.\n", ev_sub_data,
 				  ev_sub_data);
 			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
-		} else
+		}
+#ifdef CONFIG_SFC_SRIOV
+		else
 			efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
+#endif
 		break;
 	default:
 		netif_vdbg(efx, hw, efx->net_dev,
@@ -1317,9 +1329,11 @@
 		case FSE_AZ_EV_CODE_DRIVER_EV:
 			efx_farch_handle_driver_event(channel, &event);
 			break;
+#ifdef CONFIG_SFC_SRIOV
 		case FSE_CZ_EV_CODE_USER_EV:
 			efx_siena_sriov_event(channel, &event);
 			break;
+#endif
 		case FSE_CZ_EV_CODE_MCDI_EV:
 			efx_mcdi_process_event(channel, &event);
 			break;
@@ -1685,28 +1699,32 @@
 	vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
 
 #ifdef CONFIG_SFC_SRIOV
-	if (efx->type->sriov_wanted(efx)) {
-		unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
+	if (efx->type->sriov_wanted) {
+		if (efx->type->sriov_wanted(efx)) {
+			unsigned vi_dc_entries, buftbl_free;
+			unsigned entries_per_vf, vf_limit;
 
-		nic_data->vf_buftbl_base = buftbl_min;
+			nic_data->vf_buftbl_base = buftbl_min;
 
-		vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
-		vi_count = max(vi_count, EFX_VI_BASE);
-		buftbl_free = (sram_lim_qw - buftbl_min -
-			       vi_count * vi_dc_entries);
+			vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
+			vi_count = max(vi_count, EFX_VI_BASE);
+			buftbl_free = (sram_lim_qw - buftbl_min -
+				       vi_count * vi_dc_entries);
 
-		entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
-				  efx_vf_size(efx));
-		vf_limit = min(buftbl_free / entries_per_vf,
-			       (1024U - EFX_VI_BASE) >> efx->vi_scale);
+			entries_per_vf = ((vi_dc_entries +
+					   EFX_VF_BUFTBL_PER_VI) *
+					  efx_vf_size(efx));
+			vf_limit = min(buftbl_free / entries_per_vf,
+				       (1024U - EFX_VI_BASE) >> efx->vi_scale);
 
-		if (efx->vf_count > vf_limit) {
-			netif_err(efx, probe, efx->net_dev,
-				  "Reducing VF count from from %d to %d\n",
-				  efx->vf_count, vf_limit);
-			efx->vf_count = vf_limit;
+			if (efx->vf_count > vf_limit) {
+				netif_err(efx, probe, efx->net_dev,
+					  "Reducing VF count from %d to %d\n",
+					  efx->vf_count, vf_limit);
+				efx->vf_count = vf_limit;
+			}
+			vi_count += efx->vf_count * efx_vf_size(efx);
 		}
-		vi_count += efx->vf_count * efx_vf_size(efx);
 	}
 #endif
 
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index d37928f..81640f8 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/moduleparam.h>
 #include <asm/cmpxchg.h>
 #include "net_driver.h"
 #include "nic.h"
@@ -54,18 +55,32 @@
 static bool efx_mcdi_poll_once(struct efx_nic *efx);
 static void efx_mcdi_abandon(struct efx_nic *efx);
 
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static bool mcdi_logging_default;
+module_param(mcdi_logging_default, bool, 0644);
+MODULE_PARM_DESC(mcdi_logging_default,
+		 "Enable MCDI logging on newly-probed functions");
+#endif
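+
+/* This parameter only sets the initial state for newly-probed functions;
+ * logging can be toggled per-function at runtime through the mcdi_logging
+ * sysfs attribute registered in efx.c.
+ */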
+
 int efx_mcdi_init(struct efx_nic *efx)
 {
 	struct efx_mcdi_iface *mcdi;
 	bool already_attached;
-	int rc;
+	int rc = -ENOMEM;
 
 	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
 	if (!efx->mcdi)
-		return -ENOMEM;
+		goto fail;
 
 	mcdi = efx_mcdi(efx);
 	mcdi->efx = efx;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	/* consuming code assumes buffer is page-sized */
+	mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL);
+	if (!mcdi->logging_buffer)
+		goto fail1;
+	mcdi->logging_enabled = mcdi_logging_default;
+#endif
 	init_waitqueue_head(&mcdi->wq);
 	spin_lock_init(&mcdi->iface_lock);
 	mcdi->state = MCDI_STATE_QUIESCENT;
@@ -81,7 +96,7 @@
 	/* Recover from a failed assertion before probing */
 	rc = efx_mcdi_handle_assertion(efx);
 	if (rc)
-		return rc;
+		goto fail2;
 
 	/* Let the MC (and BMC, if this is a LOM) know that the driver
 	 * is loaded. We should do this before we reset the NIC.
@@ -90,7 +105,7 @@
 	if (rc) {
 		netif_err(efx, probe, efx->net_dev,
 			  "Unable to register driver with MCPU\n");
-		return rc;
+		goto fail2;
 	}
 	if (already_attached)
 		/* Not a fatal error */
@@ -102,6 +117,15 @@
 		efx->primary = efx;
 
 	return 0;
+fail2:
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	free_page((unsigned long)mcdi->logging_buffer);
+fail1:
+#endif
+	kfree(efx->mcdi);
+	efx->mcdi = NULL;
+fail:
+	return rc;
 }
 
 void efx_mcdi_fini(struct efx_nic *efx)
@@ -114,6 +138,10 @@
 	/* Relinquish the device (back to the BMC, if this is a LOM) */
 	efx_mcdi_drv_attach(efx, false, NULL);
 
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	free_page((unsigned long)efx->mcdi->iface.logging_buffer);
+#endif
+
 	kfree(efx->mcdi);
 }
 
@@ -121,6 +149,9 @@
 				  const efx_dword_t *inbuf, size_t inlen)
 {
 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
 	efx_dword_t hdr[2];
 	size_t hdr_len;
 	u32 xflags, seqno;
@@ -165,6 +196,31 @@
 		hdr_len = 8;
 	}
 
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+		int bytes = 0;
+		int i;
+		/* Lengths should always be a whole number of dwords, so scream
+		 * if they're not.
+		 */
+		WARN_ON_ONCE(hdr_len % 4);
+		WARN_ON_ONCE(inlen % 4);
+
+		/* We own the logging buffer, as only one MCDI can be in
+		 * progress on a NIC at any one time.  So no need for locking.
+		 */
+		for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
+			bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+					  " %08x", le32_to_cpu(hdr[i].u32[0]));
+
+		for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
+			bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+					  " %08x", le32_to_cpu(inbuf[i].u32[0]));
+
+		netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
+	}
+#endif
+
 	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
 
 	mcdi->new_epoch = false;
@@ -206,6 +262,9 @@
 {
 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 	unsigned int respseq, respcmd, error;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
 	efx_dword_t hdr;
 
 	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
@@ -223,6 +282,39 @@
 			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
 	}
 
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+		size_t hdr_len, data_len;
+		int bytes = 0;
+		int i;
+
+		WARN_ON_ONCE(mcdi->resp_hdr_len % 4);
+		hdr_len = mcdi->resp_hdr_len / 4;
+		/* MCDI_DECLARE_BUF ensures that underlying buffer is padded
+		 * to dword size, and the MCDI buffer is always dword size
+		 */
+		data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4);
+
+		/* We own the logging buffer, as only one MCDI can be in
+		 * progress on a NIC at any one time.  So no need for locking.
+		 */
+		for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
+			efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
+			bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+					  " %08x", le32_to_cpu(hdr.u32[0]));
+		}
+
+		for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
+			efx->type->mcdi_read_response(efx, &hdr,
+					mcdi->resp_hdr_len + (i * 4), 4);
+			bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+					  " %08x", le32_to_cpu(hdr.u32[0]));
+		}
+
+		netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
+	}
+#endif
+
 	if (error && mcdi->resp_data_len == 0) {
 		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
 		mcdi->resprc = -EIO;
@@ -406,7 +498,7 @@
 	struct efx_mcdi_async_param *async;
 	size_t hdr_len, data_len, err_len;
 	efx_dword_t *outbuf;
-	MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+	MCDI_DECLARE_BUF_ERR(errbuf);
 	int rc;
 
 	if (cmpxchg(&mcdi->state,
@@ -534,7 +626,7 @@
 				size_t *outlen_actual, bool quiet)
 {
 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-	MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+	MCDI_DECLARE_BUF_ERR(errbuf);
 	int rc;
 
 	if (mcdi->mode == MCDI_MODE_POLL)
@@ -1035,7 +1127,9 @@
 		/* MAC stats are gather lazily.  We can ignore this. */
 		break;
 	case MCDI_EVENT_CODE_FLR:
-		efx_siena_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
+		if (efx->type->sriov_flr)
+			efx->type->sriov_flr(efx,
+					     MCDI_EVENT_FIELD(*event, FLR_VF));
 		break;
 	case MCDI_EVENT_CODE_PTP_RX:
 	case MCDI_EVENT_CODE_PTP_FAULT:
@@ -1081,9 +1175,7 @@
 
 void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
 {
-	MCDI_DECLARE_BUF(outbuf,
-			 max(MC_CMD_GET_VERSION_OUT_LEN,
-			     MC_CMD_GET_CAPABILITIES_OUT_LEN));
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
 	size_t outlength;
 	const __le16 *ver_words;
 	size_t offset;
@@ -1108,19 +1200,11 @@
 	 * single version.  Report which variants are running.
 	 */
 	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
-		BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
-		rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
-				  outbuf, sizeof(outbuf), &outlength);
-		if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN)
-			offset += snprintf(
-				buf + offset, len - offset, " rx? tx?");
-		else
-			offset += snprintf(
-				buf + offset, len - offset, " rx%x tx%x",
-				MCDI_WORD(outbuf,
-					  GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID),
-				MCDI_WORD(outbuf,
-					  GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID));
+		struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+		offset += snprintf(buf + offset, len - offset, " rx%x tx%x",
+				   nic_data->rx_dpcpu_fw_id,
+				   nic_data->tx_dpcpu_fw_id);
 
 		/* It's theoretically possible for the string to exceed 31
 		 * characters, though in practice the first three version
@@ -1150,10 +1234,26 @@
 	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
 	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
 
-	rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
-			  outbuf, sizeof(outbuf), &outlen);
-	if (rc)
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), &outlen);
+	/* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID
+	 * specified will fail with EPERM, and we have to tell the MC we don't
+	 * care what firmware we get.
+	 */
+	if (rc == -EPERM) {
+		netif_dbg(efx, probe, efx->net_dev,
+			  "efx_mcdi_drv_attach with fw-variant setting failed with EPERM, trying without it\n");
+		MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
+			       MC_CMD_FW_DONT_CARE);
+		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
+					sizeof(inbuf), outbuf, sizeof(outbuf),
+					&outlen);
+	}
+	if (rc) {
+		efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf),
+				       outbuf, outlen, rc);
 		goto fail;
+	}
 	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
 		rc = -EIO;
 		goto fail;
@@ -1178,16 +1278,6 @@
 	 * and are completely trusted by firmware.  Abort probing
 	 * if that's not true for this function.
 	 */
-	if (driver_operating &&
-	    (efx->mcdi->fn_flags &
-	     (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
-	      1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
-	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
-	     1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
-		netif_err(efx, probe, efx->net_dev,
-			  "This driver version only supports one function per port\n");
-		return -ENODEV;
-	}
 
 	if (was_attached != NULL)
 		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
@@ -1385,10 +1475,13 @@
 	return rc;
 }
 
+/* Returns 1 if an assertion was read, 0 if no assertion had fired,
+ * negative on error.
+ */
 static int efx_mcdi_read_assertion(struct efx_nic *efx)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
-	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
 	unsigned int flags, index;
 	const char *reason;
 	size_t outlen;
@@ -1406,6 +1499,8 @@
 		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
 					inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
 					outbuf, sizeof(outbuf), &outlen);
+		if (rc == -EPERM)
+			return 0;
 	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
 
 	if (rc) {
@@ -1443,24 +1538,31 @@
 			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
 					   index));
 
-	return 0;
+	return 1;
 }
 
-static void efx_mcdi_exit_assertion(struct efx_nic *efx)
+static int efx_mcdi_exit_assertion(struct efx_nic *efx)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
+	int rc;
 
 	/* If the MC is running debug firmware, it might now be
 	 * waiting for a debugger to attach, but we just want it to
 	 * reboot.  We set a flag that makes the command a no-op if it
-	 * has already done so.  We don't know what return code to
-	 * expect (0 or -EIO), so ignore it.
+	 * has already done so.
+	 * The command will thus return either 0 or -EIO.
 	 */
 	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
 	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
 		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
-	(void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
-			    NULL, 0, NULL);
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
+				NULL, 0, NULL);
+	if (rc == -EIO)
+		rc = 0;
+	if (rc)
+		efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN,
+				       NULL, 0, rc);
+	return rc;
 }
 
 int efx_mcdi_handle_assertion(struct efx_nic *efx)
@@ -1468,12 +1570,10 @@
 	int rc;
 
 	rc = efx_mcdi_read_assertion(efx);
-	if (rc)
+	if (rc <= 0)
 		return rc;
 
-	efx_mcdi_exit_assertion(efx);
-
-	return 0;
+	return efx_mcdi_exit_assertion(efx);
 }
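
With efx_mcdi_read_assertion() now distinguishing "assertion read" (1) from
"no assertion" (0), efx_mcdi_handle_assertion() only reboots the MC when an
assertion actually fired, and a failure to exit the assertion is propagated
instead of being ignored. A hedged sketch of a caller, assuming a reset
path similar to the driver's existing ones:

	/* A non-zero return now means the handler itself failed, not
	 * merely that an assertion had fired.
	 */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		netif_err(efx, hw, efx->net_dev,
			  "MC assertion handling failed, rc=%d\n", rc);
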
 
 void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
@@ -1550,7 +1650,9 @@
 	if (rc)
 		return rc;
 
-	if (method == RESET_TYPE_WORLD)
+	if (method == RESET_TYPE_DATAPATH)
+		return 0;
+	else if (method == RESET_TYPE_WORLD)
 		return efx_mcdi_reset_mc(efx);
 	else
 		return efx_mcdi_reset_func(efx);
@@ -1688,6 +1790,36 @@
 			    NULL, 0, NULL);
 }
 
+int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
+			     unsigned int *enabled_out)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+
+	if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) {
+		rc = -EIO;
+		goto fail;
+	}
+
+	if (impl_out)
+		*impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);
+
+	if (enabled_out)
+		*enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);
+
+	return 0;
+
+fail:
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
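
The two output masks use the MC_CMD_GET_WORKAROUNDS_OUT_* bit definitions
added to mcdi_pcol.h later in this patch. A hedged usage sketch from an
EF10 probe context (nic_data->workaround_35388 is the flag documented in
nic.h; the surrounding control flow is illustrative):

	unsigned int implemented, enabled;

	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
	if (rc == 0 &&
	    (implemented & enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388))
		nic_data->workaround_35388 = true;
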
+
 #ifdef CONFIG_SFC_MTD
 
 #define EFX_MCDI_NVRAM_LEN_MAX 128
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 56465f7..1838afe 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -58,6 +58,8 @@
  *	enabled
  * @async_list: Queue of asynchronous requests
  * @async_timer: Timer for asynchronous request timeout
+ * @logging_buffer: buffer that may be used to build MCDI tracing messages
+ * @logging_enabled: whether to trace MCDI
  */
 struct efx_mcdi_iface {
 	struct efx_nic *efx;
@@ -74,6 +76,10 @@
 	spinlock_t async_lock;
 	struct list_head async_list;
 	struct timer_list async_timer;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	char *logging_buffer;
+	bool logging_enabled;
+#endif
 };
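
The CONFIG_SFC_MCDI_LOGGING fields give the MCDI core a preallocated line
buffer so tracing never allocates on the I/O path. A minimal sketch of the
intended pattern, where LOG_LINE_MAX, cmd and inlen are assumed placeholders
for the line-length bound, command code and request length (the formatting
is illustrative, not the driver's actual tracing code):

#ifdef CONFIG_SFC_MCDI_LOGGING
	if (mcdi->logging_enabled) {
		/* Format into the preallocated buffer; no allocation
		 * on the MCDI path.
		 */
		snprintf(mcdi->logging_buffer, LOG_LINE_MAX,
			 "MCDI RPC REQ: cmd=%#x inlen=%zu", cmd, inlen);
		netif_info(mcdi->efx, hw, mcdi->efx->net_dev, "%s\n",
			   mcdi->logging_buffer);
	}
#endif
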
 
 struct efx_mcdi_mon {
@@ -176,10 +182,12 @@
  * 32-bit-aligned.  Also, on Siena we must copy to the MC shared
  * memory strictly 32 bits at a time, so add any necessary padding.
  */
-#define MCDI_DECLARE_BUF(_name, _len)					\
+#define _MCDI_DECLARE_BUF(_name, _len)					\
 	efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
-#define MCDI_DECLARE_BUF_OUT_OR_ERR(_name, _len)			\
-	MCDI_DECLARE_BUF(_name, max_t(size_t, _len, 8))
+#define MCDI_DECLARE_BUF(_name, _len)					\
+	_MCDI_DECLARE_BUF(_name, _len) = {{{0}}}
+#define MCDI_DECLARE_BUF_ERR(_name)					\
+	MCDI_DECLARE_BUF(_name, 8)
 #define _MCDI_PTR(_buf, _offset)					\
 	((u8 *)(_buf) + (_offset))
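
Two changes are worth noting here: MCDI_DECLARE_BUF() now zero-initialises
the buffer (the = {{{0}}} initialiser on the efx_dword_t array), so padding
in requests no longer leaks stack contents, and the old OUT_OR_ERR variant
is replaced by MCDI_DECLARE_BUF_ERR(), a fixed 8-byte buffer just large
enough for an MCDI error response. A minimal sketch:

	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);	/* zero-filled */
	MCDI_DECLARE_BUF_ERR(errbuf);			/* 8 bytes, zero-filled */
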
 #define MCDI_PTR(_buf, _field)						\
@@ -339,6 +347,8 @@
 enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
 int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
 int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
+			     unsigned int *enabled_out);
 
 #ifdef CONFIG_SFC_MCDI_MON
 int efx_mcdi_mon_probe(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index e028de1..45fca9f 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -638,6 +638,8 @@
  */
 #define MC_CMD_READ32 0x1
 
+#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_READ32_IN msgrequest */
 #define    MC_CMD_READ32_IN_LEN 8
 #define       MC_CMD_READ32_IN_ADDR_OFST 0
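
The MC_CMD_<code>_PRIVILEGE_CTG defines added throughout this header tag
each MCDI command with the SR-IOV privilege category (SRIOV_CTG_ADMIN,
SRIOV_CTG_LINK, SRIOV_CTG_ONLOAD or SRIOV_CTG_GENERAL) that a function
needs before the MC will accept the command. Keying the define on the
command code lets a category be looked up by token pasting; a minimal
sketch of that idiom (the MC_CMD_PRIVILEGE_CTG helper itself is
illustrative, not part of this patch):

	#define MC_CMD_PRIVILEGE_CTG(cmd)  MC_CMD_ ## cmd ## _PRIVILEGE_CTG

	/* e.g. MC_CMD_PRIVILEGE_CTG(0x1) expands to SRIOV_CTG_ADMIN */
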
@@ -659,6 +661,8 @@
  */
 #define MC_CMD_WRITE32 0x2
 
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_WRITE32_IN msgrequest */
 #define    MC_CMD_WRITE32_IN_LENMIN 8
 #define    MC_CMD_WRITE32_IN_LENMAX 252
@@ -679,6 +683,8 @@
  */
 #define MC_CMD_COPYCODE 0x3
 
+#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_COPYCODE_IN msgrequest */
 #define    MC_CMD_COPYCODE_IN_LEN 16
 /* Source address */
@@ -717,6 +723,8 @@
  */
 #define MC_CMD_SET_FUNC 0x4
 
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SET_FUNC_IN msgrequest */
 #define    MC_CMD_SET_FUNC_IN_LEN 4
 /* Set function */
@@ -732,6 +740,8 @@
  */
 #define MC_CMD_GET_BOOT_STATUS 0x5
 
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
 #define    MC_CMD_GET_BOOT_STATUS_IN_LEN 0
 
@@ -758,6 +768,8 @@
  */
 #define MC_CMD_GET_ASSERTS 0x6
 
+#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_GET_ASSERTS_IN msgrequest */
 #define    MC_CMD_GET_ASSERTS_IN_LEN 4
 /* Set to clear assertion */
@@ -794,6 +806,8 @@
  */
 #define MC_CMD_LOG_CTRL 0x7
 
+#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_LOG_CTRL_IN msgrequest */
 #define    MC_CMD_LOG_CTRL_IN_LEN 8
 /* Log destination */
@@ -814,6 +828,8 @@
  */
 #define MC_CMD_GET_VERSION 0x8
 
+#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_VERSION_IN msgrequest */
 #define    MC_CMD_GET_VERSION_IN_LEN 0
 
@@ -870,6 +886,8 @@
  */
 #define MC_CMD_PTP 0xb
 
+#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_PTP_IN msgrequest */
 #define    MC_CMD_PTP_IN_LEN 1
 /* PTP operation code */
@@ -1404,6 +1422,8 @@
  */
 #define MC_CMD_CSR_READ32 0xc
 
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_CSR_READ32_IN msgrequest */
 #define    MC_CMD_CSR_READ32_IN_LEN 12
 /* Address */
@@ -1428,6 +1448,8 @@
  */
 #define MC_CMD_CSR_WRITE32 0xd
 
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_CSR_WRITE32_IN msgrequest */
 #define    MC_CMD_CSR_WRITE32_IN_LENMIN 12
 #define    MC_CMD_CSR_WRITE32_IN_LENMAX 252
@@ -1452,6 +1474,8 @@
  */
 #define MC_CMD_HP 0x54
 
+#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_HP_IN msgrequest */
 #define    MC_CMD_HP_IN_LEN 16
 /* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
@@ -1493,6 +1517,8 @@
  */
 #define MC_CMD_STACKINFO 0xf
 
+#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_STACKINFO_IN msgrequest */
 #define    MC_CMD_STACKINFO_IN_LEN 0
 
@@ -1513,6 +1539,8 @@
  */
 #define MC_CMD_MDIO_READ 0x10
 
+#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_MDIO_READ_IN msgrequest */
 #define    MC_CMD_MDIO_READ_IN_LEN 16
 /* Bus number; there are two MDIO buses: one for the internal PHY, and one for
@@ -1552,6 +1580,8 @@
  */
 #define MC_CMD_MDIO_WRITE 0x11
 
+#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_MDIO_WRITE_IN msgrequest */
 #define    MC_CMD_MDIO_WRITE_IN_LEN 20
 /* Bus number; there are two MDIO buses: one for the internal PHY, and one for
@@ -1591,6 +1621,8 @@
  */
 #define MC_CMD_DBI_WRITE 0x12
 
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DBI_WRITE_IN msgrequest */
 #define    MC_CMD_DBI_WRITE_IN_LENMIN 12
 #define    MC_CMD_DBI_WRITE_IN_LENMAX 252
@@ -1739,6 +1771,8 @@
  */
 #define MC_CMD_GET_BOARD_CFG 0x18
 
+#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_BOARD_CFG_IN msgrequest */
 #define    MC_CMD_GET_BOARD_CFG_IN_LEN 0
 
@@ -1778,6 +1812,8 @@
  */
 #define MC_CMD_DBI_READX 0x19
 
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DBI_READX_IN msgrequest */
 #define    MC_CMD_DBI_READX_IN_LENMIN 8
 #define    MC_CMD_DBI_READX_IN_LENMAX 248
@@ -1822,6 +1858,8 @@
  */
 #define MC_CMD_SET_RAND_SEED 0x1a
 
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SET_RAND_SEED_IN msgrequest */
 #define    MC_CMD_SET_RAND_SEED_IN_LEN 16
 /* Seed value. */
@@ -1863,6 +1901,8 @@
  */
 #define MC_CMD_DRV_ATTACH 0x1c
 
+#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_DRV_ATTACH_IN msgrequest */
 #define    MC_CMD_DRV_ATTACH_IN_LEN 12
 /* new state (0=detached, 1=attached) to set if UPDATE=1 */
@@ -1875,6 +1915,8 @@
 #define          MC_CMD_FW_FULL_FEATURED 0x0
 /* enum: Prefer to use firmware with fewer features but lower latency */
 #define          MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Only this option is allowed for non-admin functions */
+#define          MC_CMD_FW_DONT_CARE  0xffffffff
 
 /* MC_CMD_DRV_ATTACH_OUT msgresponse */
 #define    MC_CMD_DRV_ATTACH_OUT_LEN 4
@@ -1920,6 +1962,8 @@
  */
 #define MC_CMD_PORT_RESET 0x20
 
+#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_PORT_RESET_IN msgrequest */
 #define    MC_CMD_PORT_RESET_IN_LEN 0
 
@@ -1934,6 +1978,7 @@
  * extended version of the deprecated MC_CMD_PORT_RESET with added fields.
  */
 #define MC_CMD_ENTITY_RESET 0x20
+/*      MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */
 
 /* MC_CMD_ENTITY_RESET_IN msgrequest */
 #define    MC_CMD_ENTITY_RESET_IN_LEN 4
@@ -2023,6 +2068,8 @@
  */
 #define MC_CMD_PUTS 0x23
 
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_PUTS_IN msgrequest */
 #define    MC_CMD_PUTS_IN_LENMIN 13
 #define    MC_CMD_PUTS_IN_LENMAX 252
@@ -2050,6 +2097,8 @@
  */
 #define MC_CMD_GET_PHY_CFG 0x24
 
+#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_PHY_CFG_IN msgrequest */
 #define    MC_CMD_GET_PHY_CFG_IN_LEN 0
 
@@ -2149,6 +2198,8 @@
  */
 #define MC_CMD_START_BIST 0x25
 
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_START_BIST_IN msgrequest */
 #define    MC_CMD_START_BIST_IN_LEN 4
 /* Type of test. */
@@ -2185,6 +2236,8 @@
  */
 #define MC_CMD_POLL_BIST 0x26
 
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_POLL_BIST_IN msgrequest */
 #define    MC_CMD_POLL_BIST_IN_LEN 0
 
@@ -2344,6 +2397,8 @@
  */
 #define MC_CMD_GET_LOOPBACK_MODES 0x28
 
+#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
 #define    MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
 
@@ -2463,6 +2518,8 @@
  */
 #define MC_CMD_GET_LINK 0x29
 
+#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_LINK_IN msgrequest */
 #define    MC_CMD_GET_LINK_IN_LEN 0
 
@@ -2519,6 +2576,8 @@
  */
 #define MC_CMD_SET_LINK 0x2a
 
+#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_SET_LINK_IN msgrequest */
 #define    MC_CMD_SET_LINK_IN_LEN 16
 /* ??? */
@@ -2550,6 +2609,8 @@
  */
 #define MC_CMD_SET_ID_LED 0x2b
 
+#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_SET_ID_LED_IN msgrequest */
 #define    MC_CMD_SET_ID_LED_IN_LEN 4
 /* Set LED state. */
@@ -2568,6 +2629,8 @@
  */
 #define MC_CMD_SET_MAC 0x2c
 
+#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_SET_MAC_IN msgrequest */
 #define    MC_CMD_SET_MAC_IN_LEN 24
 /* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
@@ -2609,6 +2672,8 @@
  */
 #define MC_CMD_PHY_STATS 0x2d
 
+#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_PHY_STATS_IN msgrequest */
 #define    MC_CMD_PHY_STATS_IN_LEN 8
 /* ??? */
@@ -2687,8 +2752,10 @@
  */
 #define MC_CMD_MAC_STATS 0x2e
 
+#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_MAC_STATS_IN msgrequest */
-#define    MC_CMD_MAC_STATS_IN_LEN 16
+#define    MC_CMD_MAC_STATS_IN_LEN 20
 /* ??? */
 #define       MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
 #define       MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
@@ -2710,6 +2777,8 @@
 #define        MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
 #define        MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
 #define       MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+/* Port ID so vadapter stats can be provided */
+#define       MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
 
 /* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
 #define    MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -2824,11 +2893,31 @@
 /* enum: RXDP counter: Number of times an emergency descriptor fetch was
  * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
  */
-#define          MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS  0x47
+#define          MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS  0x47
 /* enum: RXDP counter: Number of times the DPCPU waited for an existing
  * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
  */
-#define          MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS  0x48
+#define          MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS  0x48
+#define          MC_CMD_MAC_VADAPTER_RX_DMABUF_START  0x4c /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS  0x4c /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES  0x4d /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS  0x4e /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES  0x4f /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS  0x50 /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES  0x51 /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS  0x52 /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_BAD_BYTES  0x53 /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_OVERFLOW  0x54 /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_DMABUF_START  0x57 /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS  0x57 /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES  0x58 /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS  0x59 /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES  0x5a /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS  0x5b /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES  0x5c /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS  0x5d /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_BAD_BYTES  0x5e /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_OVERFLOW  0x5f /* enum */
 /* enum: Start of GMAC stats buffer space, for Siena only. */
 #define          MC_CMD_GMAC_DMABUF_START  0x40
 /* enum: End of GMAC stats buffer space, for Siena only. */
@@ -2926,6 +3015,8 @@
  */
 #define MC_CMD_WOL_FILTER_SET 0x32
 
+#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_WOL_FILTER_SET_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_LEN 192
 #define       MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
@@ -3020,6 +3111,8 @@
  */
 #define MC_CMD_WOL_FILTER_REMOVE 0x33
 
+#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
 #define       MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
@@ -3035,6 +3128,8 @@
  */
 #define MC_CMD_WOL_FILTER_RESET 0x34
 
+#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_RESET_IN_LEN 4
 #define       MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
@@ -3069,6 +3164,8 @@
  */
 #define MC_CMD_NVRAM_TYPES 0x36
 
+#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_TYPES_IN msgrequest */
 #define    MC_CMD_NVRAM_TYPES_IN_LEN 0
 
@@ -3125,6 +3222,8 @@
  */
 #define MC_CMD_NVRAM_INFO 0x37
 
+#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_INFO_IN msgrequest */
 #define    MC_CMD_NVRAM_INFO_IN_LEN 4
 #define       MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
@@ -3157,6 +3256,8 @@
  */
 #define MC_CMD_NVRAM_UPDATE_START 0x38
 
+#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_UPDATE_START_IN msgrequest */
 #define    MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
 #define       MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
@@ -3175,6 +3276,8 @@
  */
 #define MC_CMD_NVRAM_READ 0x39
 
+#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_READ_IN msgrequest */
 #define    MC_CMD_NVRAM_READ_IN_LEN 12
 #define       MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
@@ -3202,6 +3305,8 @@
  */
 #define MC_CMD_NVRAM_WRITE 0x3a
 
+#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_WRITE_IN msgrequest */
 #define    MC_CMD_NVRAM_WRITE_IN_LENMIN 13
 #define    MC_CMD_NVRAM_WRITE_IN_LENMAX 252
@@ -3228,6 +3333,8 @@
  */
 #define MC_CMD_NVRAM_ERASE 0x3b
 
+#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_ERASE_IN msgrequest */
 #define    MC_CMD_NVRAM_ERASE_IN_LEN 12
 #define       MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
@@ -3248,6 +3355,8 @@
  */
 #define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
 
+#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest */
 #define    MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
 #define       MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
@@ -3279,6 +3388,8 @@
  */
 #define MC_CMD_REBOOT 0x3d
 
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_REBOOT_IN msgrequest */
 #define    MC_CMD_REBOOT_IN_LEN 4
 #define       MC_CMD_REBOOT_IN_FLAGS_OFST 0
@@ -3316,6 +3427,8 @@
  */
 #define MC_CMD_REBOOT_MODE 0x3f
 
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_REBOOT_MODE_IN msgrequest */
 #define    MC_CMD_REBOOT_MODE_IN_LEN 4
 #define       MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
@@ -3368,6 +3481,8 @@
  */
 #define MC_CMD_SENSOR_INFO 0x41
 
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SENSOR_INFO_IN msgrequest */
 #define    MC_CMD_SENSOR_INFO_IN_LEN 0
 
@@ -3542,6 +3657,8 @@
  */
 #define MC_CMD_READ_SENSORS 0x42
 
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_READ_SENSORS_IN msgrequest */
 #define    MC_CMD_READ_SENSORS_IN_LEN 8
 /* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
@@ -3602,6 +3719,8 @@
  */
 #define MC_CMD_GET_PHY_STATE 0x43
 
+#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_PHY_STATE_IN msgrequest */
 #define    MC_CMD_GET_PHY_STATE_IN_LEN 0
 
@@ -3636,6 +3755,8 @@
  */
 #define MC_CMD_WOL_FILTER_GET 0x45
 
+#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_WOL_FILTER_GET_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_GET_IN_LEN 0
 
@@ -3651,6 +3772,8 @@
  */
 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
 
+#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
@@ -3692,6 +3815,8 @@
  */
 #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
 
+#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
 #define    MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
 #define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
@@ -3722,6 +3847,8 @@
  */
 #define MC_CMD_TESTASSERT 0x49
 
+#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_TESTASSERT_IN msgrequest */
 #define    MC_CMD_TESTASSERT_IN_LEN 0
 
@@ -3739,6 +3866,8 @@
  */
 #define MC_CMD_WORKAROUND 0x4a
 
+#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_WORKAROUND_IN msgrequest */
 #define    MC_CMD_WORKAROUND_IN_LEN 8
 #define       MC_CMD_WORKAROUND_IN_TYPE_OFST 0
@@ -3765,6 +3894,8 @@
  */
 #define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
 
+#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
 #define    MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
 #define       MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
@@ -3788,6 +3919,8 @@
  */
 #define MC_CMD_NVRAM_TEST 0x4c
 
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_TEST_IN msgrequest */
 #define    MC_CMD_NVRAM_TEST_IN_LEN 4
 #define       MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
@@ -3849,6 +3982,8 @@
  */
 #define MC_CMD_SENSOR_SET_LIMS 0x4e
 
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
 #define    MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
 #define       MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
@@ -3890,6 +4025,8 @@
  */
 #define MC_CMD_NVRAM_PARTITIONS 0x51
 
+#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
 #define    MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
 
@@ -3913,6 +4050,8 @@
  */
 #define MC_CMD_NVRAM_METADATA 0x52
 
+#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_METADATA_IN msgrequest */
 #define    MC_CMD_NVRAM_METADATA_IN_LEN 4
 /* Partition type ID code */
@@ -3958,6 +4097,8 @@
  */
 #define MC_CMD_GET_MAC_ADDRESSES 0x55
 
+#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
 #define    MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
 
@@ -4087,11 +4228,66 @@
 
 
 /***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define    MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+/* enum: Bug 17230 work around. */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug 35017 work around (A64 tables must be identity map). */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define    MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set; must be a
+ * VF, e.g. VF 1,3 = 0x00030001
+ */
+#define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO       0x0 /* enum */
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP         0x1 /* enum */
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN       0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define          MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE         0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define    MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+
+
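
Setting NEW_MODE to MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE turns the
command into a pure read: the current mode comes back in OLD_MODE. A
hedged sketch of querying a VF's link state mode from its parent PF (pf,
vf_i and mode are placeholder variables):

	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_MODE_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
			      LINK_STATE_MODE_IN_FUNCTION_PF, pf,
			      LINK_STATE_MODE_IN_FUNCTION_VF, vf_i);
	MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE,
		       MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE);
	rc = efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (!rc && outlen >= MC_CMD_LINK_STATE_MODE_OUT_LEN)
		mode = MCDI_DWORD(outbuf, LINK_STATE_MODE_OUT_OLD_MODE);
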
+/***********************************/
 /* MC_CMD_READ_REGS
  * Get a dump of the MCPU registers
  */
 #define MC_CMD_READ_REGS 0x50
 
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_READ_REGS_IN msgrequest */
 #define    MC_CMD_READ_REGS_IN_LEN 0
 
@@ -4115,6 +4311,8 @@
  */
 #define MC_CMD_INIT_EVQ 0x80
 
+#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_INIT_EVQ_IN msgrequest */
 #define    MC_CMD_INIT_EVQ_IN_LENMIN 44
 #define    MC_CMD_INIT_EVQ_IN_LENMAX 548
@@ -4213,6 +4411,8 @@
  */
 #define MC_CMD_INIT_RXQ 0x81
 
+#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_INIT_RXQ_IN msgrequest */
 #define    MC_CMD_INIT_RXQ_IN_LENMIN 36
 #define    MC_CMD_INIT_RXQ_IN_LENMAX 252
@@ -4265,6 +4465,8 @@
  */
 #define MC_CMD_INIT_TXQ 0x82
 
+#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_INIT_TXQ_IN msgrequest */
 #define    MC_CMD_INIT_TXQ_IN_LENMIN 36
 #define    MC_CMD_INIT_TXQ_IN_LENMAX 252
@@ -4322,6 +4524,8 @@
  */
 #define MC_CMD_FINI_EVQ 0x83
 
+#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_FINI_EVQ_IN msgrequest */
 #define    MC_CMD_FINI_EVQ_IN_LEN 4
 /* Instance of EVQ to destroy. Should be the same instance as that previously
@@ -4339,6 +4543,8 @@
  */
 #define MC_CMD_FINI_RXQ 0x84
 
+#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_FINI_RXQ_IN msgrequest */
 #define    MC_CMD_FINI_RXQ_IN_LEN 4
 /* Instance of RXQ to destroy */
@@ -4354,6 +4560,8 @@
  */
 #define MC_CMD_FINI_TXQ 0x85
 
+#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_FINI_TXQ_IN msgrequest */
 #define    MC_CMD_FINI_TXQ_IN_LEN 4
 /* Instance of TXQ to destroy */
@@ -4369,6 +4577,8 @@
  */
 #define MC_CMD_DRIVER_EVENT 0x86
 
+#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_DRIVER_EVENT_IN msgrequest */
 #define    MC_CMD_DRIVER_EVENT_IN_LEN 12
 /* Handle of target EVQ */
@@ -4392,6 +4602,8 @@
  */
 #define MC_CMD_PROXY_CMD 0x5b
 
+#define MC_CMD_0x5b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_PROXY_CMD_IN msgrequest */
 #define    MC_CMD_PROXY_CMD_IN_LEN 4
 /* The handle of the target function. */
@@ -4414,6 +4626,8 @@
  */
 #define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
 
+#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
 /* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
 #define    MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
 /* Owner ID to use */
@@ -4437,6 +4651,8 @@
  */
 #define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
 
+#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
 /* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
@@ -4463,6 +4679,8 @@
  */
 #define MC_CMD_FREE_BUFTBL_CHUNK 0x89
 
+#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
 /* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
 #define    MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
 #define       MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
@@ -4477,6 +4695,8 @@
  */
 #define MC_CMD_FILTER_OP 0x8a
 
+#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_FILTER_OP_IN msgrequest */
 #define    MC_CMD_FILTER_OP_IN_LEN 108
 /* identifies the type of operation requested */
@@ -4637,6 +4857,8 @@
  */
 #define MC_CMD_GET_PARSER_DISP_INFO 0xe4
 
+#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
 #define    MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
 /* identifies the type of operation requested */
@@ -4669,6 +4891,8 @@
  */
 #define MC_CMD_PARSER_DISP_RW 0xe5
 
+#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_PARSER_DISP_RW_IN msgrequest */
 #define    MC_CMD_PARSER_DISP_RW_IN_LEN 32
 /* identifies the target of the operation */
@@ -4719,6 +4943,8 @@
  */
 #define MC_CMD_GET_PF_COUNT 0xb6
 
+#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_PF_COUNT_IN msgrequest */
 #define    MC_CMD_GET_PF_COUNT_IN_LEN 0
 
@@ -4750,6 +4976,8 @@
  */
 #define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
 
+#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
 #define    MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
 
@@ -4765,6 +4993,8 @@
  */
 #define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
 
+#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
 #define    MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
 /* Identifies the port assignment for this function. */
@@ -4780,6 +5010,8 @@
  */
 #define MC_CMD_ALLOC_VIS 0x8b
 
+#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_ALLOC_VIS_IN msgrequest */
 #define    MC_CMD_ALLOC_VIS_IN_LEN 8
 /* The minimum number of VIs that is acceptable */
@@ -4804,6 +5036,8 @@
  */
 #define MC_CMD_FREE_VIS 0x8c
 
+#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_FREE_VIS_IN msgrequest */
 #define    MC_CMD_FREE_VIS_IN_LEN 0
 
@@ -4817,6 +5051,8 @@
  */
 #define MC_CMD_GET_SRIOV_CFG 0xba
 
+#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
 #define    MC_CMD_GET_SRIOV_CFG_IN_LEN 0
 
@@ -4841,6 +5077,8 @@
  */
 #define MC_CMD_SET_SRIOV_CFG 0xbb
 
+#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
 #define    MC_CMD_SET_SRIOV_CFG_IN_LEN 20
 /* Number of VFs currently enabled. */
@@ -4870,6 +5108,8 @@
  */
 #define MC_CMD_GET_VI_ALLOC_INFO 0x8d
 
+#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
 #define    MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
 
@@ -4889,6 +5129,8 @@
  */
 #define MC_CMD_DUMP_VI_STATE 0x8e
 
+#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_DUMP_VI_STATE_IN msgrequest */
 #define    MC_CMD_DUMP_VI_STATE_IN_LEN 4
 /* The VI number to query. */
@@ -4998,6 +5240,8 @@
  */
 #define MC_CMD_ALLOC_PIOBUF 0x8f
 
+#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
 /* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
 #define    MC_CMD_ALLOC_PIOBUF_IN_LEN 0
 
@@ -5013,6 +5257,8 @@
  */
 #define MC_CMD_FREE_PIOBUF 0x90
 
+#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
 /* MC_CMD_FREE_PIOBUF_IN msgrequest */
 #define    MC_CMD_FREE_PIOBUF_IN_LEN 4
 /* Handle for allocated push I/O buffer. */
@@ -5028,6 +5274,8 @@
  */
 #define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
 
+#define MC_CMD_0xb0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
 #define    MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
 /* VI number to get information for. */
@@ -5062,6 +5310,8 @@
  */
 #define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
 
+#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
 #define    MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
 /* VI number to set information for. */
@@ -5096,6 +5346,8 @@
  */
 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
 
+#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
 #define    MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
 #define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
@@ -5157,6 +5409,8 @@
  */
 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
 
+#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
 #define    MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
 #define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
@@ -5203,6 +5457,8 @@
  */
 #define MC_CMD_SATELLITE_DOWNLOAD 0x91
 
+#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
  * are subtle, and so downloads must proceed in a number of phases.
  *
@@ -5318,6 +5574,7 @@
  */
 #define MC_CMD_GET_CAPABILITIES 0xbe
 
+#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 /* MC_CMD_GET_CAPABILITIES_IN msgrequest */
 #define    MC_CMD_GET_CAPABILITIES_IN_LEN 0
 
@@ -5343,6 +5600,8 @@
 #define        MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
 #define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
 /* RxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
 #define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
@@ -5433,6 +5692,8 @@
  */
 #define MC_CMD_TCM_BUCKET_ALLOC 0xb2
 
+#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
 #define    MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
 
@@ -5448,6 +5709,8 @@
  */
 #define MC_CMD_TCM_BUCKET_FREE 0xb3
 
+#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
 #define    MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
 /* the bucket id */
@@ -5463,6 +5726,8 @@
  */
 #define MC_CMD_TCM_BUCKET_INIT 0xb4
 
+#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
 #define    MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
 /* the bucket id */
@@ -5480,6 +5745,8 @@
  */
 #define MC_CMD_TCM_TXQ_INIT 0xb5
 
+#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
 #define    MC_CMD_TCM_TXQ_INIT_IN_LEN 28
 /* the txq id */
@@ -5511,6 +5778,8 @@
  */
 #define MC_CMD_LINK_PIOBUF 0x92
 
+#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
 /* MC_CMD_LINK_PIOBUF_IN msgrequest */
 #define    MC_CMD_LINK_PIOBUF_IN_LEN 8
 /* Handle for allocated push I/O buffer. */
@@ -5528,6 +5797,8 @@
  */
 #define MC_CMD_UNLINK_PIOBUF 0x93
 
+#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
 /* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
 #define    MC_CMD_UNLINK_PIOBUF_IN_LEN 4
 /* Function Local Instance (VI) number. */
@@ -5543,6 +5814,8 @@
  */
 #define MC_CMD_VSWITCH_ALLOC 0x94
 
+#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
 #define    MC_CMD_VSWITCH_ALLOC_IN_LEN 16
 /* The port to connect to the v-switch's upstream port. */
@@ -5572,6 +5845,8 @@
  */
 #define MC_CMD_VSWITCH_FREE 0x95
 
+#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_VSWITCH_FREE_IN msgrequest */
 #define    MC_CMD_VSWITCH_FREE_IN_LEN 4
 /* The port to which the v-switch is connected. */
@@ -5587,6 +5862,8 @@
  */
 #define MC_CMD_VPORT_ALLOC 0x96
 
+#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_VPORT_ALLOC_IN msgrequest */
 #define    MC_CMD_VPORT_ALLOC_IN_LEN 20
 /* The port to which the v-switch is connected. */
@@ -5636,6 +5913,8 @@
  */
 #define MC_CMD_VPORT_FREE 0x97
 
+#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_VPORT_FREE_IN msgrequest */
 #define    MC_CMD_VPORT_FREE_IN_LEN 4
 /* The handle of the v-port */
@@ -5651,8 +5930,10 @@
  */
 #define MC_CMD_VADAPTOR_ALLOC 0x98
 
+#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
-#define    MC_CMD_VADAPTOR_ALLOC_IN_LEN 16
+#define    MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
 /* The port to connect to the v-adaptor's port. */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
 /* Flags controlling v-adaptor creation */
@@ -5661,6 +5942,19 @@
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
 /* The number of VLAN tags to strip on receive */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+/* The number of VLAN tags to transparently insert/remove. */
+#define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+/* The actual VLAN tags to insert/remove */
+#define       MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+/* The MAC address to assign to this v-adaptor */
+#define       MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24
+#define       MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6
+/* enum: Derive the MAC address from the upstream port */
+#define          MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC  0x0
 
 /* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
 #define    MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
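
The request has grown from 16 to 30 bytes so that a v-adaptor can be
created with transparent VLAN tags and an explicit MAC address; leaving
MACADDR zeroed selects MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC. A hedged sketch
(port_id and vlan are placeholders):

	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
	MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS, 1);
	MCDI_POPULATE_DWORD_1(inbuf, VADAPTOR_ALLOC_IN_VLAN_TAGS,
			      VADAPTOR_ALLOC_IN_VLAN_TAG_0, vlan);
	/* MACADDR left as zeroes: derive the MAC from the upstream port */
	rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
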
@@ -5672,6 +5966,8 @@
  */
 #define MC_CMD_VADAPTOR_FREE 0x99
 
+#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_VADAPTOR_FREE_IN msgrequest */
 #define    MC_CMD_VADAPTOR_FREE_IN_LEN 4
 /* The port to which the v-adaptor is connected. */
@@ -5682,11 +5978,53 @@
 
 
 /***********************************/
+/* MC_CMD_VADAPTOR_SET_MAC
+ * assign a new MAC address to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_SET_MAC 0x5d
+
+#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */
+#define    MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
+/* The port to which the v-adaptor is connected. */
+#define       MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The new MAC address to assign to this v-adaptor */
+#define       MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
+#define       MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
+
+/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */
+#define    MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_GET_MAC
+ * read the MAC address assigned to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_GET_MAC 0x5e
+
+#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
+#define    MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define       MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
+#define    MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
+/* The MAC address assigned to this v-adaptor */
+#define       MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
+#define       MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
+
+
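
These two commands give a function that owns a v-adaptor a way to manage
its MAC address over MCDI. A hedged sketch of a setter (port_id is a
placeholder; ether_addr_copy() and MCDI_PTR() are used as elsewhere in this
driver):

	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
			efx->net_dev->dev_addr);
	rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
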
+/***********************************/
 /* MC_CMD_EVB_PORT_ASSIGN
  * assign a port to a PCI function.
  */
 #define MC_CMD_EVB_PORT_ASSIGN 0x9a
 
+#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
 #define    MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
 /* The port to assign. */
@@ -5708,6 +6046,8 @@
  */
 #define MC_CMD_RDWR_A64_REGIONS 0x9b
 
+#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
 #define    MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
 #define       MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
@@ -5736,6 +6076,8 @@
  */
 #define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
 
+#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
 /* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
 #define    MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
 /* The handle of the owning upstream port */
@@ -5753,6 +6095,8 @@
  */
 #define MC_CMD_ONLOAD_STACK_FREE 0x9d
 
+#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
 /* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
 #define    MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
 /* The handle of the Onload stack */
@@ -5768,6 +6112,8 @@
  */
 #define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
 
+#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
 #define    MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
 /* The handle of the owning upstream port */
@@ -5800,6 +6146,8 @@
  */
 #define MC_CMD_RSS_CONTEXT_FREE 0x9f
 
+#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
 #define    MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
 /* The handle of the RSS context */
@@ -5815,6 +6163,8 @@
  */
 #define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
 
+#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
 #define    MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
 /* The handle of the RSS context */
@@ -5833,6 +6183,8 @@
  */
 #define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
 
+#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
 #define    MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
 /* The handle of the RSS context */
@@ -5851,6 +6203,8 @@
  */
 #define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
 
+#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
 #define    MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
 /* The handle of the RSS context */
@@ -5869,6 +6223,8 @@
  */
 #define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
 
+#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
 #define    MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
 /* The handle of the RSS context */
@@ -5887,6 +6243,8 @@
  */
 #define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
 
+#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
 #define    MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
 /* The handle of the RSS context */
@@ -5912,6 +6270,8 @@
  */
 #define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
 
+#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
 #define    MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
 /* The handle of the RSS context */
@@ -5937,6 +6297,8 @@
  */
 #define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
 
+#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
 #define    MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
 /* The handle of the owning upstream port */
@@ -5959,6 +6321,8 @@
  */
 #define MC_CMD_DOT1P_MAPPING_FREE 0xa5
 
+#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
 #define    MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
 /* The handle of the .1p mapping */
@@ -5974,6 +6338,8 @@
  */
 #define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
 
+#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
 #define    MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
 /* The handle of the .1p mapping */
@@ -5994,6 +6360,8 @@
  */
 #define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
 
+#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
 #define    MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
 /* The handle of the .1p mapping */
@@ -6014,6 +6382,8 @@
  */
 #define MC_CMD_GET_VECTOR_CFG 0xbf
 
+#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
 #define    MC_CMD_GET_VECTOR_CFG_IN_LEN 0
 
@@ -6033,6 +6403,8 @@
  */
 #define MC_CMD_SET_VECTOR_CFG 0xc0
 
+#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
 #define    MC_CMD_SET_VECTOR_CFG_IN_LEN 12
 /* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
@@ -6423,6 +6795,8 @@
  */
 #define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
 
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
 #define    MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
 /* The handle of the v-port */
@@ -6441,6 +6815,8 @@
  */
 #define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
 
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
 #define    MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
 /* The handle of the v-port */
@@ -6459,6 +6835,8 @@
  */
 #define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
 
+#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
 #define    MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
 /* The handle of the v-port */
@@ -6486,6 +6864,8 @@
  */
 #define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
 
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
 /* Index of the first buffer table entry. */
@@ -6510,6 +6890,8 @@
  */
 #define MC_CMD_SET_RXDP_CONFIG 0xc1
 
+#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
 #define    MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
 #define       MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
@@ -6526,6 +6908,8 @@
  */
 #define MC_CMD_GET_RXDP_CONFIG 0xc2
 
+#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
 #define    MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
 
@@ -6890,6 +7274,8 @@
  */
 #define MC_CMD_GET_CLOCK 0xac
 
+#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_CLOCK_IN msgrequest */
 #define    MC_CMD_GET_CLOCK_IN_LEN 0
 
@@ -6907,6 +7293,8 @@
  */
 #define MC_CMD_SET_CLOCK 0xad
 
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SET_CLOCK_IN msgrequest */
 #define    MC_CMD_SET_CLOCK_IN_LEN 12
 /* Requested system frequency in MHz; 0 leaves unchanged. */
@@ -6932,6 +7320,8 @@
  */
 #define MC_CMD_DPCPU_RPC 0xae
 
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DPCPU_RPC_IN msgrequest */
 #define    MC_CMD_DPCPU_RPC_IN_LEN 36
 #define       MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
@@ -7016,6 +7406,8 @@
  */
 #define MC_CMD_TRIGGER_INTERRUPT 0xe3
 
+#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
 #define    MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
 /* Interrupt level relative to base for function. */
@@ -7031,6 +7423,8 @@
  */
 #define MC_CMD_CAP_BLK_READ 0xe7
 
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_CAP_BLK_READ_IN msgrequest */
 #define    MC_CMD_CAP_BLK_READ_IN_LEN 12
 #define       MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
@@ -7055,6 +7449,8 @@
  */
 #define MC_CMD_DUMP_DO 0xe8
 
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DUMP_DO_IN msgrequest */
 #define    MC_CMD_DUMP_DO_IN_LEN 52
 #define       MC_CMD_DUMP_DO_IN_PADDING_OFST 0
@@ -7108,6 +7504,8 @@
  */
 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
 
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
 #define    MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
@@ -7151,6 +7549,8 @@
  */
 #define MC_CMD_SET_PSU 0xea
 
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SET_PSU_IN msgrequest */
 #define    MC_CMD_SET_PSU_IN_LEN 12
 #define       MC_CMD_SET_PSU_IN_PARAM_OFST 0
@@ -7171,6 +7571,8 @@
  */
 #define MC_CMD_GET_FUNCTION_INFO 0xec
 
+#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
 #define    MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
 
@@ -7188,6 +7590,8 @@
  */
 #define MC_CMD_ENABLE_OFFLINE_BIST 0xed
 
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
 #define    MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
 
@@ -7203,6 +7607,8 @@
  */
 #define MC_CMD_UART_SEND_DATA 0xee
 
+#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_UART_SEND_DATA_OUT msgrequest */
 #define    MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
 #define    MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
@@ -7231,6 +7637,8 @@
  */
 #define MC_CMD_UART_RECV_DATA 0xef
 
+#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_UART_RECV_DATA_OUT msgrequest */
 #define    MC_CMD_UART_RECV_DATA_OUT_LEN 16
 /* CRC32 over OFFSET, LENGTH, RESERVED */
@@ -7266,6 +7674,8 @@
  */
 #define MC_CMD_READ_FUSES 0xf0
 
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_READ_FUSES_IN msgrequest */
 #define    MC_CMD_READ_FUSES_IN_LEN 8
 /* Offset in OTP to read */
@@ -7292,6 +7702,8 @@
  */
 #define MC_CMD_KR_TUNE 0xf1
 
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_KR_TUNE_IN msgrequest */
 #define    MC_CMD_KR_TUNE_IN_LENMIN 4
 #define    MC_CMD_KR_TUNE_IN_LENMAX 252
@@ -7550,6 +7962,8 @@
  */
 #define MC_CMD_PCIE_TUNE 0xf2
 
+#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_PCIE_TUNE_IN msgrequest */
 #define    MC_CMD_PCIE_TUNE_IN_LENMIN 4
 #define    MC_CMD_PCIE_TUNE_IN_LENMAX 252
@@ -7711,6 +8125,8 @@
  */
 #define MC_CMD_LICENSING 0xf3
 
+#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_LICENSING_IN msgrequest */
 #define    MC_CMD_LICENSING_IN_LEN 4
 /* identifies the type of operation requested */
@@ -7756,6 +8172,8 @@
  */
 #define MC_CMD_MC2MC_PROXY 0xf4
 
+#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_MC2MC_PROXY_IN msgrequest */
 #define    MC_CMD_MC2MC_PROXY_IN_LEN 0
 
@@ -7771,6 +8189,8 @@
  */
 #define MC_CMD_GET_LICENSED_APP_STATE 0xf5
 
+#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
 #define    MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
 /* application ID to query (LICENSED_APP_ID_xxx) */
@@ -7792,6 +8212,8 @@
  */
 #define MC_CMD_LICENSED_APP_OP 0xf6
 
+#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_LICENSED_APP_OP_IN msgrequest */
 #define    MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
 #define    MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
@@ -7847,6 +8269,8 @@
  */
 #define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
 
+#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
 #define    MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
 /* configuration flags */
@@ -7881,6 +8305,8 @@
  */
 #define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
 
+#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
 #define    MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
 
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index fb19b70..7f295c4 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -865,6 +865,7 @@
 
 	BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
 
+	/* This has no effect on EF10 */
 	ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
 			efx->net_dev->dev_addr);
 
@@ -923,6 +924,7 @@
 static int efx_mcdi_mac_stats(struct efx_nic *efx,
 			      enum efx_stats_action action, int clear)
 {
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
 	int rc;
 	int change = action == EFX_STATS_PULL ? 0 : 1;
@@ -944,9 +946,14 @@
 			      MAC_STATS_IN_PERIODIC_NOEVENT, 1,
 			      MAC_STATS_IN_PERIOD_MS, period);
 	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
 
-	rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
-			  NULL, 0, NULL);
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+				NULL, 0, NULL);
+	/* Expect ENOENT if DMA queues have not been set up */
+	if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues)))
+		efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf),
+				       NULL, 0, rc);
 	return rc;
 }
 
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 325dd94..d72f522 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -25,6 +25,7 @@
 #include <linux/highmem.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
+#include <linux/rwsem.h>
 #include <linux/vmalloc.h>
 #include <linux/i2c.h>
 #include <linux/mtd/mtd.h>
@@ -793,7 +794,6 @@
 	efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
 };
 
-struct efx_vf;
 struct vfdi_status;
 
 /**
@@ -897,7 +897,8 @@
  * @loopback_mode: Loopback status
  * @loopback_modes: Supported loopback mode bitmask
  * @loopback_selftest: Offline self-test private state
- * @filter_lock: Filter table lock
+ * @filter_sem: Filter table rw_semaphore, for freeing the table
+ * @filter_lock: Filter table lock, for mere content changes
  * @filter_state: Architecture-dependent filter table state
  * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
  *	indexed by filter ID
@@ -909,7 +910,6 @@
  *	completed (either success or failure). Not used when MCDI is used to
  *	flush receive queues.
  * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
- * @vf: Array of &struct efx_vf objects.
  * @vf_count: Number of VFs intended to be enabled.
  * @vf_init_count: Number of VFs that have been fully initialised.
  * @vi_scale: log2 number of vnics per VF.
@@ -1040,6 +1040,7 @@
 
 	void *loopback_selftest;
 
+	struct rw_semaphore filter_sem;
 	spinlock_t filter_lock;
 	void *filter_state;
 #ifdef CONFIG_RFS_ACCEL
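
The new @filter_sem protects the lifetime of @filter_state while
@filter_lock continues to protect its contents, so the table can be freed
and rebuilt (e.g. across an MC reboot) without racing filter updates. A
hedged sketch of the expected locking order (illustrative, not a literal
excerpt):

	down_read(&efx->filter_sem);
	spin_lock_bh(&efx->filter_lock);
	/* ... modify an entry within efx->filter_state ... */
	spin_unlock_bh(&efx->filter_lock);
	up_read(&efx->filter_sem);

	/* Freeing or replacing filter_state itself takes the
	 * semaphore exclusively: down_write(&efx->filter_sem).
	 */
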
@@ -1053,7 +1054,6 @@
 	wait_queue_head_t flush_wq;
 
 #ifdef CONFIG_SFC_SRIOV
-	struct efx_vf *vf;
 	unsigned vf_count;
 	unsigned vf_init_count;
 	unsigned vi_scale;
@@ -1092,6 +1092,7 @@
 
 /**
  * struct efx_nic_type - Efx device type definition
+ * @mem_bar: The memory BAR used by this controller
  * @mem_map_size: Get memory BAR mapped size
  * @probe: Probe the controller
  * @remove: Free resources allocated by probe()
@@ -1204,6 +1205,7 @@
  * @ptp_set_ts_config: Set hardware timestamp configuration.  The flags
  *	and tx_type will already have been validated but this operation
  *	must validate and update rx_filter.
+ * @set_mac_address: Set the MAC address of the device
  * @revision: Hardware architecture revision
  * @txd_ptr_tbl_base: TX descriptor ring base address
  * @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1226,6 +1228,8 @@
  * @hwtstamp_filters: Mask of hardware timestamp filter types supported
  */
 struct efx_nic_type {
+	bool is_vf;
+	unsigned int mem_bar;
 	unsigned int (*mem_map_size)(struct efx_nic *efx);
 	int (*probe)(struct efx_nic *efx);
 	void (*remove)(struct efx_nic *efx);
@@ -1277,7 +1281,8 @@
 	void (*tx_init)(struct efx_tx_queue *tx_queue);
 	void (*tx_remove)(struct efx_tx_queue *tx_queue);
 	void (*tx_write)(struct efx_tx_queue *tx_queue);
-	void (*rx_push_rss_config)(struct efx_nic *efx);
+	int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
+				  const u32 *rx_indir_table);
 	int (*rx_probe)(struct efx_rx_queue *rx_queue);
 	void (*rx_init)(struct efx_rx_queue *rx_queue);
 	void (*rx_remove)(struct efx_rx_queue *rx_queue);
@@ -1330,11 +1335,28 @@
 	int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
 	int (*ptp_set_ts_config)(struct efx_nic *efx,
 				 struct hwtstamp_config *init);
+	int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
 	int (*sriov_init)(struct efx_nic *efx);
 	void (*sriov_fini)(struct efx_nic *efx);
-	void (*sriov_mac_address_changed)(struct efx_nic *efx);
 	bool (*sriov_wanted)(struct efx_nic *efx);
 	void (*sriov_reset)(struct efx_nic *efx);
+	void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i);
+	int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, u8 *mac);
+	int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan,
+				 u8 qos);
+	int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i,
+				     bool spoofchk);
+	int (*sriov_get_vf_config)(struct efx_nic *efx, int vf_i,
+				   struct ifla_vf_info *ivi);
+	int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i,
+				       int link_state);
+	int (*sriov_get_phys_port_id)(struct efx_nic *efx,
+				      struct netdev_phys_item_id *ppid);
+	int (*vswitching_probe)(struct efx_nic *efx);
+	int (*vswitching_restore)(struct efx_nic *efx);
+	void (*vswitching_remove)(struct efx_nic *efx);
+	int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr);
+	int (*set_mac_address)(struct efx_nic *efx);
 
 	int revision;
 	unsigned int txd_ptr_tbl_base;
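
The per-type SR-IOV hooks let a shared set of net_device_ops dispatch to
Siena- or EF10-specific implementations. A hedged sketch of the dispatch
pattern, loosely modelled on how the driver core forwards ndo_set_vf_mac
(the fallback value is illustrative):

	static int efx_sriov_set_vf_mac(struct net_device *net_dev,
					int vf_i, u8 *mac)
	{
		struct efx_nic *efx = netdev_priv(net_dev);

		if (efx->type->sriov_set_vf_mac)
			return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
		return -EOPNOTSUPP;
	}
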
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 93d10cbb..31ff908 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -381,6 +381,7 @@
  * @efx: Pointer back to main interface structure
  * @wol_filter_id: Wake-on-LAN packet filter id
  * @stats: Hardware statistics
+ * @vf: Array of &struct siena_vf objects
  * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
  * @vfdi_status: Common VFDI status page to be dmad to VF address space.
  * @local_addr_list: List of local addresses. Protected by %local_lock.
@@ -394,6 +395,7 @@
 	int wol_filter_id;
 	u64 stats[SIENA_STAT_COUNT];
 #ifdef CONFIG_SFC_SRIOV
+	struct siena_vf *vf;
 	struct efx_channel *vfdi_channel;
 	unsigned vf_buftbl_base;
 	struct efx_buffer vfdi_status;
@@ -405,59 +407,77 @@
 };
 
 enum {
-	EF10_STAT_tx_bytes = GENERIC_STAT_COUNT,
-	EF10_STAT_tx_packets,
-	EF10_STAT_tx_pause,
-	EF10_STAT_tx_control,
-	EF10_STAT_tx_unicast,
-	EF10_STAT_tx_multicast,
-	EF10_STAT_tx_broadcast,
-	EF10_STAT_tx_lt64,
-	EF10_STAT_tx_64,
-	EF10_STAT_tx_65_to_127,
-	EF10_STAT_tx_128_to_255,
-	EF10_STAT_tx_256_to_511,
-	EF10_STAT_tx_512_to_1023,
-	EF10_STAT_tx_1024_to_15xx,
-	EF10_STAT_tx_15xx_to_jumbo,
-	EF10_STAT_rx_bytes,
-	EF10_STAT_rx_bytes_minus_good_bytes,
-	EF10_STAT_rx_good_bytes,
-	EF10_STAT_rx_bad_bytes,
-	EF10_STAT_rx_packets,
-	EF10_STAT_rx_good,
-	EF10_STAT_rx_bad,
-	EF10_STAT_rx_pause,
-	EF10_STAT_rx_control,
+	EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT,
+	EF10_STAT_port_tx_packets,
+	EF10_STAT_port_tx_pause,
+	EF10_STAT_port_tx_control,
+	EF10_STAT_port_tx_unicast,
+	EF10_STAT_port_tx_multicast,
+	EF10_STAT_port_tx_broadcast,
+	EF10_STAT_port_tx_lt64,
+	EF10_STAT_port_tx_64,
+	EF10_STAT_port_tx_65_to_127,
+	EF10_STAT_port_tx_128_to_255,
+	EF10_STAT_port_tx_256_to_511,
+	EF10_STAT_port_tx_512_to_1023,
+	EF10_STAT_port_tx_1024_to_15xx,
+	EF10_STAT_port_tx_15xx_to_jumbo,
+	EF10_STAT_port_rx_bytes,
+	EF10_STAT_port_rx_bytes_minus_good_bytes,
+	EF10_STAT_port_rx_good_bytes,
+	EF10_STAT_port_rx_bad_bytes,
+	EF10_STAT_port_rx_packets,
+	EF10_STAT_port_rx_good,
+	EF10_STAT_port_rx_bad,
+	EF10_STAT_port_rx_pause,
+	EF10_STAT_port_rx_control,
+	EF10_STAT_port_rx_unicast,
+	EF10_STAT_port_rx_multicast,
+	EF10_STAT_port_rx_broadcast,
+	EF10_STAT_port_rx_lt64,
+	EF10_STAT_port_rx_64,
+	EF10_STAT_port_rx_65_to_127,
+	EF10_STAT_port_rx_128_to_255,
+	EF10_STAT_port_rx_256_to_511,
+	EF10_STAT_port_rx_512_to_1023,
+	EF10_STAT_port_rx_1024_to_15xx,
+	EF10_STAT_port_rx_15xx_to_jumbo,
+	EF10_STAT_port_rx_gtjumbo,
+	EF10_STAT_port_rx_bad_gtjumbo,
+	EF10_STAT_port_rx_overflow,
+	EF10_STAT_port_rx_align_error,
+	EF10_STAT_port_rx_length_error,
+	EF10_STAT_port_rx_nodesc_drops,
+	EF10_STAT_port_rx_pm_trunc_bb_overflow,
+	EF10_STAT_port_rx_pm_discard_bb_overflow,
+	EF10_STAT_port_rx_pm_trunc_vfifo_full,
+	EF10_STAT_port_rx_pm_discard_vfifo_full,
+	EF10_STAT_port_rx_pm_trunc_qbb,
+	EF10_STAT_port_rx_pm_discard_qbb,
+	EF10_STAT_port_rx_pm_discard_mapping,
+	EF10_STAT_port_rx_dp_q_disabled_packets,
+	EF10_STAT_port_rx_dp_di_dropped_packets,
+	EF10_STAT_port_rx_dp_streaming_packets,
+	EF10_STAT_port_rx_dp_hlb_fetch,
+	EF10_STAT_port_rx_dp_hlb_wait,
 	EF10_STAT_rx_unicast,
+	EF10_STAT_rx_unicast_bytes,
 	EF10_STAT_rx_multicast,
+	EF10_STAT_rx_multicast_bytes,
 	EF10_STAT_rx_broadcast,
-	EF10_STAT_rx_lt64,
-	EF10_STAT_rx_64,
-	EF10_STAT_rx_65_to_127,
-	EF10_STAT_rx_128_to_255,
-	EF10_STAT_rx_256_to_511,
-	EF10_STAT_rx_512_to_1023,
-	EF10_STAT_rx_1024_to_15xx,
-	EF10_STAT_rx_15xx_to_jumbo,
-	EF10_STAT_rx_gtjumbo,
-	EF10_STAT_rx_bad_gtjumbo,
+	EF10_STAT_rx_broadcast_bytes,
+	EF10_STAT_rx_bad,
+	EF10_STAT_rx_bad_bytes,
 	EF10_STAT_rx_overflow,
-	EF10_STAT_rx_align_error,
-	EF10_STAT_rx_length_error,
-	EF10_STAT_rx_nodesc_drops,
-	EF10_STAT_rx_pm_trunc_bb_overflow,
-	EF10_STAT_rx_pm_discard_bb_overflow,
-	EF10_STAT_rx_pm_trunc_vfifo_full,
-	EF10_STAT_rx_pm_discard_vfifo_full,
-	EF10_STAT_rx_pm_trunc_qbb,
-	EF10_STAT_rx_pm_discard_qbb,
-	EF10_STAT_rx_pm_discard_mapping,
-	EF10_STAT_rx_dp_q_disabled_packets,
-	EF10_STAT_rx_dp_di_dropped_packets,
-	EF10_STAT_rx_dp_streaming_packets,
-	EF10_STAT_rx_dp_hlb_fetch,
-	EF10_STAT_rx_dp_hlb_wait,
+	EF10_STAT_tx_unicast,
+	EF10_STAT_tx_unicast_bytes,
+	EF10_STAT_tx_multicast,
+	EF10_STAT_tx_multicast_bytes,
+	EF10_STAT_tx_broadcast,
+	EF10_STAT_tx_broadcast_bytes,
+	EF10_STAT_tx_bad,
+	EF10_STAT_tx_bad_bytes,
+	EF10_STAT_tx_overflow,
 	EF10_STAT_COUNT
 };
 
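
The rename above splits the EF10 statistics into two groups: the port_-
prefixed entries are physical-port MAC counters, while the remaining
unprefixed entries count per-function (vadaptor) traffic, which is all a VF
can observe. A hypothetical helper to make the distinction concrete (the
enum names come from above; the helper itself is not part of this patch):

static u64 ef10_rx_bytes_sketch(struct efx_ef10_nic_data *nic_data, bool is_vf)
{
	if (is_vf)	/* per-function (vadaptor) view */
		return nic_data->stats[EF10_STAT_rx_unicast_bytes] +
		       nic_data->stats[EF10_STAT_rx_multicast_bytes] +
		       nic_data->stats[EF10_STAT_rx_broadcast_bytes];
	/* whole-port view, meaningful on the PF */
	return nic_data->stats[EF10_STAT_port_rx_bytes];
}
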
@@ -483,12 +503,21 @@
  * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
  *	reboot
  * @rx_rss_context: Firmware handle for our RSS context
+ * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared
  * @stats: Hardware statistics
  * @workaround_35388: Flag: firmware supports workaround for bug 35388
  * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
  *	after MC reboot
  * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
  *	%MC_CMD_GET_CAPABILITIES response)
+ * @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU
+ * @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU
+ * @vport_id: The function's vport ID, only relevant for PFs
+ * @must_probe_vswitching: Flag: vswitching has yet to be set up after MC reboot
+ * @pf_index: The number of this PF, or of the parent PF if this is a VF
+ * @vf: Pointer to the VF data structure (SR-IOV builds only)
  */
 struct efx_ef10_nic_data {
 	struct efx_buffer mcdi_buf;
@@ -503,126 +532,27 @@
 	unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
 	bool must_restore_piobufs;
 	u32 rx_rss_context;
+	bool rx_rss_context_exclusive;
 	u64 stats[EF10_STAT_COUNT];
 	bool workaround_35388;
 	bool must_check_datapath_caps;
 	u32 datapath_caps;
+	unsigned int rx_dpcpu_fw_id;
+	unsigned int tx_dpcpu_fw_id;
+	unsigned int vport_id;
+	bool must_probe_vswitching;
+	unsigned int pf_index;
+	u8 port_id[ETH_ALEN];
+#ifdef CONFIG_SFC_SRIOV
+	unsigned int vf_index;
+	struct ef10_vf *vf;
+#endif
+	u8 vport_mac[ETH_ALEN];
 };
 
-/*
- * On the SFC9000 family each port is associated with 1 PCI physical
- * function (PF) handled by sfc and a configurable number of virtual
- * functions (VFs) that may be handled by some other driver, often in
- * a VM guest.  The queue pointer registers are mapped in both PF and
- * VF BARs such that an 8K region provides access to a single RX, TX
- * and event queue (collectively a Virtual Interface, VI or VNIC).
- *
- * The PF has access to all 1024 VIs while VFs are mapped to VIs
- * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
- * in range [VI_BASE + i << VI_SCALE, VI_BASE + i + 1 << VI_SCALE).
- * The number of VIs and the VI_SCALE value are configurable but must
- * be established at boot time by firmware.
- */
-
-/* Maximum VI_SCALE parameter supported by Siena */
-#define EFX_VI_SCALE_MAX 6
-/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
- * so this is the smallest allowed value. */
-#define EFX_VI_BASE 128U
-/* Maximum number of VFs allowed */
-#define EFX_VF_COUNT_MAX 127
-/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
-#define EFX_MAX_VF_EVQ_SIZE 8192UL
-/* The number of buffer table entries reserved for each VI on a VF */
-#define EFX_VF_BUFTBL_PER_VI					\
-	((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) *	\
-	 sizeof(efx_qword_t) / EFX_BUF_SIZE)
-
-#ifdef CONFIG_SFC_SRIOV
-
-/* SIENA */
-static inline bool efx_siena_sriov_wanted(struct efx_nic *efx)
-{
-	return efx->vf_count != 0;
-}
-
-static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
-{
-	return efx->vf_init_count != 0;
-}
-
-static inline unsigned int efx_vf_size(struct efx_nic *efx)
-{
-	return 1 << efx->vi_scale;
-}
-
 int efx_init_sriov(void);
-void efx_siena_sriov_probe(struct efx_nic *efx);
-int efx_siena_sriov_init(struct efx_nic *efx);
-void efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
-void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
-void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
-void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
-void efx_siena_sriov_reset(struct efx_nic *efx);
-void efx_siena_sriov_fini(struct efx_nic *efx);
 void efx_fini_sriov(void);
 
-/* EF10 */
-static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
-
-#else
-
-/* SIENA */
-static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return false; }
-static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; }
-static inline int efx_init_sriov(void) { return 0; }
-static inline void efx_siena_sriov_probe(struct efx_nic *efx) {}
-static inline int efx_siena_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_siena_sriov_tx_flush_done(struct efx_nic *efx,
-						 efx_qword_t *event) {}
-static inline void efx_siena_sriov_rx_flush_done(struct efx_nic *efx,
-						 efx_qword_t *event) {}
-static inline void efx_siena_sriov_event(struct efx_channel *channel,
-					 efx_qword_t *event) {}
-static inline void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx,
-						  unsigned dmaq) {}
-static inline void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr) {}
-static inline void efx_siena_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_siena_sriov_fini(struct efx_nic *efx) {}
-static inline void efx_fini_sriov(void) {}
-
-/* EF10 */
-static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
-
-#endif
-
-/* FALCON */
-static inline bool efx_falcon_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline int efx_falcon_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_falcon_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_falcon_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_falcon_sriov_fini(struct efx_nic *efx) {}
-
-int efx_siena_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
-int efx_siena_sriov_set_vf_vlan(struct net_device *dev, int vf,
-				u16 vlan, u8 qos);
-int efx_siena_sriov_get_vf_config(struct net_device *dev, int vf,
-				  struct ifla_vf_info *ivf);
-int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
-				    bool spoofchk);
-
 struct ethtool_ts_info;
 int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
 void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
@@ -654,6 +584,7 @@
 extern const struct efx_nic_type falcon_b0_nic_type;
 extern const struct efx_nic_type siena_a0_nic_type;
 extern const struct efx_nic_type efx_hunt_a0_nic_type;
+extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
 
 /**************************************************************************
  *
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index a2e9aee..ad62615 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -306,7 +306,7 @@
 	struct work_struct pps_work;
 	struct workqueue_struct *pps_workwq;
 	bool nic_ts_enabled;
-	MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
+	_MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
 
 	unsigned int good_syncs;
 	unsigned int fast_syncs;
@@ -389,11 +389,8 @@
 	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
 	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
 			  outbuf, sizeof(outbuf), NULL);
-	if (rc) {
-		netif_err(efx, hw, efx->net_dev,
-			  "MC_CMD_PTP_OP_STATUS failed (%d)\n", rc);
+	if (rc)
 		memset(outbuf, 0, sizeof(outbuf));
-	}
 	efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
 			     efx_ptp_stat_mask,
 			     stats, _MCDI_PTR(outbuf, 0), false);
@@ -490,14 +487,20 @@
 	 */
 	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES);
 	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
-	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
-			  outbuf, sizeof(outbuf), &out_len);
-	if (rc == 0)
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), &out_len);
+	if (rc == 0) {
 		fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT);
-	else if (rc == -EINVAL)
+	} else if (rc == -EINVAL) {
 		fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
-	else
+	} else if (rc == -EPERM) {
+		netif_info(efx, probe, efx->net_dev, "no PTP support\n");
 		return rc;
+	} else {
+		efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf),
+				       outbuf, sizeof(outbuf), rc);
+		return rc;
+	}
 
 	if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) {
 		ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
@@ -541,8 +544,8 @@
 		       MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS);
 	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
 
-	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
-			  outbuf, sizeof(outbuf), NULL);
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), NULL);
 	if (rc == 0) {
 		efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf,
 			PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
@@ -558,6 +561,8 @@
 		efx->ptp_data->ts_corrections.pps_out = 0;
 		efx->ptp_data->ts_corrections.pps_in = 0;
 	} else {
+		efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), outbuf,
+				       sizeof(outbuf), rc);
 		return rc;
 	}
 
@@ -568,7 +573,7 @@
 static int efx_ptp_enable(struct efx_nic *efx)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
-	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+	MCDI_DECLARE_BUF_ERR(outbuf);
 	int rc;
 
 	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
@@ -596,7 +601,7 @@
 static int efx_ptp_disable(struct efx_nic *efx)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
-	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+	MCDI_DECLARE_BUF_ERR(outbuf);
 	int rc;
 
 	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
@@ -604,7 +609,12 @@
 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
 				outbuf, sizeof(outbuf), NULL);
 	rc = (rc == -EALREADY) ? 0 : rc;
-	if (rc)
+	/* If we get ENOSYS or EPERM, the NIC doesn't support PTP (for EPERM,
+	 * e.g. because we are a function without PTP privileges), and thus
+	 * this function should only have been called during probe.
+	 */
+	if (rc == -ENOSYS || rc == -EPERM)
+		netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+	else if (rc)
 		efx_mcdi_display_error(efx, MC_CMD_PTP,
 				       MC_CMD_PTP_IN_DISABLE_LEN,
 				       outbuf, sizeof(outbuf), rc);
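
The ptp.c changes above all follow one pattern: switch to
efx_mcdi_rpc_quiet() so that errors which are expected in normal operation
(-EINVAL from older firmware, -EPERM or -ENOSYS on functions without PTP)
can be handled silently, and only genuinely unexpected failures are reported
via efx_mcdi_display_error(). The pattern in isolation (an illustrative
sketch, not new driver code):

static int efx_ptp_op_sketch(struct efx_nic *efx, efx_dword_t *inbuf,
			     size_t inlen, efx_dword_t *outbuf, size_t outlen)
{
	int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, inlen,
				    outbuf, outlen, NULL);

	if (rc == -EPERM || rc == -ENOSYS)	/* expected: no PTP here */
		netif_info(efx, probe, efx->net_dev, "no PTP support\n");
	else if (rc)				/* unexpected: report loudly */
		efx_mcdi_display_error(efx, MC_CMD_PTP, inlen,
				       outbuf, outlen, rc);
	return rc;
}
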
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index f12c811..b323b91 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -25,6 +25,7 @@
 #include "mcdi.h"
 #include "mcdi_pcol.h"
 #include "selftest.h"
+#include "siena_sriov.h"
 
 /* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
 
@@ -306,7 +307,9 @@
 	if (rc)
 		goto fail5;
 
+#ifdef CONFIG_SFC_SRIOV
 	efx_siena_sriov_probe(efx);
+#endif
 	efx_ptp_defer_probe_with_channel(efx);
 
 	return 0;
@@ -321,7 +324,8 @@
 	return rc;
 }
 
-static void siena_rx_push_rss_config(struct efx_nic *efx)
+static int siena_rx_push_rss_config(struct efx_nic *efx, bool user,
+				    const u32 *rx_indir_table)
 {
 	efx_oword_t temp;
 
@@ -343,7 +347,11 @@
 	       FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
 	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
 
+	memcpy(efx->rx_indir_table, rx_indir_table,
+	       sizeof(efx->rx_indir_table));
 	efx_farch_rx_push_indir_table(efx);
+
+	return 0;
 }
 
 /* This call performs hardware-specific global initialisation, such as
@@ -386,7 +394,7 @@
 			    EFX_RX_USR_BUF_SIZE >> 5);
 	efx_writeo(efx, &temp, FR_AZ_RX_CFG);
 
-	siena_rx_push_rss_config(efx);
+	siena_rx_push_rss_config(efx, false, efx->rx_indir_table);
 
 	/* Enable event logging */
 	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
@@ -909,6 +917,8 @@
  */
 
 const struct efx_nic_type siena_a0_nic_type = {
+	.is_vf = false,
+	.mem_bar = EFX_MEM_BAR,
 	.mem_map_size = siena_mem_map_size,
 	.probe = siena_probe_nic,
 	.remove = siena_remove_nic,
@@ -996,11 +1006,22 @@
 #endif
 	.ptp_write_host_time = siena_ptp_write_host_time,
 	.ptp_set_ts_config = siena_ptp_set_ts_config,
+#ifdef CONFIG_SFC_SRIOV
+	.sriov_configure = efx_siena_sriov_configure,
 	.sriov_init = efx_siena_sriov_init,
 	.sriov_fini = efx_siena_sriov_fini,
-	.sriov_mac_address_changed = efx_siena_sriov_mac_address_changed,
 	.sriov_wanted = efx_siena_sriov_wanted,
 	.sriov_reset = efx_siena_sriov_reset,
+	.sriov_flr = efx_siena_sriov_flr,
+	.sriov_set_vf_mac = efx_siena_sriov_set_vf_mac,
+	.sriov_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
+	.sriov_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
+	.sriov_get_vf_config = efx_siena_sriov_get_vf_config,
+	.vswitching_probe = efx_port_dummy_op_int,
+	.vswitching_restore = efx_port_dummy_op_int,
+	.vswitching_remove = efx_port_dummy_op_void,
+	.set_mac_address = efx_siena_sriov_mac_address_changed,
+#endif
 
 	.revision = EFX_REV_SIENA_A0,
 	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
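
siena_rx_push_rss_config() now returns int and takes the RSS indirection
table explicitly, plus a "user" flag distinguishing an ethtool-driven update
from the driver's own defaults. Assuming the matching efx_nic_type method
carries the same new signature, a caller applying a user-supplied table
would look roughly like this sketch:

static int efx_set_rss_table_sketch(struct efx_nic *efx, const u32 *indir)
{
	/* user = true: the table came from ethtool, not driver defaults */
	return efx->type->rx_push_rss_config(efx, true, indir);
}
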
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index fe83430..da7b94f 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -16,6 +16,7 @@
 #include "filter.h"
 #include "mcdi_pcol.h"
 #include "farch_regs.h"
+#include "siena_sriov.h"
 #include "vfdi.h"
 
 /* Number of longs required to track all the VIs in a VF */
@@ -38,7 +39,7 @@
 };
 
 /**
- * struct efx_vf - Back-end resource and protocol state for a PCI VF
+ * struct siena_vf - Back-end resource and protocol state for a PCI VF
  * @efx: The Efx NIC owning this VF
  * @pci_rid: The PCI requester ID for this VF
  * @pci_name: The PCI name (formatted address) of this VF
@@ -83,7 +84,7 @@
  * @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
  * @reset_work: Work item to schedule a VF reset.
  */
-struct efx_vf {
+struct siena_vf {
 	struct efx_nic *efx;
 	unsigned int pci_rid;
 	char pci_name[13]; /* dddd:bb:dd.f */
@@ -189,7 +190,7 @@
  */
 static struct workqueue_struct *vfdi_workqueue;
 
-static unsigned abs_index(struct efx_vf *vf, unsigned index)
+static unsigned abs_index(struct siena_vf *vf, unsigned index)
 {
 	return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
 }
@@ -207,8 +208,8 @@
 	MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
 	MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);
 
-	rc = efx_mcdi_rpc(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
-			  outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
+				outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
 	if (rc)
 		return rc;
 	if (outlen < MC_CMD_SRIOV_OUT_LEN)
@@ -299,7 +300,7 @@
 /* The TX filter is entirely controlled by this driver, and is modified
  * underneath the feet of the VF
  */
-static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_tx_filter(struct siena_vf *vf)
 {
 	struct efx_nic *efx = vf->efx;
 	struct efx_filter_spec filter;
@@ -343,7 +344,7 @@
 }
 
 /* The RX filter is managed here on behalf of the VF driver */
-static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_rx_filter(struct siena_vf *vf)
 {
 	struct efx_nic *efx = vf->efx;
 	struct efx_filter_spec filter;
@@ -382,7 +383,7 @@
 	}
 }
 
-static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf)
+static void __efx_siena_sriov_update_vf_addr(struct siena_vf *vf)
 {
 	struct efx_nic *efx = vf->efx;
 	struct siena_nic_data *nic_data = efx->nic_data;
@@ -397,7 +398,7 @@
  * local_page_list, either by acquiring local_lock or by running from
  * efx_siena_sriov_peer_work()
  */
-static void __efx_siena_sriov_push_vf_status(struct efx_vf *vf)
+static void __efx_siena_sriov_push_vf_status(struct siena_vf *vf)
 {
 	struct efx_nic *efx = vf->efx;
 	struct siena_nic_data *nic_data = efx->nic_data;
@@ -509,8 +510,9 @@
  * Optionally set VF index and VI index within the VF.
  */
 static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
-			 struct efx_vf **vf_out, unsigned *rel_index_out)
+			 struct siena_vf **vf_out, unsigned *rel_index_out)
 {
+	struct siena_nic_data *nic_data = efx->nic_data;
 	unsigned vf_i;
 
 	if (abs_index < EFX_VI_BASE)
@@ -520,13 +522,13 @@
 		return true;
 
 	if (vf_out)
-		*vf_out = efx->vf + vf_i;
+		*vf_out = nic_data->vf + vf_i;
 	if (rel_index_out)
 		*rel_index_out = abs_index % efx_vf_size(efx);
 	return false;
 }
 
-static int efx_vfdi_init_evq(struct efx_vf *vf)
+static int efx_vfdi_init_evq(struct siena_vf *vf)
 {
 	struct efx_nic *efx = vf->efx;
 	struct vfdi_req *req = vf->buf.addr;
@@ -567,7 +569,7 @@
 	return VFDI_RC_SUCCESS;
 }
 
-static int efx_vfdi_init_rxq(struct efx_vf *vf)
+static int efx_vfdi_init_rxq(struct siena_vf *vf)
 {
 	struct efx_nic *efx = vf->efx;
 	struct vfdi_req *req = vf->buf.addr;
@@ -608,7 +610,7 @@
 	return VFDI_RC_SUCCESS;
 }
 
-static int efx_vfdi_init_txq(struct efx_vf *vf)
+static int efx_vfdi_init_txq(struct siena_vf *vf)
 {
 	struct efx_nic *efx = vf->efx;
 	struct vfdi_req *req = vf->buf.addr;
@@ -655,7 +657,7 @@
 }
 
 /* Returns true when efx_vfdi_fini_all_queues should wake */
-static bool efx_vfdi_flush_wake(struct efx_vf *vf)
+static bool efx_vfdi_flush_wake(struct siena_vf *vf)
 {
 	/* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
 	smp_mb();
@@ -664,7 +666,7 @@
 		atomic_read(&vf->rxq_retry_count);
 }
 
-static void efx_vfdi_flush_clear(struct efx_vf *vf)
+static void efx_vfdi_flush_clear(struct siena_vf *vf)
 {
 	memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
 	vf->txq_count = 0;
@@ -674,7 +676,7 @@
 	atomic_set(&vf->rxq_retry_count, 0);
 }
 
-static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
+static int efx_vfdi_fini_all_queues(struct siena_vf *vf)
 {
 	struct efx_nic *efx = vf->efx;
 	efx_oword_t reg;
@@ -757,7 +759,7 @@
 	return timeout ? 0 : VFDI_RC_ETIMEDOUT;
 }
 
-static int efx_vfdi_insert_filter(struct efx_vf *vf)
+static int efx_vfdi_insert_filter(struct siena_vf *vf)
 {
 	struct efx_nic *efx = vf->efx;
 	struct siena_nic_data *nic_data = efx->nic_data;
@@ -789,7 +791,7 @@
 	return VFDI_RC_SUCCESS;
 }
 
-static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
+static int efx_vfdi_remove_all_filters(struct siena_vf *vf)
 {
 	struct efx_nic *efx = vf->efx;
 	struct siena_nic_data *nic_data = efx->nic_data;
@@ -801,7 +803,7 @@
 	return VFDI_RC_SUCCESS;
 }
 
-static int efx_vfdi_set_status_page(struct efx_vf *vf)
+static int efx_vfdi_set_status_page(struct siena_vf *vf)
 {
 	struct efx_nic *efx = vf->efx;
 	struct siena_nic_data *nic_data = efx->nic_data;
@@ -846,7 +848,7 @@
 	return VFDI_RC_SUCCESS;
 }
 
-static int efx_vfdi_clear_status_page(struct efx_vf *vf)
+static int efx_vfdi_clear_status_page(struct siena_vf *vf)
 {
 	mutex_lock(&vf->status_lock);
 	vf->status_addr = 0;
@@ -855,7 +857,7 @@
 	return VFDI_RC_SUCCESS;
 }
 
-typedef int (*efx_vfdi_op_t)(struct efx_vf *vf);
+typedef int (*efx_vfdi_op_t)(struct siena_vf *vf);
 
 static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
 	[VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
@@ -870,7 +872,7 @@
 
 static void efx_siena_sriov_vfdi(struct work_struct *work)
 {
-	struct efx_vf *vf = container_of(work, struct efx_vf, req);
+	struct siena_vf *vf = container_of(work, struct siena_vf, req);
 	struct efx_nic *efx = vf->efx;
 	struct vfdi_req *req = vf->buf.addr;
 	struct efx_memcpy_req copy[2];
@@ -936,7 +938,8 @@
  * event ring in guest memory with VFDI reset events, then (re-initialise) the
  * event queue to raise an interrupt. The guest driver will then recover.
  */
-static void efx_siena_sriov_reset_vf(struct efx_vf *vf,
+static void efx_siena_sriov_reset_vf(struct siena_vf *vf,
 				     struct efx_buffer *buffer)
 {
 	struct efx_nic *efx = vf->efx;
@@ -1006,7 +1009,7 @@
 
 static void efx_siena_sriov_reset_vf_work(struct work_struct *work)
 {
-	struct efx_vf *vf = container_of(work, struct efx_vf, req);
+	struct siena_vf *vf = container_of(work, struct siena_vf, req);
 	struct efx_nic *efx = vf->efx;
 	struct efx_buffer buf;
 
@@ -1055,8 +1058,10 @@
 	if (!max_vfs)
 		return;
 
-	if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count))
+	if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) {
+		netif_info(efx, probe, efx->net_dev, "no SR-IOV VFs probed\n");
 		return;
+	}
 	if (count > 0 && count > max_vfs)
 		count = max_vfs;
 
@@ -1077,7 +1082,7 @@
 						       peer_work);
 	struct efx_nic *efx = nic_data->efx;
 	struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
-	struct efx_vf *vf;
+	struct siena_vf *vf;
 	struct efx_local_addr *local_addr;
 	struct vfdi_endpoint *peer;
 	struct efx_endpoint_page *epp;
@@ -1099,7 +1104,7 @@
 	peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
 	peer_count = 1;
 	for (pos = 0; pos < efx->vf_count; ++pos) {
-		vf = efx->vf + pos;
+		vf = nic_data->vf + pos;
 
 		mutex_lock(&vf->status_lock);
 		if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
@@ -1155,7 +1160,7 @@
 
 	/* Finally, push the pages */
 	for (pos = 0; pos < efx->vf_count; ++pos) {
-		vf = efx->vf + pos;
+		vf = nic_data->vf + pos;
 
 		mutex_lock(&vf->status_lock);
 		if (vf->status_addr)
@@ -1190,14 +1195,16 @@
 static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
 {
 	unsigned index;
-	struct efx_vf *vf;
+	struct siena_vf *vf;
+	struct siena_nic_data *nic_data = efx->nic_data;
 
-	efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL);
-	if (!efx->vf)
+	nic_data->vf = kcalloc(efx->vf_count, sizeof(*nic_data->vf),
+			       GFP_KERNEL);
+	if (!nic_data->vf)
 		return -ENOMEM;
 
 	for (index = 0; index < efx->vf_count; ++index) {
-		vf = efx->vf + index;
+		vf = nic_data->vf + index;
 
 		vf->efx = efx;
 		vf->index = index;
@@ -1216,11 +1223,12 @@
 
 static void efx_siena_sriov_vfs_fini(struct efx_nic *efx)
 {
-	struct efx_vf *vf;
+	struct siena_nic_data *nic_data = efx->nic_data;
+	struct siena_vf *vf;
 	unsigned int pos;
 
 	for (pos = 0; pos < efx->vf_count; ++pos) {
-		vf = efx->vf + pos;
+		vf = nic_data->vf + pos;
 
 		efx_nic_free_buffer(efx, &vf->buf);
 		kfree(vf->peer_page_addrs);
@@ -1237,7 +1245,7 @@
 	struct siena_nic_data *nic_data = efx->nic_data;
 	unsigned index, devfn, sriov, buftbl_base;
 	u16 offset, stride;
-	struct efx_vf *vf;
+	struct siena_vf *vf;
 	int rc;
 
 	sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
@@ -1250,7 +1258,7 @@
 	buftbl_base = nic_data->vf_buftbl_base;
 	devfn = pci_dev->devfn + offset;
 	for (index = 0; index < efx->vf_count; ++index) {
-		vf = efx->vf + index;
+		vf = nic_data->vf + index;
 
 		/* Reserve buffer entries */
 		vf->buftbl_base = buftbl_base;
@@ -1350,7 +1358,7 @@
 fail_vfs:
 	cancel_work_sync(&nic_data->peer_work);
 	efx_siena_sriov_free_local(efx);
-	kfree(efx->vf);
+	kfree(nic_data->vf);
 fail_alloc:
 	efx_nic_free_buffer(efx, &nic_data->vfdi_status);
 fail_status:
@@ -1361,7 +1369,7 @@
 
 void efx_siena_sriov_fini(struct efx_nic *efx)
 {
-	struct efx_vf *vf;
+	struct siena_vf *vf;
 	unsigned int pos;
 	struct siena_nic_data *nic_data = efx->nic_data;
 
@@ -1377,7 +1385,7 @@
 
 	/* Flush all reconfiguration work */
 	for (pos = 0; pos < efx->vf_count; ++pos) {
-		vf = efx->vf + pos;
+		vf = nic_data->vf + pos;
 		cancel_work_sync(&vf->req);
 		cancel_work_sync(&vf->reset_work);
 	}
@@ -1388,7 +1396,7 @@
 	/* Tear down back-end state */
 	efx_siena_sriov_vfs_fini(efx);
 	efx_siena_sriov_free_local(efx);
-	kfree(efx->vf);
+	kfree(nic_data->vf);
 	efx_nic_free_buffer(efx, &nic_data->vfdi_status);
 	efx_siena_sriov_cmd(efx, false, NULL, NULL);
 }
@@ -1396,7 +1404,7 @@
 void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event)
 {
 	struct efx_nic *efx = channel->efx;
-	struct efx_vf *vf;
+	struct siena_vf *vf;
 	unsigned qid, seq, type, data;
 
 	qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);
@@ -1452,11 +1460,12 @@
 
 void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
 {
-	struct efx_vf *vf;
+	struct siena_nic_data *nic_data = efx->nic_data;
+	struct siena_vf *vf;
 
 	if (vf_i > efx->vf_init_count)
 		return;
-	vf = efx->vf + vf_i;
+	vf = nic_data->vf + vf_i;
 	netif_info(efx, hw, efx->net_dev,
 		   "FLR on VF %s\n", vf->pci_name);
 
@@ -1467,21 +1476,23 @@
 	vf->evq0_count = 0;
 }
 
-void efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
+int efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
 {
 	struct siena_nic_data *nic_data = efx->nic_data;
 	struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
 
 	if (!efx->vf_init_count)
-		return;
+		return 0;
 	ether_addr_copy(vfdi_status->peers[0].mac_addr,
 			efx->net_dev->dev_addr);
 	queue_work(vfdi_workqueue, &nic_data->peer_work);
+
+	return 0;
 }
 
 void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
 {
-	struct efx_vf *vf;
+	struct siena_vf *vf;
 	unsigned queue, qid;
 
 	queue = EFX_QWORD_FIELD(*event,  FSF_AZ_DRIVER_EV_SUBDATA);
@@ -1500,7 +1511,7 @@
 
 void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
 {
-	struct efx_vf *vf;
+	struct siena_vf *vf;
 	unsigned ev_failed, queue, qid;
 
 	queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
@@ -1525,7 +1536,7 @@
 /* Called from napi. Schedule the reset work item */
 void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
 {
-	struct efx_vf *vf;
+	struct siena_vf *vf;
 	unsigned int rel;
 
 	if (map_vi_index(efx, dmaq, &vf, &rel))
@@ -1541,9 +1552,10 @@
 /* Reset all VFs */
 void efx_siena_sriov_reset(struct efx_nic *efx)
 {
+	struct siena_nic_data *nic_data = efx->nic_data;
 	unsigned int vf_i;
 	struct efx_buffer buf;
-	struct efx_vf *vf;
+	struct siena_vf *vf;
 
 	ASSERT_RTNL();
 
@@ -1557,7 +1569,7 @@
 		return;
 
 	for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
-		vf = efx->vf + vf_i;
+		vf = nic_data->vf + vf_i;
 		efx_siena_sriov_reset_vf(vf, &buf);
 	}
 
@@ -1573,7 +1585,6 @@
 	vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
 	if (!vfdi_workqueue)
 		return -ENOMEM;
-
 	return 0;
 }
 
@@ -1582,14 +1593,14 @@
 	destroy_workqueue(vfdi_workqueue);
 }
 
-int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
 {
-	struct efx_nic *efx = netdev_priv(net_dev);
-	struct efx_vf *vf;
+	struct siena_nic_data *nic_data = efx->nic_data;
+	struct siena_vf *vf;
 
 	if (vf_i >= efx->vf_init_count)
 		return -EINVAL;
-	vf = efx->vf + vf_i;
+	vf = nic_data->vf + vf_i;
 
 	mutex_lock(&vf->status_lock);
 	ether_addr_copy(vf->addr.mac_addr, mac);
@@ -1599,16 +1610,16 @@
 	return 0;
 }
 
-int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
+int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
 				u16 vlan, u8 qos)
 {
-	struct efx_nic *efx = netdev_priv(net_dev);
-	struct efx_vf *vf;
+	struct siena_nic_data *nic_data = efx->nic_data;
+	struct siena_vf *vf;
 	u16 tci;
 
 	if (vf_i >= efx->vf_init_count)
 		return -EINVAL;
-	vf = efx->vf + vf_i;
+	vf = nic_data->vf + vf_i;
 
 	mutex_lock(&vf->status_lock);
 	tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
@@ -1619,16 +1630,16 @@
 	return 0;
 }
 
-int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
 				    bool spoofchk)
 {
-	struct efx_nic *efx = netdev_priv(net_dev);
-	struct efx_vf *vf;
+	struct siena_nic_data *nic_data = efx->nic_data;
+	struct siena_vf *vf;
 	int rc;
 
 	if (vf_i >= efx->vf_init_count)
 		return -EINVAL;
-	vf = efx->vf + vf_i;
+	vf = nic_data->vf + vf_i;
 
 	mutex_lock(&vf->txq_lock);
 	if (vf->txq_count == 0) {
@@ -1643,16 +1654,16 @@
 	return rc;
 }
 
-int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
 				  struct ifla_vf_info *ivi)
 {
-	struct efx_nic *efx = netdev_priv(net_dev);
-	struct efx_vf *vf;
+	struct siena_nic_data *nic_data = efx->nic_data;
+	struct siena_vf *vf;
 	u16 tci;
 
 	if (vf_i >= efx->vf_init_count)
 		return -EINVAL;
-	vf = efx->vf + vf_i;
+	vf = nic_data->vf + vf_i;
 
 	ivi->vf = vf_i;
 	ether_addr_copy(ivi->mac, vf->addr.mac_addr);
@@ -1666,3 +1677,12 @@
 	return 0;
 }
 
+bool efx_siena_sriov_wanted(struct efx_nic *efx)
+{
+	return efx->vf_count != 0;
+}
+
+int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs)
+{
+	return 0;
+}
diff --git a/drivers/net/ethernet/sfc/siena_sriov.h b/drivers/net/ethernet/sfc/siena_sriov.h
new file mode 100644
index 0000000..d88d4da
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena_sriov.h
@@ -0,0 +1,79 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef SIENA_SRIOV_H
+#define SIENA_SRIOV_H
+
+#include "net_driver.h"
+
+/* On the SFC9000 family each port is associated with 1 PCI physical
+ * function (PF) handled by sfc and a configurable number of virtual
+ * functions (VFs) that may be handled by some other driver, often in
+ * a VM guest.  The queue pointer registers are mapped in both PF and
+ * VF BARs such that an 8K region provides access to a single RX, TX
+ * and event queue (collectively a Virtual Interface, VI or VNIC).
+ *
+ * The PF has access to all 1024 VIs while VFs are mapped to VIs
+ * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
+ * in range [VI_BASE + (i << VI_SCALE), VI_BASE + ((i + 1) << VI_SCALE)).
+ * The number of VIs and the VI_SCALE value are configurable but must
+ * be established at boot time by firmware.
+ */
+
+/* Maximum VI_SCALE parameter supported by Siena */
+#define EFX_VI_SCALE_MAX 6
+/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
+ * so this is the smallest allowed value.
+ */
+#define EFX_VI_BASE 128U
+/* Maximum number of VFs allowed */
+#define EFX_VF_COUNT_MAX 127
+/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
+#define EFX_MAX_VF_EVQ_SIZE 8192UL
+/* The number of buffer table entries reserved for each VI on a VF */
+#define EFX_VF_BUFTBL_PER_VI					\
+	((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) *	\
+	 sizeof(efx_qword_t) / EFX_BUF_SIZE)
+
+int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs);
+int efx_siena_sriov_init(struct efx_nic *efx);
+void efx_siena_sriov_fini(struct efx_nic *efx);
+int efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
+bool efx_siena_sriov_wanted(struct efx_nic *efx);
+void efx_siena_sriov_reset(struct efx_nic *efx);
+void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
+
+int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
+int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf,
+				u16 vlan, u8 qos);
+int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
+				    bool spoofchk);
+int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf,
+				  struct ifla_vf_info *ivf);
+
+#ifdef CONFIG_SFC_SRIOV
+
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
+{
+	return efx->vf_init_count != 0;
+}
+#else /* !CONFIG_SFC_SRIOV */
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
+{
+	return false;
+}
+#endif /* CONFIG_SFC_SRIOV */
+
+void efx_siena_sriov_probe(struct efx_nic *efx);
+void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+
+#endif /* SIENA_SRIOV_H */
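
To make the VI mapping in the header comment concrete: with
EFX_VI_BASE = 128 and a vi_scale of 6, each VF owns 1 << 6 = 64 VIs, so
VF 2 covers absolute VIs [256, 320). An illustrative helper mirroring
abs_index() from siena_sriov.c (not part of the patch):

static inline unsigned int vf_vi_range_start(unsigned int vf_i,
					     unsigned int vi_scale)
{
	/* vf_vi_range_start(2, 6) == 128 + 2 * 64 == 256; that VF's range
	 * then ends just before vf_vi_range_start(3, 6) == 320.
	 */
	return EFX_VI_BASE + vf_i * (1U << vi_scale);
}
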
diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c
new file mode 100644
index 0000000..816c446
--- /dev/null
+++ b/drivers/net/ethernet/sfc/sriov.c
@@ -0,0 +1,83 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2014-2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/module.h>
+#include "net_driver.h"
+#include "nic.h"
+#include "sriov.h"
+
+int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx->type->sriov_set_vf_mac)
+		return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
+	else
+		return -EOPNOTSUPP;
+}
+
+int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
+			  u8 qos)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx->type->sriov_set_vf_vlan) {
+		if ((vlan & ~VLAN_VID_MASK) ||
+		    (qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)))
+			return -EINVAL;
+
+		return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos);
+	} else {
+		return -EOPNOTSUPP;
+	}
+}
+
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+			      bool spoofchk)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx->type->sriov_set_vf_spoofchk)
+		return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk);
+	else
+		return -EOPNOTSUPP;
+}
+
+int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+			    struct ifla_vf_info *ivi)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx->type->sriov_get_vf_config)
+		return efx->type->sriov_get_vf_config(efx, vf_i, ivi);
+	else
+		return -EOPNOTSUPP;
+}
+
+int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
+				int link_state)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx->type->sriov_set_vf_link_state)
+		return efx->type->sriov_set_vf_link_state(efx, vf_i,
+							  link_state);
+	else
+		return -EOPNOTSUPP;
+}
+
+int efx_sriov_get_phys_port_id(struct net_device *net_dev,
+			       struct netdev_phys_item_id *ppid)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx->type->sriov_get_phys_port_id)
+		return efx->type->sriov_get_phys_port_id(efx, ppid);
+	else
+		return -EOPNOTSUPP;
+}
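
These wrappers take a struct net_device precisely so they can be plugged
straight into the netdev operations table. The efx.c hunk that does so is
not part of this excerpt, but the wiring would presumably look like:

static const struct net_device_ops efx_netdev_ops_sketch = {
	/* ...the usual ndo_open/ndo_stop/etc. entries... */
	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
	.ndo_get_vf_config	= efx_sriov_get_vf_config,
	.ndo_set_vf_link_state	= efx_sriov_set_vf_link_state,
	.ndo_get_phys_port_id	= efx_sriov_get_phys_port_id,
};
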
diff --git a/drivers/net/ethernet/sfc/sriov.h b/drivers/net/ethernet/sfc/sriov.h
new file mode 100644
index 0000000..400df52
--- /dev/null
+++ b/drivers/net/ethernet/sfc/sriov.h
@@ -0,0 +1,31 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2014-2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_SRIOV_H
+#define EFX_SRIOV_H
+
+#include "net_driver.h"
+
+#ifdef CONFIG_SFC_SRIOV
+
+int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac);
+int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
+			  u8 qos);
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+			      bool spoofchk);
+int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+			    struct ifla_vf_info *ivi);
+int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
+				int link_state);
+int efx_sriov_get_phys_port_id(struct net_device *net_dev,
+			       struct netdev_phys_item_id *ppid);
+
+#endif /* CONFIG_SFC_SRIOV */
+
+#endif /* EFX_SRIOV_H */
diff --git a/drivers/net/ethernet/sgi/Kconfig b/drivers/net/ethernet/sgi/Kconfig
index e832f46..fbbb21c 100644
--- a/drivers/net/ethernet/sgi/Kconfig
+++ b/drivers/net/ethernet/sgi/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on (PCI && SGI_IP27) || SGI_IP32
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
@@ -24,9 +22,7 @@
 	select CRC32
 	select MII
 	---help---
-	  If you have a network (Ethernet) card of this type, say Y and read
-	  the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card of this type, say Y here.
 
 config SGI_O2MACE_ETH
 	tristate "SGI O2 MACE Fast Ethernet support"
diff --git a/drivers/net/ethernet/silan/Kconfig b/drivers/net/ethernet/silan/Kconfig
index 3409b3f..ac982be 100644
--- a/drivers/net/ethernet/silan/Kconfig
+++ b/drivers/net/ethernet/silan/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/sis/Kconfig b/drivers/net/ethernet/sis/Kconfig
index 68d052b..22ec98e 100644
--- a/drivers/net/ethernet/sis/Kconfig
+++ b/drivers/net/ethernet/sis/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 3e97a8b..eb9230e 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -9,9 +9,7 @@
 		   ISA || M32R || MAC || MIPS || MN10300 || NIOS2 || PCI || \
 		   PCMCIA || SUPERH || XTENSA
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
@@ -29,8 +27,7 @@
 	  option if you have a DELL laptop with the docking station, or
 	  another SMC9192/9194 based chipset.  Say Y if you want it compiled
 	  into the kernel, and read the file
-	  <file:Documentation/networking/smc9.txt> and the Ethernet-HOWTO,
-	  available from <http://www.tldp.org/docs.html#howto>.
+	  <file:Documentation/networking/smc9.txt>.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called smc9194.
@@ -46,8 +43,7 @@
 	  This is a driver for SMC's 91x series of Ethernet chipsets,
 	  including the SMC91C94 and the SMC91C111. Say Y if you want it
 	  compiled into the kernel, and read the file
-	  <file:Documentation/networking/smc9.txt>  and the Ethernet-HOWTO,
-	  available from  <http://www.tldp.org/docs.html#howto>.
+	  <file:Documentation/networking/smc9.txt>.
 
 	  This driver is also available as a module ( = code which can be
 	  inserted in and removed from the running kernel whenever you want).
@@ -85,9 +81,7 @@
 	---help---
 	  This is a driver for SMSC's LAN911x series of Ethernet chipsets
 	  including the new LAN9115, LAN9116, LAN9117, and LAN9118.
-	  Say Y if you want it compiled into the kernel,
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  Say Y here if you want it compiled into the kernel.
 
 	  This driver is also available as a module. The module will be
 	  called smc911x.  If you want to compile it as a module, say M
@@ -122,9 +116,7 @@
 	select SMSC_PHY
 	---help---
 	  This is a driver for SMSC's LAN9420 PCI ethernet adapter.
-	  Say Y if you want it compiled into the kernel,
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  Say Y here if you want it compiled into the kernel.
 
 	  This driver is also available as a module. The module will be
 	  called smsc9420.  If you want to compile it as a module, say M
diff --git a/drivers/net/ethernet/stmicro/Kconfig b/drivers/net/ethernet/stmicro/Kconfig
index f4a80da..1c1157d 100644
--- a/drivers/net/ethernet/stmicro/Kconfig
+++ b/drivers/net/ethernet/stmicro/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on HAS_IOMEM
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 7d3af19..cec147d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -16,6 +16,7 @@
 config STMMAC_PLATFORM
 	tristate "STMMAC Platform bus support"
 	depends on STMMAC_ETH
+	select MFD_SYSCON
 	default y
 	---help---
 	  This selects the platform specific bus support for the stmmac driver.
@@ -26,6 +27,95 @@
 
 	  If unsure, say N.
 
+if STMMAC_PLATFORM
+
+config DWMAC_GENERIC
+	tristate "Generic driver for DWMAC"
+	default STMMAC_PLATFORM
+	---help---
+	  Generic DWMAC driver for platforms that don't require any
+	  platform-specific code to function, or that use platform
+	  data for setup.
+
+config DWMAC_IPQ806X
+	tristate "QCA IPQ806x DWMAC support"
+	default ARCH_QCOM
+	depends on OF
+	select MFD_SYSCON
+	help
+	  Support for QCA IPQ806X DWMAC Ethernet.
+
+	  This selects the IPQ806x SoC glue layer support for the stmmac
+	  device driver. This driver does not use any of the hardware
+	  acceleration features available on this SoC. Network devices
+	  will behave like standard non-accelerated ethernet interfaces.
+
+config DWMAC_LPC18XX
+	tristate "NXP LPC18xx/43xx DWMAC support"
+	default ARCH_LPC18XX
+	depends on OF
+	select MFD_SYSCON
+	---help---
+	  Support for NXP LPC18xx/43xx DWMAC Ethernet.
+
+config DWMAC_MESON
+	tristate "Amlogic Meson dwmac support"
+	default ARCH_MESON
+	depends on OF
+	help
+	  Support for Ethernet controller on Amlogic Meson SoCs.
+
+	  This selects the Amlogic Meson SoC glue layer support for
+	  the stmmac device driver. This driver is used for Meson6 and
+	  Meson8 SoCs.
+
+config DWMAC_ROCKCHIP
+	tristate "Rockchip dwmac support"
+	default ARCH_ROCKCHIP
+	depends on OF
+	select MFD_SYSCON
+	help
+	  Support for Ethernet controller on Rockchip RK3288 SoC.
+
+	  This selects the Rockchip RK3288 SoC glue layer support for
+	  the stmmac device driver.
+
+config DWMAC_SOCFPGA
+	tristate "SOCFPGA dwmac support"
+	default ARCH_SOCFPGA
+	depends on OF
+	select MFD_SYSCON
+	help
+	  Support for the Ethernet controller on Altera SOCFPGA SoCs.
+
+	  This selects the Altera SOCFPGA SoC glue layer support
+	  for the stmmac device driver. This driver is used for
+	  the Arria V and Cyclone V FPGA SoCs.
+
+config DWMAC_STI
+	tristate "STi GMAC support"
+	default ARCH_STI
+	depends on OF
+	select MFD_SYSCON
+	---help---
+	  Support for the Ethernet controller on STi SoCs.
+
+	  This selects the STi SoC glue layer support for the stmmac
+	  device driver. This driver is used for the GMAC Ethernet
+	  controller on STi series SoCs.
+
+config DWMAC_SUNXI
+	tristate "Allwinner GMAC support"
+	default ARCH_SUNXI
+	depends on OF
+	---help---
+	  Support for the Allwinner A20/A31 GMAC Ethernet controllers.
+
+	  This selects the Allwinner SoC glue layer support for the
+	  stmmac device driver. This driver is used for the A20/A31
+	  GMAC Ethernet controllers.
+endif
+
 config STMMAC_PCI
 	tristate "STMMAC PCI bus support"
 	depends on STMMAC_ETH && PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 73c2715..b390161 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -4,9 +4,17 @@
 	      dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o	\
 	      mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y)
 
-obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
-stmmac-platform-objs:= stmmac_platform.o dwmac-meson.o dwmac-sunxi.o	\
-		       dwmac-sti.o dwmac-socfpga.o dwmac-rk.o
+# Ordering matters: when the drivers are built in they probe in link order,
+# and a device tree node may list a generic "snps,dwmac" compatible next to
+# its SoC-specific one, so the catch-all generic driver must come last.
+obj-$(CONFIG_STMMAC_PLATFORM)	+= stmmac-platform.o
+obj-$(CONFIG_DWMAC_IPQ806X)	+= dwmac-ipq806x.o
+obj-$(CONFIG_DWMAC_LPC18XX)	+= dwmac-lpc18xx.o
+obj-$(CONFIG_DWMAC_MESON)	+= dwmac-meson.o
+obj-$(CONFIG_DWMAC_ROCKCHIP)	+= dwmac-rk.o
+obj-$(CONFIG_DWMAC_SOCFPGA)	+= dwmac-socfpga.o
+obj-$(CONFIG_DWMAC_STI)		+= dwmac-sti.o
+obj-$(CONFIG_DWMAC_SUNXI)	+= dwmac-sunxi.o
+obj-$(CONFIG_DWMAC_GENERIC)	+= dwmac-generic.o
+stmmac-platform-objs:= stmmac_platform.o
 
 obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
 stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index ad39960..799c292 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -158,6 +158,8 @@
 			u32 buffer2_size:13;
 			u32 reserved4:3;
 		} etx;		/* -- enhanced -- */
+
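+		/* Raw view of the whole des0/des1 flag word, presumably so
+		 * the flags can be initialised or cleared in a single store.
+		 */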
+		u64 all_flags;
 	} des01;
 	unsigned int des2;
 	unsigned int des3;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
new file mode 100644
index 0000000..e817a1a
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -0,0 +1,41 @@
+/*
+ * Generic DWMAC platform driver
+ *
+ * Copyright (C) 2007-2011  STMicroelectronics Ltd
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
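+/* Catch-all compatibles that stmmac_platform used to match directly; the
+ * SoC-specific dwmac-* glue drivers are registered first (see the Makefile
+ * ordering note) so they get the first chance to bind.
+ */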
+static const struct of_device_id dwmac_generic_match[] = {
+	{ .compatible = "st,spear600-gmac"},
+	{ .compatible = "snps,dwmac-3.610"},
+	{ .compatible = "snps,dwmac-3.70a"},
+	{ .compatible = "snps,dwmac-3.710"},
+	{ .compatible = "snps,dwmac"},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, dwmac_generic_match);
+
+static struct platform_driver dwmac_generic_driver = {
+	.probe  = stmmac_pltfr_probe,
+	.remove = stmmac_pltfr_remove,
+	.driver = {
+		.name           = STMMAC_RESOURCE_NAME,
+		.pm		= &stmmac_pltfr_pm_ops,
+		.of_match_table = of_match_ptr(dwmac_generic_match),
+	},
+};
+module_platform_driver(dwmac_generic_driver);
+
+MODULE_DESCRIPTION("Generic dwmac driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
new file mode 100644
index 0000000..7e3129e
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -0,0 +1,365 @@
+/*
+ * Qualcomm Atheros IPQ806x GMAC glue layer
+ *
+ * Copyright (C) 2015 The Linux Foundation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/stmmac.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+
+#include "stmmac_platform.h"
+
+#define NSS_COMMON_CLK_GATE			0x8
+#define NSS_COMMON_CLK_GATE_PTP_EN(x)		BIT(0x10 + x)
+#define NSS_COMMON_CLK_GATE_RGMII_RX_EN(x)	BIT(0x9 + (x * 2))
+#define NSS_COMMON_CLK_GATE_RGMII_TX_EN(x)	BIT(0x8 + (x * 2))
+#define NSS_COMMON_CLK_GATE_GMII_RX_EN(x)	BIT(0x4 + x)
+#define NSS_COMMON_CLK_GATE_GMII_TX_EN(x)	BIT(0x0 + x)
+
+#define NSS_COMMON_CLK_DIV0			0xC
+#define NSS_COMMON_CLK_DIV_OFFSET(x)		(x * 8)
+#define NSS_COMMON_CLK_DIV_MASK			0x7f
+
+#define NSS_COMMON_CLK_SRC_CTRL			0x14
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x)	(1 << x)
+/* The mode is encoded in a single bit, but its meaning depends on the MAC ID:
+ * MAC0: QSGMII=0, RGMII=1
+ * MAC1: QSGMII=0 or SGMII=0, RGMII=1
+ * MAC2 & MAC3: QSGMII=0, SGMII=1
+ */
+#define NSS_COMMON_CLK_SRC_CTRL_RGMII(x)	1
+#define NSS_COMMON_CLK_SRC_CTRL_SGMII(x)	((x >= 2) ? 1 : 0)
+
+#define NSS_COMMON_MACSEC_CTL			0x28
+#define NSS_COMMON_MACSEC_CTL_EXT_BYPASS_EN(x)	(1 << x)
+
+#define NSS_COMMON_GMAC_CTL(x)			(0x30 + (x * 4))
+#define NSS_COMMON_GMAC_CTL_CSYS_REQ		BIT(19)
+#define NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL	BIT(16)
+#define NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET	8
+#define NSS_COMMON_GMAC_CTL_IFG_OFFSET		0
+#define NSS_COMMON_GMAC_CTL_IFG_MASK		0x3f
+
+#define NSS_COMMON_CLK_DIV_RGMII_1000		1
+#define NSS_COMMON_CLK_DIV_RGMII_100		9
+#define NSS_COMMON_CLK_DIV_RGMII_10		99
+#define NSS_COMMON_CLK_DIV_SGMII_1000		0
+#define NSS_COMMON_CLK_DIV_SGMII_100		4
+#define NSS_COMMON_CLK_DIV_SGMII_10		49
+
+#define QSGMII_PCS_MODE_CTL			0x68
+#define QSGMII_PCS_MODE_CTL_AUTONEG_EN(x)	BIT((x * 8) + 7)
+
+#define QSGMII_PCS_CAL_LCKDT_CTL		0x120
+#define QSGMII_PCS_CAL_LCKDT_CTL_RST		BIT(19)
+
+/* Only GMAC1/2/3 support SGMII, and their CTL registers are not contiguous */
+#define QSGMII_PHY_SGMII_CTL(x)			((x == 1) ? 0x134 : \
+						 (0x13c + (4 * (x - 2))))
+#define QSGMII_PHY_CDR_EN			BIT(0)
+#define QSGMII_PHY_RX_FRONT_EN			BIT(1)
+#define QSGMII_PHY_RX_SIGNAL_DETECT_EN		BIT(2)
+#define QSGMII_PHY_TX_DRIVER_EN			BIT(3)
+#define QSGMII_PHY_QSGMII_EN			BIT(7)
+#define QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET	12
+#define QSGMII_PHY_PHASE_LOOP_GAIN_MASK		0x7
+#define QSGMII_PHY_RX_DC_BIAS_OFFSET		18
+#define QSGMII_PHY_RX_DC_BIAS_MASK		0x3
+#define QSGMII_PHY_RX_INPUT_EQU_OFFSET		20
+#define QSGMII_PHY_RX_INPUT_EQU_MASK		0x3
+#define QSGMII_PHY_CDR_PI_SLEW_OFFSET		22
+#define QSGMII_PHY_CDR_PI_SLEW_MASK		0x3
+#define QSGMII_PHY_TX_DRV_AMP_OFFSET		28
+#define QSGMII_PHY_TX_DRV_AMP_MASK		0xf
+
+struct ipq806x_gmac {
+	struct platform_device *pdev;
+	struct regmap *nss_common;
+	struct regmap *qsgmii_csr;
+	uint32_t id;
+	struct clk *core_clk;
+	phy_interface_t phy_mode;
+};
+
+static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+	struct device *dev = &gmac->pdev->dev;
+	int div;
+
+	switch (speed) {
+	case SPEED_1000:
+		div = NSS_COMMON_CLK_DIV_SGMII_1000;
+		break;
+
+	case SPEED_100:
+		div = NSS_COMMON_CLK_DIV_SGMII_100;
+		break;
+
+	case SPEED_10:
+		div = NSS_COMMON_CLK_DIV_SGMII_10;
+		break;
+
+	default:
+		dev_err(dev, "Speed %dMbps not supported in SGMII\n", speed);
+		return -EINVAL;
+	}
+
+	return div;
+}
+
+static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+	struct device *dev = &gmac->pdev->dev;
+	int div;
+
+	switch (speed) {
+	case SPEED_1000:
+		div = NSS_COMMON_CLK_DIV_RGMII_1000;
+		break;
+
+	case SPEED_100:
+		div = NSS_COMMON_CLK_DIV_RGMII_100;
+		break;
+
+	case SPEED_10:
+		div = NSS_COMMON_CLK_DIV_RGMII_10;
+		break;
+
+	default:
+		dev_err(dev, "Speed %dMbps not supported in RGMII\n", speed);
+		return -EINVAL;
+	}
+
+	return div;
+}
+
+static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+	uint32_t clk_bits, val;
+	int div;
+
+	switch (gmac->phy_mode) {
+	case PHY_INTERFACE_MODE_RGMII:
+		div = get_clk_div_rgmii(gmac, speed);
+		clk_bits = NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
+			   NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
+		break;
+
+	case PHY_INTERFACE_MODE_SGMII:
+		div = get_clk_div_sgmii(gmac, speed);
+		clk_bits = NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) |
+			   NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
+		break;
+
+	default:
+		dev_err(&gmac->pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+			phy_modes(gmac->phy_mode));
+		return -EINVAL;
+	}
+
+	/* Disable the clocks */
+	regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+	val &= ~clk_bits;
+	regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+	/* Set the divider */
+	regmap_read(gmac->nss_common, NSS_COMMON_CLK_DIV0, &val);
+	val &= ~(NSS_COMMON_CLK_DIV_MASK
+		 << NSS_COMMON_CLK_DIV_OFFSET(gmac->id));
+	val |= div << NSS_COMMON_CLK_DIV_OFFSET(gmac->id);
+	regmap_write(gmac->nss_common, NSS_COMMON_CLK_DIV0, val);
+
+	/* Enable the clock back */
+	regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+	val |= clk_bits;
+	regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+	return 0;
+}
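+
+/* Note: each disable/divide/enable step above is a plain read/modify/write;
+ * regmap_update_bits() could express the same thing in one call, e.g.
+ * (illustrative only):
+ *
+ *	regmap_update_bits(gmac->nss_common, NSS_COMMON_CLK_GATE,
+ *			   clk_bits, 0);
+ */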
+
+static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
+{
+	struct device *dev = &gmac->pdev->dev;
+
+	gmac->phy_mode = of_get_phy_mode(dev->of_node);
+	if (gmac->phy_mode < 0) {
+		dev_err(dev, "missing phy mode property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) {
+		dev_err(dev, "missing qcom id property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* The GMACs are called 1 to 4 in the documentation, but to simplify the
+	 * code and keep it consistent with the Linux convention, we'll number
+	 * them from 0 to 3 here.
+	 */
+	if (gmac->id > 3) {
+		dev_err(dev, "invalid gmac id\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	gmac->core_clk = devm_clk_get(dev, "stmmaceth");
+	if (IS_ERR(gmac->core_clk)) {
+		dev_err(dev, "missing stmmaceth clk property\n");
+		return gmac->core_clk;
+	}
+	clk_set_rate(gmac->core_clk, 266000000);
+
+	/* Setup the register map for the nss common registers */
+	gmac->nss_common = syscon_regmap_lookup_by_phandle(dev->of_node,
+							   "qcom,nss-common");
+	if (IS_ERR(gmac->nss_common)) {
+		dev_err(dev, "missing nss-common node\n");
+		return gmac->nss_common;
+	}
+
+	/* Setup the register map for the qsgmii csr registers */
+	gmac->qsgmii_csr = syscon_regmap_lookup_by_phandle(dev->of_node,
+							   "qcom,qsgmii-csr");
+	if (IS_ERR(gmac->qsgmii_csr)) {
+		dev_err(dev, "missing qsgmii-csr node\n");
+		return gmac->qsgmii_csr;
+	}
+
+	return NULL;	/* success; errors are returned as ERR_PTR() */
+}
+
+static void *ipq806x_gmac_setup(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ipq806x_gmac *gmac;
+	int val;
+	void *err;
+
+	gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+	if (!gmac)
+		return ERR_PTR(-ENOMEM);
+
+	gmac->pdev = pdev;
+
+	err = ipq806x_gmac_of_parse(gmac);
+	if (err) {
+		dev_err(dev, "device tree parsing error\n");
+		return err;
+	}
+
+	regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
+		     QSGMII_PCS_CAL_LCKDT_CTL_RST);
+
+	/* Inter frame gap is set to 12 */
+	val = 12 << NSS_COMMON_GMAC_CTL_IFG_OFFSET |
+	      12 << NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET;
+	/* We also initiate an AXI low power exit request */
+	val |= NSS_COMMON_GMAC_CTL_CSYS_REQ;
+	switch (gmac->phy_mode) {
+	case PHY_INTERFACE_MODE_RGMII:
+		val |= NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
+		break;
+	case PHY_INTERFACE_MODE_SGMII:
+		val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
+		break;
+	default:
+		dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+			phy_modes(gmac->phy_mode));
+		return NULL;
+	}
+	regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
+
+	/* Configure the clock src according to the mode */
+	regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
+	val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+	switch (gmac->phy_mode) {
+	case PHY_INTERFACE_MODE_RGMII:
+		val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
+			NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+		break;
+	case PHY_INTERFACE_MODE_SGMII:
+		val |= NSS_COMMON_CLK_SRC_CTRL_SGMII(gmac->id) <<
+			NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+		break;
+	default:
+		dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+			phy_modes(gmac->phy_mode));
+		return NULL;
+	}
+	regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
+
+	/* Enable PTP clock */
+	regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+	val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id);
+	regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+	if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+		regmap_write(gmac->qsgmii_csr, QSGMII_PHY_SGMII_CTL(gmac->id),
+			     QSGMII_PHY_CDR_EN |
+			     QSGMII_PHY_RX_FRONT_EN |
+			     QSGMII_PHY_RX_SIGNAL_DETECT_EN |
+			     QSGMII_PHY_TX_DRIVER_EN |
+			     QSGMII_PHY_QSGMII_EN |
+			     0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
+			     0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET |
+			     0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
+			     0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
+			     0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
+	}
+
+	return gmac;
+}
+
+static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
+{
+	struct ipq806x_gmac *gmac = priv;
+
+	ipq806x_gmac_set_speed(gmac, speed);
+}
+
+static const struct stmmac_of_data ipq806x_gmac_data = {
+	.has_gmac	= 1,
+	.setup		= ipq806x_gmac_setup,
+	.fix_mac_speed	= ipq806x_gmac_fix_mac_speed,
+};
+
+static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
+	{ .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match);
+
+static struct platform_driver ipq806x_gmac_dwmac_driver = {
+	.probe = stmmac_pltfr_probe,
+	.remove = stmmac_pltfr_remove,
+	.driver = {
+		.name		= "ipq806x-gmac-dwmac",
+		.pm		= &stmmac_pltfr_pm_ops,
+		.of_match_table	= ipq806x_gmac_dwmac_match,
+	},
+};
+module_platform_driver(ipq806x_gmac_dwmac_driver);
+
+MODULE_AUTHOR("Mathieu Olivari <mathieu@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm Atheros IPQ806x DWMAC specific glue layer");
+MODULE_LICENSE("Dual BSD/GPL");
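
For orientation, a simplified sketch of how the stmmac core drives the two
hooks this glue registers (the real call sites live in stmmac_platform.c and
stmmac_main.c; error handling omitted):

	/* at probe time: allocate and stash the glue's private data */
	if (plat_dat->setup)
		plat_dat->bsp_priv = plat_dat->setup(pdev);

	/* from stmmac_adjust_link(), on every PHY speed change */
	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
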
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
new file mode 100644
index 0000000..cb888d3
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -0,0 +1,99 @@
+/*
+ * DWMAC glue for NXP LPC18xx/LPC43xx Ethernet
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+/* Register defines for CREG syscon */
+#define LPC18XX_CREG_CREG6			0x12c
+# define LPC18XX_CREG_CREG6_ETHMODE_MASK	0x7
+# define LPC18XX_CREG_CREG6_ETHMODE_MII		0x0
+# define LPC18XX_CREG_CREG6_ETHMODE_RMII	0x4
+
+struct lpc18xx_dwmac_priv_data {
+	struct regmap *reg;
+	int interface;
+};
+
+static void *lpc18xx_dwmac_setup(struct platform_device *pdev)
+{
+	struct lpc18xx_dwmac_priv_data *dwmac;
+
+	dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+	if (!dwmac)
+		return ERR_PTR(-ENOMEM);
+
+	dwmac->interface = of_get_phy_mode(pdev->dev.of_node);
+	if (dwmac->interface < 0)
+		return ERR_PTR(dwmac->interface);
+
+	dwmac->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+	if (IS_ERR(dwmac->reg)) {
+		dev_err(&pdev->dev, "Syscon lookup failed\n");
+		return dwmac->reg;
+	}
+
+	return dwmac;
+}
+
+static int lpc18xx_dwmac_init(struct platform_device *pdev, void *priv)
+{
+	struct lpc18xx_dwmac_priv_data *dwmac = priv;
+	u8 ethmode;
+
+	if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
+		ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
+	} else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+		ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
+	} else {
+		dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
+		return -EINVAL;
+	}
+
+	regmap_update_bits(dwmac->reg, LPC18XX_CREG_CREG6,
+			   LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
+
+	return 0;
+}
+
+static const struct stmmac_of_data lpc18xx_dwmac_data = {
+	.has_gmac = 1,
+	.setup = lpc18xx_dwmac_setup,
+	.init = lpc18xx_dwmac_init,
+};
+
+static const struct of_device_id lpc18xx_dwmac_match[] = {
+	{ .compatible = "nxp,lpc1850-dwmac", .data = &lpc18xx_dwmac_data },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match);
+
+static struct platform_driver lpc18xx_dwmac_driver = {
+	.probe  = stmmac_pltfr_probe,
+	.remove = stmmac_pltfr_remove,
+	.driver = {
+		.name           = "lpc18xx-dwmac",
+		.pm		= &stmmac_pltfr_pm_ops,
+		.of_match_table = lpc18xx_dwmac_match,
+	},
+};
+module_platform_driver(lpc18xx_dwmac_driver);
+
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_DESCRIPTION("DWMAC glue for LPC18xx/43xx Ethernet");
+MODULE_LICENSE("GPL v2");
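
Note the two syscon lookup styles in play: this driver resolves the CREG block
globally by compatible string, while dwmac-ipq806x above follows explicit
phandles out of its own node. Side by side, as used in these patches:

	struct regmap *creg, *nss;

	/* global lookup; assumes a single matching syscon instance */
	creg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");

	/* phandle lookup; the consumer's DT node points at the syscon */
	nss = syscon_regmap_lookup_by_phandle(dev->of_node, "qcom,nss-common");
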
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index cca028d..61a324a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -15,6 +15,7 @@
 #include <linux/ethtool.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/stmmac.h>
 
@@ -63,7 +64,28 @@
 	return dwmac;
 }
 
-const struct stmmac_of_data meson6_dwmac_data = {
+static const struct stmmac_of_data meson6_dwmac_data = {
 	.setup		= meson6_dwmac_setup,
 	.fix_mac_speed	= meson6_dwmac_fix_mac_speed,
 };
+
+static const struct of_device_id meson6_dwmac_match[] = {
+	{ .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, meson6_dwmac_match);
+
+static struct platform_driver meson6_dwmac_driver = {
+	.probe  = stmmac_pltfr_probe,
+	.remove = stmmac_pltfr_remove,
+	.driver = {
+		.name           = "meson6-dwmac",
+		.pm		= &stmmac_pltfr_pm_ops,
+		.of_match_table = meson6_dwmac_match,
+	},
+};
+module_platform_driver(meson6_dwmac_driver);
+
+MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
+MODULE_DESCRIPTION("Amlogic Meson DWMAC glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 6249a4e..00a1e1e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -22,23 +22,36 @@
 #include <linux/phy.h>
 #include <linux/of_net.h>
 #include <linux/gpio.h>
+#include <linux/module.h>
 #include <linux/of_gpio.h>
 #include <linux/of_device.h>
+#include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 #include <linux/delay.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 
+#include "stmmac_platform.h"
+
+struct rk_priv_data;
+struct rk_gmac_ops {
+	void (*set_to_rgmii)(struct rk_priv_data *bsp_priv,
+			     int tx_delay, int rx_delay);
+	void (*set_to_rmii)(struct rk_priv_data *bsp_priv);
+	void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed);
+	void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed);
+};
+
 struct rk_priv_data {
 	struct platform_device *pdev;
 	int phy_iface;
 	struct regulator *regulator;
+	struct rk_gmac_ops *ops;
 
 	bool clk_enabled;
 	bool clock_input;
 
 	struct clk *clk_mac;
-	struct clk *clk_mac_pll;
 	struct clk *gmac_clkin;
 	struct clk *mac_clk_rx;
 	struct clk *mac_clk_tx;
@@ -61,52 +74,146 @@
 
 #define RK3288_GRF_SOC_CON1	0x0248
 #define RK3288_GRF_SOC_CON3	0x0250
-#define RK3288_GRF_GPIO3D_E	0x01ec
-#define RK3288_GRF_GPIO4A_E	0x01f0
-#define RK3288_GRF_GPIO4B_E	0x01f4
 
 /*RK3288_GRF_SOC_CON1*/
-#define GMAC_PHY_INTF_SEL_RGMII	(GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
-#define GMAC_PHY_INTF_SEL_RMII	(GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
-#define GMAC_FLOW_CTRL		GRF_BIT(9)
-#define GMAC_FLOW_CTRL_CLR	GRF_CLR_BIT(9)
-#define GMAC_SPEED_10M		GRF_CLR_BIT(10)
-#define GMAC_SPEED_100M		GRF_BIT(10)
-#define GMAC_RMII_CLK_25M	GRF_BIT(11)
-#define GMAC_RMII_CLK_2_5M	GRF_CLR_BIT(11)
-#define GMAC_CLK_125M		(GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
-#define GMAC_CLK_25M		(GRF_BIT(12) | GRF_BIT(13))
-#define GMAC_CLK_2_5M		(GRF_CLR_BIT(12) | GRF_BIT(13))
-#define GMAC_RMII_MODE		GRF_BIT(14)
-#define GMAC_RMII_MODE_CLR	GRF_CLR_BIT(14)
+#define RK3288_GMAC_PHY_INTF_SEL_RGMII	(GRF_BIT(6) | GRF_CLR_BIT(7) | \
+					 GRF_CLR_BIT(8))
+#define RK3288_GMAC_PHY_INTF_SEL_RMII	(GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | \
+					 GRF_BIT(8))
+#define RK3288_GMAC_FLOW_CTRL		GRF_BIT(9)
+#define RK3288_GMAC_FLOW_CTRL_CLR	GRF_CLR_BIT(9)
+#define RK3288_GMAC_SPEED_10M		GRF_CLR_BIT(10)
+#define RK3288_GMAC_SPEED_100M		GRF_BIT(10)
+#define RK3288_GMAC_RMII_CLK_25M	GRF_BIT(11)
+#define RK3288_GMAC_RMII_CLK_2_5M	GRF_CLR_BIT(11)
+#define RK3288_GMAC_CLK_125M		(GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
+#define RK3288_GMAC_CLK_25M		(GRF_BIT(12) | GRF_BIT(13))
+#define RK3288_GMAC_CLK_2_5M		(GRF_CLR_BIT(12) | GRF_BIT(13))
+#define RK3288_GMAC_RMII_MODE		GRF_BIT(14)
+#define RK3288_GMAC_RMII_MODE_CLR	GRF_CLR_BIT(14)
 
 /*RK3288_GRF_SOC_CON3*/
-#define GMAC_TXCLK_DLY_ENABLE	GRF_BIT(14)
-#define GMAC_TXCLK_DLY_DISABLE	GRF_CLR_BIT(14)
-#define GMAC_RXCLK_DLY_ENABLE	GRF_BIT(15)
-#define GMAC_RXCLK_DLY_DISABLE	GRF_CLR_BIT(15)
-#define GMAC_CLK_RX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 7)
-#define GMAC_CLK_TX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3288_GMAC_TXCLK_DLY_ENABLE	GRF_BIT(14)
+#define RK3288_GMAC_TXCLK_DLY_DISABLE	GRF_CLR_BIT(14)
+#define RK3288_GMAC_RXCLK_DLY_ENABLE	GRF_BIT(15)
+#define RK3288_GMAC_RXCLK_DLY_DISABLE	GRF_CLR_BIT(15)
+#define RK3288_GMAC_CLK_RX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 7)
+#define RK3288_GMAC_CLK_TX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 0)
 
-static void set_to_rgmii(struct rk_priv_data *bsp_priv,
-			 int tx_delay, int rx_delay)
+static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
+				int tx_delay, int rx_delay)
 {
 	struct device *dev = &bsp_priv->pdev->dev;
 
 	if (IS_ERR(bsp_priv->grf)) {
-		dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+		dev_err(dev, "Missing rockchip,grf property\n");
 		return;
 	}
 
 	regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
-		     GMAC_PHY_INTF_SEL_RGMII | GMAC_RMII_MODE_CLR);
+		     RK3288_GMAC_PHY_INTF_SEL_RGMII |
+		     RK3288_GMAC_RMII_MODE_CLR);
 	regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
-		     GMAC_RXCLK_DLY_ENABLE | GMAC_TXCLK_DLY_ENABLE |
-		     GMAC_CLK_RX_DL_CFG(rx_delay) |
-		     GMAC_CLK_TX_DL_CFG(tx_delay));
+		     RK3288_GMAC_RXCLK_DLY_ENABLE |
+		     RK3288_GMAC_TXCLK_DLY_ENABLE |
+		     RK3288_GMAC_CLK_RX_DL_CFG(rx_delay) |
+		     RK3288_GMAC_CLK_TX_DL_CFG(tx_delay));
 }
 
-static void set_to_rmii(struct rk_priv_data *bsp_priv)
+static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+	struct device *dev = &bsp_priv->pdev->dev;
+
+	if (IS_ERR(bsp_priv->grf)) {
+		dev_err(dev, "Missing rockchip,grf property\n");
+		return;
+	}
+
+	regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+		     RK3288_GMAC_PHY_INTF_SEL_RMII | RK3288_GMAC_RMII_MODE);
+}
+
+static void rk3288_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+	struct device *dev = &bsp_priv->pdev->dev;
+
+	if (IS_ERR(bsp_priv->grf)) {
+		dev_err(dev, "Missing rockchip,grf property\n");
+		return;
+	}
+
+	if (speed == 10)
+		regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+			     RK3288_GMAC_CLK_2_5M);
+	else if (speed == 100)
+		regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+			     RK3288_GMAC_CLK_25M);
+	else if (speed == 1000)
+		regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+			     RK3288_GMAC_CLK_125M);
+	else
+		dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+	struct device *dev = &bsp_priv->pdev->dev;
+
+	if (IS_ERR(bsp_priv->grf)) {
+		dev_err(dev, "Missing rockchip,grf property\n");
+		return;
+	}
+
+	if (speed == 10) {
+		regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+			     RK3288_GMAC_RMII_CLK_2_5M |
+			     RK3288_GMAC_SPEED_10M);
+	} else if (speed == 100) {
+		regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+			     RK3288_GMAC_RMII_CLK_25M |
+			     RK3288_GMAC_SPEED_100M);
+	} else {
+		dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+	}
+}
+
+static struct rk_gmac_ops rk3288_ops = {
+	.set_to_rgmii = rk3288_set_to_rgmii,
+	.set_to_rmii = rk3288_set_to_rmii,
+	.set_rgmii_speed = rk3288_set_rgmii_speed,
+	.set_rmii_speed = rk3288_set_rmii_speed,
+};
+
+#define RK3368_GRF_SOC_CON15	0x043c
+#define RK3368_GRF_SOC_CON16	0x0440
+
+/* RK3368_GRF_SOC_CON15 */
+#define RK3368_GMAC_PHY_INTF_SEL_RGMII	(GRF_BIT(9) | GRF_CLR_BIT(10) | \
+					 GRF_CLR_BIT(11))
+#define RK3368_GMAC_PHY_INTF_SEL_RMII	(GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
+					 GRF_BIT(11))
+#define RK3368_GMAC_FLOW_CTRL		GRF_BIT(8)
+#define RK3368_GMAC_FLOW_CTRL_CLR	GRF_CLR_BIT(8)
+#define RK3368_GMAC_SPEED_10M		GRF_CLR_BIT(7)
+#define RK3368_GMAC_SPEED_100M		GRF_BIT(7)
+#define RK3368_GMAC_RMII_CLK_25M	GRF_BIT(3)
+#define RK3368_GMAC_RMII_CLK_2_5M	GRF_CLR_BIT(3)
+#define RK3368_GMAC_CLK_125M		(GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
+#define RK3368_GMAC_CLK_25M		(GRF_BIT(4) | GRF_BIT(5))
+#define RK3368_GMAC_CLK_2_5M		(GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3368_GMAC_RMII_MODE		GRF_BIT(6)
+#define RK3368_GMAC_RMII_MODE_CLR	GRF_CLR_BIT(6)
+
+/* RK3368_GRF_SOC_CON16 */
+#define RK3368_GMAC_TXCLK_DLY_ENABLE	GRF_BIT(7)
+#define RK3368_GMAC_TXCLK_DLY_DISABLE	GRF_CLR_BIT(7)
+#define RK3368_GMAC_RXCLK_DLY_ENABLE	GRF_BIT(15)
+#define RK3368_GMAC_RXCLK_DLY_DISABLE	GRF_CLR_BIT(15)
+#define RK3368_GMAC_CLK_RX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 8)
+#define RK3368_GMAC_CLK_TX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 0)
+
+static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
+				int tx_delay, int rx_delay)
 {
 	struct device *dev = &bsp_priv->pdev->dev;
 
@@ -115,11 +222,30 @@
 		return;
 	}
 
-	regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
-		     GMAC_PHY_INTF_SEL_RMII | GMAC_RMII_MODE);
+	regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+		     RK3368_GMAC_PHY_INTF_SEL_RGMII |
+		     RK3368_GMAC_RMII_MODE_CLR);
+	regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16,
+		     RK3368_GMAC_RXCLK_DLY_ENABLE |
+		     RK3368_GMAC_TXCLK_DLY_ENABLE |
+		     RK3368_GMAC_CLK_RX_DL_CFG(rx_delay) |
+		     RK3368_GMAC_CLK_TX_DL_CFG(tx_delay));
 }
 
-static void set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+	struct device *dev = &bsp_priv->pdev->dev;
+
+	if (IS_ERR(bsp_priv->grf)) {
+		dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+		return;
+	}
+
+	regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+		     RK3368_GMAC_PHY_INTF_SEL_RMII | RK3368_GMAC_RMII_MODE);
+}
+
+static void rk3368_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
 {
 	struct device *dev = &bsp_priv->pdev->dev;
 
@@ -129,16 +255,19 @@
 	}
 
 	if (speed == 10)
-		regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_2_5M);
+		regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+			     RK3368_GMAC_CLK_2_5M);
 	else if (speed == 100)
-		regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_25M);
+		regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+			     RK3368_GMAC_CLK_25M);
 	else if (speed == 1000)
-		regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_125M);
+		regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+			     RK3368_GMAC_CLK_125M);
 	else
 		dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
 }
 
-static void set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
 {
 	struct device *dev = &bsp_priv->pdev->dev;
 
@@ -148,16 +277,25 @@
 	}
 
 	if (speed == 10) {
-		regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
-			     GMAC_RMII_CLK_2_5M | GMAC_SPEED_10M);
+		regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+			     RK3368_GMAC_RMII_CLK_2_5M |
+			     RK3368_GMAC_SPEED_10M);
 	} else if (speed == 100) {
-		regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
-			     GMAC_RMII_CLK_25M | GMAC_SPEED_100M);
+		regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+			     RK3368_GMAC_RMII_CLK_25M |
+			     RK3368_GMAC_SPEED_100M);
 	} else {
 		dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
 	}
 }
 
+static struct rk_gmac_ops rk3368_ops = {
+	.set_to_rgmii = rk3368_set_to_rgmii,
+	.set_to_rmii = rk3368_set_to_rmii,
+	.set_rgmii_speed = rk3368_set_rgmii_speed,
+	.set_rmii_speed = rk3368_set_rmii_speed,
+};
+
 static int gmac_clk_init(struct rk_priv_data *bsp_priv)
 {
 	struct device *dev = &bsp_priv->pdev->dev;
@@ -166,49 +304,49 @@
 
 	bsp_priv->mac_clk_rx = devm_clk_get(dev, "mac_clk_rx");
 	if (IS_ERR(bsp_priv->mac_clk_rx))
-		dev_err(dev, "%s: cannot get clock %s\n",
-			__func__, "mac_clk_rx");
+		dev_err(dev, "cannot get clock %s\n", "mac_clk_rx");
 
 	bsp_priv->mac_clk_tx = devm_clk_get(dev, "mac_clk_tx");
 	if (IS_ERR(bsp_priv->mac_clk_tx))
-		dev_err(dev, "%s: cannot get clock %s\n",
-			__func__, "mac_clk_tx");
+		dev_err(dev, "cannot get clock %s\n", "mac_clk_tx");
 
 	bsp_priv->aclk_mac = devm_clk_get(dev, "aclk_mac");
 	if (IS_ERR(bsp_priv->aclk_mac))
-		dev_err(dev, "%s: cannot get clock %s\n",
-			__func__, "aclk_mac");
+		dev_err(dev, "cannot get clock %s\n", "aclk_mac");
 
 	bsp_priv->pclk_mac = devm_clk_get(dev, "pclk_mac");
 	if (IS_ERR(bsp_priv->pclk_mac))
-		dev_err(dev, "%s: cannot get clock %s\n",
-			__func__, "pclk_mac");
+		dev_err(dev, "cannot get clock %s\n", "pclk_mac");
 
 	bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
 	if (IS_ERR(bsp_priv->clk_mac))
-		dev_err(dev, "%s: cannot get clock %s\n",
-			__func__, "stmmaceth");
+		dev_err(dev, "cannot get clock %s\n", "stmmaceth");
 
 	if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
 		bsp_priv->clk_mac_ref = devm_clk_get(dev, "clk_mac_ref");
 		if (IS_ERR(bsp_priv->clk_mac_ref))
-			dev_err(dev, "%s: cannot get clock %s\n",
-				__func__, "clk_mac_ref");
+			dev_err(dev, "cannot get clock %s\n", "clk_mac_ref");
 
 		if (!bsp_priv->clock_input) {
 			bsp_priv->clk_mac_refout =
 				devm_clk_get(dev, "clk_mac_refout");
 			if (IS_ERR(bsp_priv->clk_mac_refout))
-				dev_err(dev, "%s: cannot get clock %s\n",
-					__func__, "clk_mac_refout");
+				dev_err(dev, "cannot get clock %s\n",
+					"clk_mac_refout");
 		}
 	}
 
 	if (bsp_priv->clock_input) {
-		dev_info(dev, "%s: clock input from PHY\n", __func__);
+		dev_info(dev, "clock input from PHY\n");
 	} else {
 		if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
-			clk_set_rate(bsp_priv->clk_mac_pll, 50000000);
+			clk_set_rate(bsp_priv->clk_mac, 50000000);
 	}
 
 	return 0;
@@ -292,26 +430,25 @@
 	struct device *dev = &bsp_priv->pdev->dev;
 
 	if (!ldo) {
-		dev_err(dev, "%s: no regulator found\n", __func__);
+		dev_err(dev, "no regulator found\n");
 		return -1;
 	}
 
 	if (enable) {
 		ret = regulator_enable(ldo);
 		if (ret)
-			dev_err(dev, "%s: fail to enable phy-supply\n",
-				__func__);
+			dev_err(dev, "failed to enable phy-supply\n");
 	} else {
 		ret = regulator_disable(ldo);
 		if (ret)
-			dev_err(dev, "%s: fail to disable phy-supply\n",
-				__func__);
+			dev_err(dev, "failed to disable phy-supply\n");
 	}
 
 	return 0;
 }
 
-static void *rk_gmac_setup(struct platform_device *pdev)
+static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
+					  struct rk_gmac_ops *ops)
 {
 	struct rk_priv_data *bsp_priv;
 	struct device *dev = &pdev->dev;
@@ -324,6 +461,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	bsp_priv->phy_iface = of_get_phy_mode(dev->of_node);
+	bsp_priv->ops = ops;
 
 	bsp_priv->regulator = devm_regulator_get_optional(dev, "phy");
 	if (IS_ERR(bsp_priv->regulator)) {
@@ -337,12 +475,11 @@
 
 	ret = of_property_read_string(dev->of_node, "clock_in_out", &strings);
 	if (ret) {
-		dev_err(dev, "%s: Can not read property: clock_in_out.\n",
-			__func__);
+		dev_err(dev, "Cannot read property: clock_in_out.\n");
 		bsp_priv->clock_input = true;
 	} else {
-		dev_info(dev, "%s: clock input or output? (%s).\n",
-			 __func__, strings);
+		dev_info(dev, "clock input or output? (%s).\n", strings);
 		if (!strcmp(strings, "input"))
 			bsp_priv->clock_input = true;
 		else
@@ -352,22 +489,22 @@
 	ret = of_property_read_u32(dev->of_node, "tx_delay", &value);
 	if (ret) {
 		bsp_priv->tx_delay = 0x30;
-		dev_err(dev, "%s: Can not read property: tx_delay.", __func__);
-		dev_err(dev, "%s: set tx_delay to 0x%x\n",
-			__func__, bsp_priv->tx_delay);
+		dev_err(dev, "Cannot read property: tx_delay.\n");
+		dev_err(dev, "set tx_delay to 0x%x\n", bsp_priv->tx_delay);
 	} else {
-		dev_info(dev, "%s: TX delay(0x%x).\n", __func__, value);
+		dev_info(dev, "TX delay(0x%x).\n", value);
 		bsp_priv->tx_delay = value;
 	}
 
 	ret = of_property_read_u32(dev->of_node, "rx_delay", &value);
 	if (ret) {
 		bsp_priv->rx_delay = 0x10;
-		dev_err(dev, "%s: Can not read property: rx_delay.", __func__);
-		dev_err(dev, "%s: set rx_delay to 0x%x\n",
-			__func__, bsp_priv->rx_delay);
+		dev_err(dev, "Cannot read property: rx_delay.\n");
+		dev_err(dev, "set rx_delay to 0x%x\n", bsp_priv->rx_delay);
 	} else {
-		dev_info(dev, "%s: RX delay(0x%x).\n", __func__, value);
+		dev_info(dev, "RX delay(0x%x).\n", value);
 		bsp_priv->rx_delay = value;
 	}
 
@@ -377,13 +514,14 @@
 
 	/*rmii or rgmii*/
 	if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
-		dev_info(dev, "%s: init for RGMII\n", __func__);
-		set_to_rgmii(bsp_priv, bsp_priv->tx_delay, bsp_priv->rx_delay);
+		dev_info(dev, "init for RGMII\n");
+		bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay,
+					    bsp_priv->rx_delay);
 	} else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
-		dev_info(dev, "%s: init for RMII\n", __func__);
-		set_to_rmii(bsp_priv);
+		dev_info(dev, "init for RMII\n");
+		bsp_priv->ops->set_to_rmii(bsp_priv);
 	} else {
-		dev_err(dev, "%s: NO interface defined!\n", __func__);
+		dev_err(dev, "NO interface defined!\n");
 	}
 
 	gmac_clk_init(bsp_priv);
@@ -391,6 +529,16 @@
 	return bsp_priv;
 }
 
+static void *rk3288_gmac_setup(struct platform_device *pdev)
+{
+	return rk_gmac_setup(pdev, &rk3288_ops);
+}
+
+static void *rk3368_gmac_setup(struct platform_device *pdev)
+{
+	return rk_gmac_setup(pdev, &rk3368_ops);
+}
+
 static int rk_gmac_init(struct platform_device *pdev, void *priv)
 {
 	struct rk_priv_data *bsp_priv = priv;
@@ -421,17 +569,47 @@
 	struct device *dev = &bsp_priv->pdev->dev;
 
 	if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII)
-		set_rgmii_speed(bsp_priv, speed);
+		bsp_priv->ops->set_rgmii_speed(bsp_priv, speed);
 	else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
-		set_rmii_speed(bsp_priv, speed);
+		bsp_priv->ops->set_rmii_speed(bsp_priv, speed);
 	else
 		dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
 }
 
-const struct stmmac_of_data rk3288_gmac_data = {
+static const struct stmmac_of_data rk3288_gmac_data = {
 	.has_gmac = 1,
 	.fix_mac_speed = rk_fix_speed,
-	.setup = rk_gmac_setup,
+	.setup = rk3288_gmac_setup,
 	.init = rk_gmac_init,
 	.exit = rk_gmac_exit,
 };
+
+static const struct stmmac_of_data rk3368_gmac_data = {
+	.has_gmac = 1,
+	.fix_mac_speed = rk_fix_speed,
+	.setup = rk3368_gmac_setup,
+	.init = rk_gmac_init,
+	.exit = rk_gmac_exit,
+};
+
+static const struct of_device_id rk_gmac_dwmac_match[] = {
+	{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
+	{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_gmac_data},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
+
+static struct platform_driver rk_gmac_dwmac_driver = {
+	.probe  = stmmac_pltfr_probe,
+	.remove = stmmac_pltfr_remove,
+	.driver = {
+		.name           = "rk_gmac-dwmac",
+		.pm		= &stmmac_pltfr_pm_ops,
+		.of_match_table = rk_gmac_dwmac_match,
+	},
+};
+module_platform_driver(rk_gmac_dwmac_driver);
+
+MODULE_AUTHOR("Chen-Zhi (Roger Chen) <roger.chen@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip RK3288/RK3368 DWMAC specific glue layer");
+MODULE_LICENSE("GPL");
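
With the SoC differences captured in struct rk_gmac_ops, a further Rockchip
variant needs only a new ops table plus a one-line setup wrapper; a sketch,
with rkXXXX standing in for a hypothetical SoC:

	static struct rk_gmac_ops rkXXXX_ops = {
		.set_to_rgmii		= rkXXXX_set_to_rgmii,
		.set_to_rmii		= rkXXXX_set_to_rmii,
		.set_rgmii_speed	= rkXXXX_set_rgmii_speed,
		.set_rmii_speed		= rkXXXX_set_rmii_speed,
	};

	static void *rkXXXX_gmac_setup(struct platform_device *pdev)
	{
		return rk_gmac_setup(pdev, &rkXXXX_ops);
	}
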
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 5a36bd2..8141c5b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -257,9 +257,28 @@
 	return ret;
 }
 
-const struct stmmac_of_data socfpga_gmac_data = {
+static const struct stmmac_of_data socfpga_gmac_data = {
 	.setup = socfpga_dwmac_probe,
 	.init = socfpga_dwmac_init,
 	.exit = socfpga_dwmac_exit,
 	.fix_mac_speed = socfpga_dwmac_fix_mac_speed,
 };
+
+static const struct of_device_id socfpga_dwmac_match[] = {
+	{ .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
+
+static struct platform_driver socfpga_dwmac_driver = {
+	.probe  = stmmac_pltfr_probe,
+	.remove = stmmac_pltfr_remove,
+	.driver = {
+		.name           = "socfpga-dwmac",
+		.pm		= &stmmac_pltfr_pm_ops,
+		.of_match_table = socfpga_dwmac_match,
+	},
+};
+module_platform_driver(socfpga_dwmac_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index bb6e2dc..a2e8111 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -17,6 +17,7 @@
 #include <linux/stmmac.h>
 #include <linux/phy.h>
 #include <linux/mfd/syscon.h>
+#include <linux/module.h>
 #include <linux/regmap.h>
 #include <linux/clk.h>
 #include <linux/of.h>
@@ -351,16 +352,40 @@
 	return dwmac;
 }
 
-const struct stmmac_of_data stih4xx_dwmac_data = {
+static const struct stmmac_of_data stih4xx_dwmac_data = {
 	.fix_mac_speed = stih4xx_fix_retime_src,
 	.setup = sti_dwmac_setup,
 	.init = stix4xx_init,
 	.exit = sti_dwmac_exit,
 };
 
-const struct stmmac_of_data stid127_dwmac_data = {
+static const struct stmmac_of_data stid127_dwmac_data = {
 	.fix_mac_speed = stid127_fix_retime_src,
 	.setup = sti_dwmac_setup,
 	.init = stid127_init,
 	.exit = sti_dwmac_exit,
 };
+
+static const struct of_device_id sti_dwmac_match[] = {
+	{ .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
+	{ .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data},
+	{ .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data},
+	{ .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sti_dwmac_match);
+
+static struct platform_driver sti_dwmac_driver = {
+	.probe  = stmmac_pltfr_probe,
+	.remove = stmmac_pltfr_remove,
+	.driver = {
+		.name           = "sti-dwmac",
+		.pm		= &stmmac_pltfr_pm_ops,
+		.of_match_table = sti_dwmac_match,
+	},
+};
+module_platform_driver(sti_dwmac_driver);
+
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics DWMAC Specific Glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index c5ea9ab..15048ca 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -18,7 +18,9 @@
 
 #include <linux/stmmac.h>
 #include <linux/clk.h>
+#include <linux/module.h>
 #include <linux/phy.h>
+#include <linux/platform_device.h>
 #include <linux/of_net.h>
 #include <linux/regulator/consumer.h>
 
@@ -132,7 +134,7 @@
 
 /* of_data specifying hardware features and callbacks.
  * hardware features were copied from Allwinner drivers. */
-const struct stmmac_of_data sun7i_gmac_data = {
+static const struct stmmac_of_data sun7i_gmac_data = {
 	.has_gmac = 1,
 	.tx_coe = 1,
 	.fix_mac_speed = sun7i_fix_speed,
@@ -140,3 +142,24 @@
 	.init = sun7i_gmac_init,
 	.exit = sun7i_gmac_exit,
 };
+
+static const struct of_device_id sun7i_dwmac_match[] = {
+	{ .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
+
+static struct platform_driver sun7i_dwmac_driver = {
+	.probe  = stmmac_pltfr_probe,
+	.remove = stmmac_pltfr_remove,
+	.driver = {
+		.name           = "sun7i-dwmac",
+		.pm		= &stmmac_pltfr_pm_ops,
+		.of_match_table = sun7i_dwmac_match,
+	},
+};
+module_platform_driver(sun7i_dwmac_driver);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner sunxi DWMAC specific glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 1e2bcf5..7d94444 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -240,6 +240,7 @@
 static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
 				  int mode, int end)
 {
+	p->des01.all_flags = 0;
 	p->des01.erx.own = 1;
 	p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
 
@@ -254,7 +255,7 @@
 
 static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
 {
-	p->des01.etx.own = 0;
+	p->des01.all_flags = 0;
 	if (mode == STMMAC_CHAIN_MODE)
 		ehn_desc_tx_set_on_chain(p, end);
 	else
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 35ad4f4..48c3456 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -123,6 +123,7 @@
 static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
 			       int end)
 {
+	p->des01.all_flags = 0;
 	p->des01.rx.own = 1;
 	p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
 
@@ -137,7 +138,7 @@
 
 static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
 {
-	p->des01.tx.own = 0;
+	p->des01.all_flags = 0;
 	if (mode == STMMAC_CHAIN_MODE)
 		ndesc_tx_set_on_chain(p, end);
 	else
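
Both descriptor-init paths now clear the whole des0/des1 word before setting
fields, so no stale flag from a previous use of the ring can leak through. The
union shape this relies on is roughly the following (a sketch only; the real
bitfield layouts live in descs.h and bit positions are not to scale):

	struct dma_desc_sketch {
		union {
			struct { u32 own:1, other:31; u32 des1; } rx;
			u64 all_flags;	/* raw des0 + des1: one store
					 * clears every flag above */
		} des01;
	};
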
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 73bab98..1f3b33a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -34,6 +34,14 @@
 #include <linux/ptp_clock_kernel.h>
 #include <linux/reset.h>
 
+struct stmmac_resources {
+	void __iomem *addr;
+	const char *mac;
+	int wol_irq;
+	int lpi_irq;
+	int irq;
+};
+
 struct stmmac_tx_info {
 	dma_addr_t buf;
 	bool map_as_page;
@@ -135,9 +143,9 @@
 int stmmac_resume(struct net_device *ndev);
 int stmmac_suspend(struct net_device *ndev);
 int stmmac_dvr_remove(struct net_device *ndev);
-struct stmmac_priv *stmmac_dvr_probe(struct device *device,
-				     struct plat_stmmacenet_data *plat_dat,
-				     void __iomem *addr);
+int stmmac_dvr_probe(struct device *device,
+		     struct plat_stmmacenet_data *plat_dat,
+		     struct stmmac_resources *res);
 void stmmac_disable_eee_mode(struct stmmac_priv *priv);
 bool stmmac_eee_init(struct stmmac_priv *priv);
 
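
struct stmmac_resources lets each bus front-end hand the core its register
window, IRQs and MAC address in one call, instead of patching the private
structure after a successful probe. The PCI and platform conversions below
both follow this shape (sketch; values illustrative):

	struct stmmac_resources res;

	memset(&res, 0, sizeof(res));
	res.addr    = base;	/* ioremapped register window */
	res.irq     = irq;	/* main MAC interrupt */
	res.wol_irq = irq;	/* may fall back to the MAC IRQ */

	return stmmac_dvr_probe(dev, plat_dat, &res);
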
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 2c5ce2b..50f7a7a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -52,6 +52,7 @@
 #include "stmmac_ptp.h"
 #include "stmmac.h"
 #include <linux/reset.h>
+#include <linux/of_mdio.h>
 
 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
 
@@ -816,18 +817,25 @@
 	priv->speed = 0;
 	priv->oldduplex = -1;
 
-	if (priv->plat->phy_bus_name)
-		snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
-			 priv->plat->phy_bus_name, priv->plat->bus_id);
-	else
-		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
-			 priv->plat->bus_id);
+	if (priv->plat->phy_node) {
+		phydev = of_phy_connect(dev, priv->plat->phy_node,
+					&stmmac_adjust_link, 0, interface);
+	} else {
+		if (priv->plat->phy_bus_name)
+			snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
+				 priv->plat->phy_bus_name, priv->plat->bus_id);
+		else
+			snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
+				 priv->plat->bus_id);
 
-	snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
-		 priv->plat->phy_addr);
-	pr_debug("stmmac_init_phy:  trying to attach to %s\n", phy_id_fmt);
+		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+			 priv->plat->phy_addr);
+		pr_debug("stmmac_init_phy: trying to attach to %s\n",
+			 phy_id_fmt);
 
-	phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);
+		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
+				     interface);
+	}
 
 	if (IS_ERR(phydev)) {
 		pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -848,7 +856,7 @@
 	 * device as well.
 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
 	 */
-	if (phydev->phy_id == 0) {
+	if (!priv->plat->phy_node && phydev->phy_id == 0) {
 		phy_disconnect(phydev);
 		return -ENODEV;
 	}
@@ -975,13 +983,11 @@
 {
 	struct sk_buff *skb;
 
-	skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
-				 flags);
+	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
 	if (!skb) {
 		pr_err("%s: Rx init fails; skb is NULL\n", __func__);
 		return -ENOMEM;
 	}
-	skb_reserve(skb, NET_IP_ALIGN);
 	priv->rx_skbuff[i] = skb;
 	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
 						priv->dma_buf_sz,
@@ -1189,41 +1195,41 @@
 		goto err_tx_skbuff;
 
 	if (priv->extend_desc) {
-		priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
-						   sizeof(struct
-							  dma_extended_desc),
-						   &priv->dma_rx_phy,
-						   GFP_KERNEL);
+		priv->dma_erx = dma_zalloc_coherent(priv->device, rxsize *
+						    sizeof(struct
+							   dma_extended_desc),
+						    &priv->dma_rx_phy,
+						    GFP_KERNEL);
 		if (!priv->dma_erx)
 			goto err_dma;
 
-		priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
-						   sizeof(struct
-							  dma_extended_desc),
-						   &priv->dma_tx_phy,
-						   GFP_KERNEL);
+		priv->dma_etx = dma_zalloc_coherent(priv->device, txsize *
+						    sizeof(struct
+							   dma_extended_desc),
+						    &priv->dma_tx_phy,
+						    GFP_KERNEL);
 		if (!priv->dma_etx) {
 			dma_free_coherent(priv->device, priv->dma_rx_size *
-					sizeof(struct dma_extended_desc),
-					priv->dma_erx, priv->dma_rx_phy);
+					  sizeof(struct dma_extended_desc),
+					  priv->dma_erx, priv->dma_rx_phy);
 			goto err_dma;
 		}
 	} else {
-		priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
-						  sizeof(struct dma_desc),
-						  &priv->dma_rx_phy,
-						  GFP_KERNEL);
+		priv->dma_rx = dma_zalloc_coherent(priv->device, rxsize *
+						   sizeof(struct dma_desc),
+						   &priv->dma_rx_phy,
+						   GFP_KERNEL);
 		if (!priv->dma_rx)
 			goto err_dma;
 
-		priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
-						  sizeof(struct dma_desc),
-						  &priv->dma_tx_phy,
-						  GFP_KERNEL);
+		priv->dma_tx = dma_zalloc_coherent(priv->device, txsize *
+						   sizeof(struct dma_desc),
+						   &priv->dma_tx_phy,
+						   GFP_KERNEL);
 		if (!priv->dma_tx) {
 			dma_free_coherent(priv->device, priv->dma_rx_size *
-					sizeof(struct dma_desc),
-					priv->dma_rx, priv->dma_rx_phy);
+					  sizeof(struct dma_desc),
+					  priv->dma_rx, priv->dma_rx_phy);
 			goto err_dma;
 		}
 	}
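
For reference, dma_zalloc_coherent() is just the zeroing wrapper below, so the
switch removes any doubt about the descriptor rings starting out clean:

	/* include/linux/dma-mapping.h */
	static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
						dma_addr_t *dma_handle,
						gfp_t flag)
	{
		return dma_alloc_coherent(dev, size, dma_handle,
					  flag | __GFP_ZERO);
	}
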
@@ -2800,16 +2806,15 @@
  * stmmac_dvr_probe
  * @device: device pointer
  * @plat_dat: platform data pointer
- * @addr: iobase memory address
+ * @res: stmmac resource pointer
  * Description: this is the main probe function used to
  * call the alloc_etherdev, allocate the priv structure.
  * Return:
- * on success the new private structure is returned, otherwise the error
- * pointer.
+ * 0 on success, a negative errno otherwise.
  */
-struct stmmac_priv *stmmac_dvr_probe(struct device *device,
-				     struct plat_stmmacenet_data *plat_dat,
-				     void __iomem *addr)
+int stmmac_dvr_probe(struct device *device,
+		     struct plat_stmmacenet_data *plat_dat,
+		     struct stmmac_resources *res)
 {
 	int ret = 0;
 	struct net_device *ndev = NULL;
@@ -2817,7 +2822,7 @@
 
 	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
 	if (!ndev)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	SET_NETDEV_DEV(ndev, device);
 
@@ -2828,8 +2833,17 @@
 	stmmac_set_ethtool_ops(ndev);
 	priv->pause = pause;
 	priv->plat = plat_dat;
-	priv->ioaddr = addr;
-	priv->dev->base_addr = (unsigned long)addr;
+	priv->ioaddr = res->addr;
+	priv->dev->base_addr = (unsigned long)res->addr;
+
+	priv->dev->irq = res->irq;
+	priv->wol_irq = res->wol_irq;
+	priv->lpi_irq = res->lpi_irq;
+
+	if (res->mac)
+		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
+
+	dev_set_drvdata(device, priv);
 
 	/* Verify driver arguments */
 	stmmac_verify_args();
@@ -2944,7 +2958,7 @@
 		}
 	}
 
-	return priv;
+	return 0;
 
 error_mdio_register:
 	unregister_netdev(ndev);
@@ -2957,7 +2971,7 @@
 error_clk_get:
 	free_netdev(ndev);
 
-	return ERR_PTR(ret);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 3bca908..d71a721 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -163,7 +163,7 @@
 {
 	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
 	struct plat_stmmacenet_data *plat;
-	struct stmmac_priv *priv;
+	struct stmmac_resources res;
 	int i;
 	int ret;
 
@@ -214,19 +214,12 @@
 
 	pci_enable_msi(pdev);
 
-	priv = stmmac_dvr_probe(&pdev->dev, plat, pcim_iomap_table(pdev)[i]);
-	if (IS_ERR(priv)) {
-		dev_err(&pdev->dev, "%s: main driver probe failed\n", __func__);
-		return PTR_ERR(priv);
-	}
-	priv->dev->irq = pdev->irq;
-	priv->wol_irq = pdev->irq;
+	memset(&res, 0, sizeof(res));
+	res.addr = pcim_iomap_table(pdev)[i];
+	res.wol_irq = pdev->irq;
+	res.irq = pdev->irq;
 
-	pci_set_drvdata(pdev, priv->dev);
-
-	dev_dbg(&pdev->dev, "STMMAC PCI driver registration completed\n");
-
-	return 0;
+	return stmmac_dvr_probe(&pdev->dev, plat, &res);
 }
 
 /**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 68aec5c..f3918c7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -28,29 +28,11 @@
 #include <linux/of.h>
 #include <linux/of_net.h>
 #include <linux/of_device.h>
+#include <linux/of_mdio.h>
 
 #include "stmmac.h"
 #include "stmmac_platform.h"
 
-static const struct of_device_id stmmac_dt_ids[] = {
-	/* SoC specific glue layers should come before generic bindings */
-	{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
-	{ .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
-	{ .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
-	{ .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
-	{ .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data},
-	{ .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data},
-	{ .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data},
-	{ .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
-	{ .compatible = "st,spear600-gmac"},
-	{ .compatible = "snps,dwmac-3.610"},
-	{ .compatible = "snps,dwmac-3.70a"},
-	{ .compatible = "snps,dwmac-3.710"},
-	{ .compatible = "snps,dwmac"},
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
-
 #ifdef CONFIG_OF
 
 /**
@@ -129,14 +111,9 @@
 	struct device_node *np = pdev->dev.of_node;
 	struct stmmac_dma_cfg *dma_cfg;
 	const struct of_device_id *device;
+	struct device *dev = &pdev->dev;
 
-	if (!np)
-		return -ENODEV;
-
-	device = of_match_device(stmmac_dt_ids, &pdev->dev);
-	if (!device)
-		return -ENODEV;
-
+	device = of_match_device(dev->driver->of_match_table, dev);
 	if (device->data) {
 		const struct stmmac_of_data *data = device->data;
 		plat->has_gmac = data->has_gmac;
@@ -168,13 +145,24 @@
 	/* Default to phy auto-detection */
 	plat->phy_addr = -1;
 
+	/* If a phy-handle property is present, use the PHY it points to */
+	plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
+	/* If phy-handle is not specified, check if we have a fixed-phy */
+	if (!plat->phy_node && of_phy_is_fixed_link(np)) {
+		if (of_phy_register_fixed_link(np) < 0)
+			return -ENODEV;
+
+		plat->phy_node = of_node_get(np);
+	}
+
 	/* "snps,phy-addr" is not a standard property. Mark it as deprecated
 	 * and warn of its use. Remove this when phy node support is added.
 	 */
 	if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
 		dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
 
-	if (plat->phy_bus_name)
+	if (plat->phy_node || plat->phy_bus_name)
 		plat->mdio_bus_data = NULL;
 	else
 		plat->mdio_bus_data =
@@ -232,8 +220,10 @@
 	if (of_find_property(np, "snps,pbl", NULL)) {
 		dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
 				       GFP_KERNEL);
-		if (!dma_cfg)
+		if (!dma_cfg) {
+			of_node_put(np);
 			return -ENOMEM;
+		}
 		plat->dma_cfg = dma_cfg;
 		of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
 		dma_cfg->fixed_burst =
@@ -268,27 +258,26 @@
  * the necessary platform resources, invoke custom helper (if required) and
  * invoke the main probe function.
  */
-static int stmmac_pltfr_probe(struct platform_device *pdev)
+int stmmac_pltfr_probe(struct platform_device *pdev)
 {
+	struct stmmac_resources stmmac_res;
 	int ret = 0;
 	struct resource *res;
 	struct device *dev = &pdev->dev;
-	void __iomem *addr = NULL;
-	struct stmmac_priv *priv = NULL;
 	struct plat_stmmacenet_data *plat_dat = NULL;
-	const char *mac = NULL;
-	int irq, wol_irq, lpi_irq;
+
+	memset(&stmmac_res, 0, sizeof(stmmac_res));
 
 	/* Get IRQ information early to have an ability to ask for deferred
 	 * probe if needed before we went too far with resource allocation.
 	 */
-	irq = platform_get_irq_byname(pdev, "macirq");
-	if (irq < 0) {
-		if (irq != -EPROBE_DEFER) {
+	stmmac_res.irq = platform_get_irq_byname(pdev, "macirq");
+	if (stmmac_res.irq < 0) {
+		if (stmmac_res.irq != -EPROBE_DEFER) {
 			dev_err(dev,
 				"MAC IRQ configuration information not found\n");
 		}
-		return irq;
+		return stmmac_res.irq;
 	}
 
 	/* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
@@ -298,21 +287,21 @@
 	 * In case the wake up interrupt is not passed from the platform
 	 * so the driver will continue to use the mac irq (ndev->irq)
 	 */
-	wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
-	if (wol_irq < 0) {
-		if (wol_irq == -EPROBE_DEFER)
+	stmmac_res.wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+	if (stmmac_res.wol_irq < 0) {
+		if (stmmac_res.wol_irq == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
-		wol_irq = irq;
+		stmmac_res.wol_irq = stmmac_res.irq;
 	}
 
-	lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
-	if (lpi_irq == -EPROBE_DEFER)
+	stmmac_res.lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+	if (stmmac_res.lpi_irq == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	addr = devm_ioremap_resource(dev, res);
-	if (IS_ERR(addr))
-		return PTR_ERR(addr);
+	stmmac_res.addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(stmmac_res.addr))
+		return PTR_ERR(stmmac_res.addr);
 
 	plat_dat = dev_get_platdata(&pdev->dev);
 
@@ -332,7 +321,7 @@
 	plat_dat->unicast_filter_entries = 1;
 
 	if (pdev->dev.of_node) {
-		ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
+		ret = stmmac_probe_config_dt(pdev, plat_dat, &stmmac_res.mac);
 		if (ret) {
 			pr_err("%s: main dt probe failed", __func__);
 			return ret;
@@ -353,27 +342,9 @@
 			return ret;
 	}
 
-	priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
-	if (IS_ERR(priv)) {
-		pr_err("%s: main driver probe failed", __func__);
-		return PTR_ERR(priv);
-	}
-
-	/* Copy IRQ values to priv structure which is now avaialble */
-	priv->dev->irq = irq;
-	priv->wol_irq = wol_irq;
-	priv->lpi_irq = lpi_irq;
-
-	/* Get MAC address if available (DT) */
-	if (mac)
-		memcpy(priv->dev->dev_addr, mac, ETH_ALEN);
-
-	platform_set_drvdata(pdev, priv->dev);
-
-	pr_debug("STMMAC platform driver registration completed");
-
-	return 0;
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
+EXPORT_SYMBOL_GPL(stmmac_pltfr_probe);
 
 /**
  * stmmac_pltfr_remove
@@ -381,7 +352,7 @@
  * Description: this function calls the main to free the net resources
  * and calls the platforms hook and release the resources (e.g. mem).
  */
-static int stmmac_pltfr_remove(struct platform_device *pdev)
+int stmmac_pltfr_remove(struct platform_device *pdev)
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct stmmac_priv *priv = netdev_priv(ndev);
@@ -395,6 +366,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
 
 #ifdef CONFIG_PM_SLEEP
 /**
@@ -438,21 +410,6 @@
 }
 #endif /* CONFIG_PM_SLEEP */
 
-static SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops,
-			 stmmac_pltfr_suspend, stmmac_pltfr_resume);
-
-static struct platform_driver stmmac_pltfr_driver = {
-	.probe = stmmac_pltfr_probe,
-	.remove = stmmac_pltfr_remove,
-	.driver = {
-		   .name = STMMAC_RESOURCE_NAME,
-		   .pm = &stmmac_pltfr_pm_ops,
-		   .of_match_table = of_match_ptr(stmmac_dt_ids),
-	},
-};
-
-module_platform_driver(stmmac_pltfr_driver);
-
-MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
-MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
-MODULE_LICENSE("GPL");
+SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
+		  stmmac_pltfr_resume);
+EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
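
With stmmac_pltfr_probe/stmmac_pltfr_remove and stmmac_pltfr_pm_ops now
exported, a new glue layer reduces to the boilerplate repeated by the five
drivers above. A minimal skeleton (the "acme" names are hypothetical):

	static const struct stmmac_of_data acme_dwmac_data = {
		.has_gmac = 1,
	};

	static const struct of_device_id acme_dwmac_match[] = {
		{ .compatible = "acme,acme-dwmac", .data = &acme_dwmac_data },
		{ }
	};
	MODULE_DEVICE_TABLE(of, acme_dwmac_match);

	static struct platform_driver acme_dwmac_driver = {
		.probe  = stmmac_pltfr_probe,
		.remove = stmmac_pltfr_remove,
		.driver = {
			.name		= "acme-dwmac",
			.pm		= &stmmac_pltfr_pm_ops,
			.of_match_table	= acme_dwmac_match,
		},
	};
	module_platform_driver(acme_dwmac_driver);
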
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 093eb99..71da86d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -19,11 +19,8 @@
 #ifndef __STMMAC_PLATFORM_H__
 #define __STMMAC_PLATFORM_H__
 
-extern const struct stmmac_of_data meson6_dwmac_data;
-extern const struct stmmac_of_data sun7i_gmac_data;
-extern const struct stmmac_of_data stih4xx_dwmac_data;
-extern const struct stmmac_of_data stid127_dwmac_data;
-extern const struct stmmac_of_data socfpga_gmac_data;
-extern const struct stmmac_of_data rk3288_gmac_data;
+int stmmac_pltfr_probe(struct platform_device *pdev);
+int stmmac_pltfr_remove(struct platform_device *pdev);
+extern const struct dev_pm_ops stmmac_pltfr_pm_ops;
 
 #endif /* __STMMAC_PLATFORM_H__ */
diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig
index 3074aa37..dee94b6 100644
--- a/drivers/net/ethernet/sun/Kconfig
+++ b/drivers/net/ethernet/sun/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on SUN3 || SBUS || PCI || SUN_LDOMS
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say
-	  Y and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/tehuti/Kconfig b/drivers/net/ethernet/tehuti/Kconfig
index 1fc027e..b17f0ca 100644
--- a/drivers/net/ethernet/tehuti/Kconfig
+++ b/drivers/net/ethernet/tehuti/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 631e0af..e7f0b7d 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI || EISA || AR7 || ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
@@ -110,9 +108,7 @@
 	depends on (PCI || EISA)
 	---help---
 	  If you have a PCI Ethernet network card based on the ThunderLAN chip
-	  which is supported by this driver, say Y and read the
-	  Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  which is supported by this driver, say Y here.
 
 	  Devices currently supported by this driver are Compaq Netelligent,
 	  Compaq NetFlex and Olicom cards.  Please read the file
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index b536b4c..4628205 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1361,7 +1361,6 @@
 	if (cpsw_common_res_usage_state(priv) <= 1) {
 		cpts_unregister(priv->cpts);
 		cpsw_intr_disable(priv);
-		cpdma_ctlr_int_ctrl(priv->dma, false);
 		cpdma_ctlr_stop(priv->dma);
 		cpsw_ale_stop(priv->ale);
 	}
@@ -1456,7 +1455,7 @@
 
 		if (priv->cpts->rx_enable)
 			ctrl |= CTRL_V2_RX_TS_BITS;
-	break;
+		break;
 	case CPSW_VERSION_3:
 	default:
 		ctrl &= ~CTRL_V3_ALL_TS_MASK;
@@ -1466,7 +1465,7 @@
 
 		if (priv->cpts->rx_enable)
 			ctrl |= CTRL_V3_RX_TS_BITS;
-	break;
+		break;
 	}
 
 	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
@@ -1589,10 +1588,8 @@
 	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
 	ndev->stats.tx_errors++;
 	cpsw_intr_disable(priv);
-	cpdma_ctlr_int_ctrl(priv->dma, false);
 	cpdma_chan_stop(priv->txch);
 	cpdma_chan_start(priv->txch);
-	cpdma_ctlr_int_ctrl(priv->dma, true);
 	cpsw_intr_enable(priv);
 }
 
@@ -1629,10 +1626,8 @@
 	struct cpsw_priv *priv = netdev_priv(ndev);
 
 	cpsw_intr_disable(priv);
-	cpdma_ctlr_int_ctrl(priv->dma, false);
 	cpsw_rx_interrupt(priv->irqs_table[0], priv);
 	cpsw_tx_interrupt(priv->irqs_table[1], priv);
-	cpdma_ctlr_int_ctrl(priv->dma, true);
 	cpsw_intr_enable(priv);
 }
 #endif
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 6e927b4..43b061b 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -268,39 +268,6 @@
 }
 EXPORT_SYMBOL_GPL(cpsw_ale_flush_multicast);
 
-static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
-				 int port_mask)
-{
-	int port;
-
-	port = cpsw_ale_get_port_num(ale_entry);
-	if ((BIT(port) & port_mask) == 0)
-		return; /* ports dont intersect, not interested */
-	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
-}
-
-int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
-{
-	u32 ale_entry[ALE_ENTRY_WORDS];
-	int ret, idx;
-
-	for (idx = 0; idx < ale->params.ale_entries; idx++) {
-		cpsw_ale_read(ale, idx, ale_entry);
-		ret = cpsw_ale_get_entry_type(ale_entry);
-		if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
-			continue;
-
-		if (cpsw_ale_get_mcast(ale_entry))
-			cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
-		else
-			cpsw_ale_flush_ucast(ale, ale_entry, port_mask);
-
-		cpsw_ale_write(ale, idx, ale_entry);
-	}
-	return 0;
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_flush);
-
 static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
 						int flags, u16 vid)
 {
@@ -752,18 +719,6 @@
 	}
 }
 
-int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout)
-{
-	del_timer_sync(&ale->timer);
-	ale->ageout = ageout * HZ;
-	if (ale->ageout) {
-		ale->timer.expires = jiffies + ale->ageout;
-		add_timer(&ale->timer);
-	}
-	return 0;
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_set_ageout);
-
 void cpsw_ale_start(struct cpsw_ale *ale)
 {
 	u32 rev;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index af1e7ec..a700189 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -90,8 +90,6 @@
 void cpsw_ale_start(struct cpsw_ale *ale);
 void cpsw_ale_stop(struct cpsw_ale *ale);
 
-int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
-int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
 int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
 int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
 		       int flags, u16 vid);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 43efc3a..5ec4ed3 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -537,7 +537,7 @@
 static void netcp_frag_free(bool is_frag, void *ptr)
 {
 	if (is_frag)
-		put_page(virt_to_head_page(ptr));
+		skb_free_frag(ptr);
 	else
 		kfree(ptr);
 }
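
skb_free_frag(), added earlier in this series, routes the head fragment back
through the page-fragment allocator instead of open-coding the page lookup;
its definition at this point is simply:

	/* include/linux/skbuff.h */
	static inline void skb_free_frag(void *addr)
	{
		__free_page_frag(addr);
	}
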
@@ -698,7 +698,6 @@
 		}
 	}
 
-	netcp->ndev->last_rx = jiffies;
 	netcp->ndev->stats.rx_packets++;
 	netcp->ndev->stats.rx_bytes += skb->len;
 
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 3d8f60d..6f0a449 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -721,9 +721,6 @@
 	if (!hash_default)
 		__inv_buffer(buf, len);
 
-	/* ISSUE: Is this needed? */
-	dev->last_rx = jiffies;
-
 #ifdef TILE_NET_DUMP_PACKETS
 	dump_packet(buf, len, "rx");
 #endif /* TILE_NET_DUMP_PACKETS */
diff --git a/drivers/net/ethernet/toshiba/Kconfig b/drivers/net/ethernet/toshiba/Kconfig
index 5d244b6..6f1d5b6 100644
--- a/drivers/net/ethernet/toshiba/Kconfig
+++ b/drivers/net/ethernet/toshiba/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCI && (PPC_IBM_CELL_BLADE || MIPS) || PPC_PS3
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index ac62a5e..79f0ec4 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -102,6 +102,18 @@
 	}
 }
 
+/**
+ * gelic_descr_get_status -- returns the status of a descriptor
+ * @descr: descriptor to look at
+ *
+ * returns the status as in the dmac_cmd_status field of the descriptor
+ */
+static enum gelic_descr_dma_status
+gelic_descr_get_status(struct gelic_descr *descr)
+{
+	return be32_to_cpu(descr->dmac_cmd_status) & GELIC_DESCR_DMA_STAT_MASK;
+}
+
 static int gelic_card_set_link_mode(struct gelic_card *card, int mode)
 {
 	int status;
@@ -278,18 +290,6 @@
 }
 
 /**
- * gelic_descr_get_status -- returns the status of a descriptor
- * @descr: descriptor to look at
- *
- * returns the status as in the dmac_cmd_status field of the descriptor
- */
-static enum gelic_descr_dma_status
-gelic_descr_get_status(struct gelic_descr *descr)
-{
-	return be32_to_cpu(descr->dmac_cmd_status) & GELIC_DESCR_DMA_STAT_MASK;
-}
-
-/**
  * gelic_card_free_chain - free descriptor chain
  * @card: card structure
  * @descr_in: address of desc
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 8e9371a..3c54a2c 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -604,8 +604,7 @@
 	int i;
 	u32 reg;
 	struct spider_net_card *card = netdev_priv(netdev);
-	unsigned long bitmask[SPIDER_NET_MULTICAST_HASHES / BITS_PER_LONG] =
-		{0, };
+	DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES) = {};
 
 	spider_net_set_promisc(card);
 
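[DECLARE_BITMAP(name, bits) expands to "unsigned long name[BITS_TO_LONGS(bits)]", and BITS_TO_LONGS() rounds up, so the array can never be undersized when the bit count is not a multiple of BITS_PER_LONG, which the hand-rolled division it replaces could be. A small illustrative use (names hypothetical; the patch itself zeroes the bitmap with "= {}" at the definition):

	#include <linux/bitmap.h>
	#include <linux/bitops.h>

	static void bitmap_demo(void)
	{
		DECLARE_BITMAP(hashes, 100);	/* two longs on 64-bit, not one */

		bitmap_zero(hashes, 100);
		set_bit(42, hashes);
	}
]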
diff --git a/drivers/net/ethernet/tundra/Kconfig b/drivers/net/ethernet/tundra/Kconfig
index cf7d69b..81d845e 100644
--- a/drivers/net/ethernet/tundra/Kconfig
+++ b/drivers/net/ethernet/tundra/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on TSI108_BRIDGE
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index f66ddae..8b0b1d6 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -6,9 +6,7 @@
 	bool "VIA devices"
 	default y
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
@@ -19,7 +17,7 @@
 
 config VIA_RHINE
 	tristate "VIA Rhine support"
-	depends on (PCI || USE_OF)
+	depends on (PCI || OF_IRQ)
 	select CRC32
 	select MII
 	---help---
@@ -43,7 +41,7 @@
 
 config VIA_VELOCITY
 	tristate "VIA Velocity support"
-	depends on (PCI || USE_OF)
+	depends on (PCI || (OF_ADDRESS && OF_IRQ))
 	select CRC32
 	select CRC_CCITT
 	select MII
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index de28504..a832637 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -96,7 +96,6 @@
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
-#include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
@@ -472,8 +471,7 @@
 
 	/* Frequently used values: keep some adjacent for cache effect. */
 	u32 quirks;
-	struct rx_desc *rx_head_desc;
-	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
+	unsigned int cur_rx;
 	unsigned int cur_tx, dirty_tx;
 	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
 	struct rhine_stats rx_stats;
@@ -1213,17 +1211,61 @@
 
 }
 
-static void alloc_rbufs(struct net_device *dev)
+struct rhine_skb_dma {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+};
+
+static inline int rhine_skb_dma_init(struct net_device *dev,
+				     struct rhine_skb_dma *sd)
 {
 	struct rhine_private *rp = netdev_priv(dev);
 	struct device *hwdev = dev->dev.parent;
-	dma_addr_t next;
+	const int size = rp->rx_buf_sz;
+
+	sd->skb = netdev_alloc_skb(dev, size);
+	if (!sd->skb)
+		return -ENOMEM;
+
+	sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
+		netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
+		dev_kfree_skb_any(sd->skb);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void rhine_reset_rbufs(struct rhine_private *rp)
+{
 	int i;
 
-	rp->dirty_rx = rp->cur_rx = 0;
+	rp->cur_rx = 0;
+
+	for (i = 0; i < RX_RING_SIZE; i++)
+		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
+}
+
+static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
+					   struct rhine_skb_dma *sd, int entry)
+{
+	rp->rx_skbuff_dma[entry] = sd->dma;
+	rp->rx_skbuff[entry] = sd->skb;
+
+	rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
+	dma_wmb();
+}
+
+static void free_rbufs(struct net_device *dev);
+
+static int alloc_rbufs(struct net_device *dev)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+	dma_addr_t next;
+	int rc, i;
 
 	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
-	rp->rx_head_desc = &rp->rx_ring[0];
 	next = rp->rx_ring_dma;
 
 	/* Init the ring entries */
@@ -1239,23 +1281,20 @@
 
 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
-		rp->rx_skbuff[i] = skb;
-		if (skb == NULL)
-			break;
+		struct rhine_skb_dma sd;
 
-		rp->rx_skbuff_dma[i] =
-			dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
-				       DMA_FROM_DEVICE);
-		if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
-			rp->rx_skbuff_dma[i] = 0;
-			dev_kfree_skb(skb);
-			break;
+		rc = rhine_skb_dma_init(dev, &sd);
+		if (rc < 0) {
+			free_rbufs(dev);
+			goto out;
 		}
-		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
-		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
+
+		rhine_skb_dma_nic_store(rp, &sd, i);
 	}
-	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+	rhine_reset_rbufs(rp);
+out:
+	return rc;
 }
 
 static void free_rbufs(struct net_device* dev)
@@ -1659,16 +1698,18 @@
 
 	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
 	if (rc)
-		return rc;
+		goto out;
 
 	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
 
 	rc = alloc_ring(dev);
-	if (rc) {
-		free_irq(rp->irq, dev);
-		return rc;
-	}
-	alloc_rbufs(dev);
+	if (rc < 0)
+		goto out_free_irq;
+
+	rc = alloc_rbufs(dev);
+	if (rc < 0)
+		goto out_free_ring;
+
 	alloc_tbufs(dev);
 	rhine_chip_reset(dev);
 	rhine_task_enable(rp);
@@ -1680,7 +1721,14 @@
 
 	netif_start_queue(dev);
 
-	return 0;
+out:
+	return rc;
+
+out_free_ring:
+	free_ring(dev);
+out_free_irq:
+	free_irq(rp->irq, dev);
+	goto out;
 }
 
 static void rhine_reset_task(struct work_struct *work)
@@ -1700,9 +1748,9 @@
 
 	/* clear all descriptors */
 	free_tbufs(dev);
-	free_rbufs(dev);
 	alloc_tbufs(dev);
-	alloc_rbufs(dev);
+
+	rhine_reset_rbufs(rp);
 
 	/* Reinitialize the hardware. */
 	rhine_chip_reset(dev);
@@ -1730,6 +1778,11 @@
 	schedule_work(&rp->reset_task);
 }
 
+static inline bool rhine_tx_queue_full(struct rhine_private *rp)
+{
+	return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
+}
+
 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 				  struct net_device *dev)
 {
@@ -1800,11 +1853,17 @@
 
 	netdev_sent_queue(dev, skb->len);
 	/* lock eth irq */
-	wmb();
+	dma_wmb();
 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
 	wmb();
 
 	rp->cur_tx++;
+	/*
+	 * Make the cur_tx update visible soon after the NIC has seen the
+	 * transmit request; otherwise the transmit completion handler
+	 * could miss it.
+	 */
+	smp_wmb();
 
 	/* Non-x86 Todo: explicitly flush cache lines here. */
 
@@ -1817,8 +1876,14 @@
 	       ioaddr + ChipCmd1);
 	IOSYNC;
 
-	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
+	/* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */
+	if (rhine_tx_queue_full(rp)) {
 		netif_stop_queue(dev);
+		smp_rmb();
+		/* Rejuvenate. */
+		if (!rhine_tx_queue_full(rp))
+			netif_wake_queue(dev);
+	}
 
 	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
 		  rp->cur_tx - 1, entry);
@@ -1866,13 +1931,24 @@
 {
 	struct rhine_private *rp = netdev_priv(dev);
 	struct device *hwdev = dev->dev.parent;
-	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
 	unsigned int pkts_compl = 0, bytes_compl = 0;
+	unsigned int dirty_tx = rp->dirty_tx;
+	unsigned int cur_tx;
 	struct sk_buff *skb;
 
+	/*
+	 * The race with rhine_start_tx is harmless as long as the driver
+	 * only acts on a cur_tx value that was current when the packet was
+	 * handed to the network chipset.
+	 * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx.
+	 */
+	smp_rmb();
+	cur_tx = rp->cur_tx;
 	/* find and cleanup dirty tx descriptors */
-	while (rp->dirty_tx != rp->cur_tx) {
-		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
+	while (dirty_tx != cur_tx) {
+		unsigned int entry = dirty_tx % TX_RING_SIZE;
+		u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
+
 		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
 			  entry, txstatus);
 		if (txstatus & DescOwn)
@@ -1921,12 +1997,23 @@
 		pkts_compl++;
 		dev_consume_skb_any(skb);
 		rp->tx_skbuff[entry] = NULL;
-		entry = (++rp->dirty_tx) % TX_RING_SIZE;
+		dirty_tx++;
 	}
 
+	rp->dirty_tx = dirty_tx;
+	/* Pity we can't rely on the nearby BQL completion implicit barrier. */
+	smp_wmb();
+
 	netdev_completed_queue(dev, pkts_compl, bytes_compl);
-	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
+
+	/* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */
+	if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
 		netif_wake_queue(dev);
+		smp_rmb();
+		/* Rejuvenate. */
+		if (rhine_tx_queue_full(rp))
+			netif_stop_queue(dev);
+	}
 }
 
 /**
@@ -1944,22 +2031,33 @@
 	return be16_to_cpup((__be16 *)trailer);
 }
 
+static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
+				     int data_size)
+{
+	dma_rmb();
+	if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) {
+		u16 vlan_tci;
+
+		vlan_tci = rhine_get_vlan_tci(skb, data_size);
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+	}
+}
+
 /* Process up to limit frames from receive ring */
 static int rhine_rx(struct net_device *dev, int limit)
 {
 	struct rhine_private *rp = netdev_priv(dev);
 	struct device *hwdev = dev->dev.parent;
-	int count;
 	int entry = rp->cur_rx % RX_RING_SIZE;
+	int count;
 
 	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
-		  entry, le32_to_cpu(rp->rx_head_desc->rx_status));
+		  entry, le32_to_cpu(rp->rx_ring[entry].rx_status));
 
 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
 	for (count = 0; count < limit; ++count) {
-		struct rx_desc *desc = rp->rx_head_desc;
+		struct rx_desc *desc = rp->rx_ring + entry;
 		u32 desc_status = le32_to_cpu(desc->rx_status);
-		u32 desc_length = le32_to_cpu(desc->desc_length);
 		int data_size = desc_status >> 16;
 
 		if (desc_status & DescOwn)
@@ -1975,10 +2073,6 @@
 	"entry %#x length %d status %08x!\n",
 					    entry, data_size,
 					    desc_status);
-				netdev_warn(dev,
-					    "Oversized Ethernet frame %p vs %p\n",
-					    rp->rx_head_desc,
-					    &rp->rx_ring[entry]);
 				dev->stats.rx_length_errors++;
 			} else if (desc_status & RxErr) {
 				/* There was a error. */
@@ -2000,16 +2094,17 @@
 				}
 			}
 		} else {
-			struct sk_buff *skb = NULL;
 			/* Length should omit the CRC */
 			int pkt_len = data_size - 4;
-			u16 vlan_tci = 0;
+			struct sk_buff *skb;
 
 			/* Check if the packet is long enough to accept without
 			   copying to a minimally-sized skbuff. */
-			if (pkt_len < rx_copybreak)
+			if (pkt_len < rx_copybreak) {
 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
-			if (skb) {
+				if (unlikely(!skb))
+					goto drop;
+
 				dma_sync_single_for_cpu(hwdev,
 							rp->rx_skbuff_dma[entry],
 							rp->rx_buf_sz,
@@ -2018,32 +2113,31 @@
 				skb_copy_to_linear_data(skb,
 						 rp->rx_skbuff[entry]->data,
 						 pkt_len);
-				skb_put(skb, pkt_len);
+
 				dma_sync_single_for_device(hwdev,
 							   rp->rx_skbuff_dma[entry],
 							   rp->rx_buf_sz,
 							   DMA_FROM_DEVICE);
 			} else {
+				struct rhine_skb_dma sd;
+
+				if (unlikely(rhine_skb_dma_init(dev, &sd) < 0))
+					goto drop;
+
 				skb = rp->rx_skbuff[entry];
-				if (skb == NULL) {
-					netdev_err(dev, "Inconsistent Rx descriptor chain\n");
-					break;
-				}
-				rp->rx_skbuff[entry] = NULL;
-				skb_put(skb, pkt_len);
+
 				dma_unmap_single(hwdev,
 						 rp->rx_skbuff_dma[entry],
 						 rp->rx_buf_sz,
 						 DMA_FROM_DEVICE);
+				rhine_skb_dma_nic_store(rp, &sd, entry);
 			}
 
-			if (unlikely(desc_length & DescTag))
-				vlan_tci = rhine_get_vlan_tci(skb, data_size);
-
+			skb_put(skb, pkt_len);
 			skb->protocol = eth_type_trans(skb, dev);
 
-			if (unlikely(desc_length & DescTag))
-				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+			rhine_rx_vlan_tag(skb, desc, data_size);
+
 			netif_receive_skb(skb);
 
 			u64_stats_update_begin(&rp->rx_stats.syncp);
@@ -2051,35 +2145,16 @@
 			rp->rx_stats.packets++;
 			u64_stats_update_end(&rp->rx_stats.syncp);
 		}
+give_descriptor_to_nic:
+		desc->rx_status = cpu_to_le32(DescOwn);
 		entry = (++rp->cur_rx) % RX_RING_SIZE;
-		rp->rx_head_desc = &rp->rx_ring[entry];
-	}
-
-	/* Refill the Rx ring buffers. */
-	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
-		struct sk_buff *skb;
-		entry = rp->dirty_rx % RX_RING_SIZE;
-		if (rp->rx_skbuff[entry] == NULL) {
-			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
-			rp->rx_skbuff[entry] = skb;
-			if (skb == NULL)
-				break;	/* Better luck next round. */
-			rp->rx_skbuff_dma[entry] =
-				dma_map_single(hwdev, skb->data,
-					       rp->rx_buf_sz,
-					       DMA_FROM_DEVICE);
-			if (dma_mapping_error(hwdev,
-					      rp->rx_skbuff_dma[entry])) {
-				dev_kfree_skb(skb);
-				rp->rx_skbuff_dma[entry] = 0;
-				break;
-			}
-			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
-		}
-		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
 	}
 
 	return count;
+
+drop:
+	dev->stats.rx_dropped++;
+	goto give_descriptor_to_nic;
 }
 
 static void rhine_restart_tx(struct net_device *dev) {
@@ -2484,9 +2559,8 @@
 	enable_mmio(rp->pioaddr, rp->quirks);
 	rhine_power_init(dev);
 	free_tbufs(dev);
-	free_rbufs(dev);
 	alloc_tbufs(dev);
-	alloc_rbufs(dev);
+	rhine_reset_rbufs(rp);
 	rhine_task_enable(rp);
 	spin_lock_bh(&rp->lock);
 	init_registers(dev);
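[The via-rhine barrier work above is the classic lockless stop/wake protocol: each side publishes its ring index with smp_wmb() before testing queue fullness, then re-checks after stopping (or waking) the queue under smp_rmb(), so an update racing in from the other CPU cannot strand the queue stopped or overrun it. A condensed sketch of the pairing, with illustrative names rather than the driver's actual fields:

	#include <linux/netdevice.h>

	#define TX_QUEUE_LEN	60		/* assumed queue depth */

	struct txq {
		unsigned int cur;		/* written only by start_xmit */
		unsigned int dirty;		/* written only by completion */
	};

	static bool txq_full(const struct txq *q)
	{
		return (q->cur - q->dirty) >= TX_QUEUE_LEN;
	}

	static void producer(struct txq *q, struct net_device *dev)
	{
		q->cur++;
		smp_wmb();			/* publish cur before testing */
		if (txq_full(q)) {
			netif_stop_queue(dev);
			smp_rmb();		/* observe a racing dirty update */
			if (!txq_full(q))	/* completion ran meanwhile */
				netif_wake_queue(dev);
		}
	}

	static void consumer(struct txq *q, struct net_device *dev,
			     unsigned int dirty)
	{
		q->dirty = dirty;
		smp_wmb();			/* publish dirty before testing */
		if (!txq_full(q) && netif_queue_stopped(dev)) {
			netif_wake_queue(dev);
			smp_rmb();		/* observe a racing cur update */
			if (txq_full(q))	/* xmit refilled it meanwhile */
				netif_stop_queue(dev);
		}
	}
]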
diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig
index b4d2816..f98b91d 100644
--- a/drivers/net/ethernet/wiznet/Kconfig
+++ b/drivers/net/ethernet/wiznet/Kconfig
@@ -7,9 +7,7 @@
 	depends on HAS_IOMEM
 	default y
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 7b90a5e..4f5c024 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index af2694d..5a1068d 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -62,12 +62,12 @@
 
 u32 temac_ior(struct temac_local *lp, int offset)
 {
-	return in_be32((u32 *)(lp->regs + offset));
+	return in_be32(lp->regs + offset);
 }
 
 void temac_iow(struct temac_local *lp, int offset, u32 value)
 {
-	out_be32((u32 *) (lp->regs + offset), value);
+	out_be32(lp->regs + offset, value);
 }
 
 int temac_indirect_busywait(struct temac_local *lp)
@@ -124,7 +124,7 @@
  */
 static u32 temac_dma_in32(struct temac_local *lp, int reg)
 {
-	return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
+	return in_be32(lp->sdma_regs + (reg << 2));
 }
 
 /**
@@ -134,7 +134,7 @@
  */
 static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
 {
-	out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
+	out_be32(lp->sdma_regs + (reg << 2), value);
 }
 
 /* DMA register access functions can be DCR based or memory mapped.
@@ -400,7 +400,7 @@
 	mutex_unlock(&lp->indirect_mutex);
 }
 
-struct temac_option {
+static struct temac_option {
 	int flg;
 	u32 opt;
 	u32 reg;
@@ -587,7 +587,7 @@
 	ndev->trans_start = jiffies; /* prevent tx timeout */
 }
 
-void temac_adjust_link(struct net_device *ndev)
+static void temac_adjust_link(struct net_device *ndev)
 {
 	struct temac_local *lp = netdev_priv(ndev);
 	struct phy_device *phy = lp->phy_dev;
@@ -688,10 +688,8 @@
 	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 
 	if (temac_check_tx_bd_space(lp, num_frag)) {
-		if (!netif_queue_stopped(ndev)) {
+		if (!netif_queue_stopped(ndev))
 			netif_stop_queue(ndev);
-			return NETDEV_TX_BUSY;
-		}
 		return NETDEV_TX_BUSY;
 	}
 
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 4c9b4fa..7cb9aba 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -11,16 +11,16 @@
 #include <linux/netdevice.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/if_vlan.h>
 
 /* Packet size info */
 #define XAE_HDR_SIZE			14 /* Size of Ethernet header */
-#define XAE_HDR_VLAN_SIZE		18 /* Size of an Ethernet hdr + VLAN */
 #define XAE_TRL_SIZE			 4 /* Size of Ethernet trailer (FCS) */
 #define XAE_MTU			      1500 /* Max MTU of an Ethernet frame */
 #define XAE_JUMBO_MTU		      9000 /* Max MTU of a jumbo Eth. frame */
 
 #define XAE_MAX_FRAME_SIZE	 (XAE_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
-#define XAE_MAX_VLAN_FRAME_SIZE  (XAE_MTU + XAE_HDR_VLAN_SIZE + XAE_TRL_SIZE)
+#define XAE_MAX_VLAN_FRAME_SIZE  (XAE_MTU + VLAN_ETH_HLEN + XAE_TRL_SIZE)
 #define XAE_MAX_JUMBO_FRAME_SIZE (XAE_JUMBO_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
 
 /* Configuration options */
@@ -38,18 +38,21 @@
 #define XAE_OPTION_FLOW_CONTROL			(1 << 4)
 
 /* Strip FCS and PAD from incoming frames. Note: PAD from VLAN frames is not
- * stripped. Default: disabled (set) */
+ * stripped. Default: disabled (set)
+ */
 #define XAE_OPTION_FCS_STRIP			(1 << 5)
 
 /* Generate FCS field and add PAD automatically for outgoing frames.
- * Default: enabled (set) */
+ * Default: enabled (set)
+ */
 #define XAE_OPTION_FCS_INSERT			(1 << 6)
 
 /* Enable Length/Type error checking for incoming frames. When this option is
  * set, the MAC will filter frames that have a mismatched type/length field
  * and if XAE_OPTION_REPORT_RXERR is set, the user is notified when these
  * types of frames are encountered. When this option is cleared, the MAC will
- * allow these types of frames to be received. Default: enabled (set) */
+ * allow these types of frames to be received. Default: enabled (set)
+ */
 #define XAE_OPTION_LENTYPE_ERR			(1 << 7)
 
 /* Enable the transmitter. Default: enabled (set) */
@@ -159,12 +162,12 @@
 #define XAE_MDIO_MWD_OFFSET	0x00000508 /* MII Management Write Data */
 #define XAE_MDIO_MRD_OFFSET	0x0000050C /* MII Management Read Data */
 #define XAE_MDIO_MIS_OFFSET	0x00000600 /* MII Management Interrupt Status */
-#define XAE_MDIO_MIP_OFFSET	0x00000620 /* MII Mgmt Interrupt Pending
-					    * register offset */
-#define XAE_MDIO_MIE_OFFSET	0x00000640 /* MII Management Interrupt Enable
-					    * register offset */
-#define XAE_MDIO_MIC_OFFSET	0x00000660 /* MII Management Interrupt Clear
-					    * register offset. */
+/* MII Mgmt Interrupt Pending register offset */
+#define XAE_MDIO_MIP_OFFSET	0x00000620
+/* MII Management Interrupt Enable register offset */
+#define XAE_MDIO_MIE_OFFSET	0x00000640
+/* MII Management Interrupt Clear register offset. */
+#define XAE_MDIO_MIC_OFFSET	0x00000660
 #define XAE_UAW0_OFFSET		0x00000700 /* Unicast address word 0 */
 #define XAE_UAW1_OFFSET		0x00000704 /* Unicast address word 1 */
 #define XAE_FMI_OFFSET		0x00000708 /* Filter Mask Index */
@@ -176,18 +179,17 @@
 #define XAE_MCAST_TABLE_OFFSET	0x00020000 /* Multicast table address */
 
 /* Bit Masks for Axi Ethernet RAF register */
-#define XAE_RAF_MCSTREJ_MASK		0x00000002 /* Reject receive multicast
-						    * destination address */
-#define XAE_RAF_BCSTREJ_MASK		0x00000004 /* Reject receive broadcast
-						    * destination address */
+/* Reject receive multicast destination address */
+#define XAE_RAF_MCSTREJ_MASK		0x00000002
+/* Reject receive broadcast destination address */
+#define XAE_RAF_BCSTREJ_MASK		0x00000004
 #define XAE_RAF_TXVTAGMODE_MASK		0x00000018 /* Tx VLAN TAG mode */
 #define XAE_RAF_RXVTAGMODE_MASK		0x00000060 /* Rx VLAN TAG mode */
 #define XAE_RAF_TXVSTRPMODE_MASK	0x00000180 /* Tx VLAN STRIP mode */
 #define XAE_RAF_RXVSTRPMODE_MASK	0x00000600 /* Rx VLAN STRIP mode */
 #define XAE_RAF_NEWFNCENBL_MASK		0x00000800 /* New function mode */
-#define XAE_RAF_EMULTIFLTRENBL_MASK	0x00001000 /* Exteneded Multicast
-						    * Filtering mode
-						    */
+/* Extended Multicast Filtering mode */
+#define XAE_RAF_EMULTIFLTRENBL_MASK	0x00001000
 #define XAE_RAF_STATSRST_MASK		0x00002000 /* Stats. Counter Reset */
 #define XAE_RAF_RXBADFRMEN_MASK		0x00004000 /* Recv Bad Frame Enable */
 #define XAE_RAF_TXVTAGMODE_SHIFT	3 /* Tx Tag mode shift bits */
@@ -197,15 +199,16 @@
 
 /* Bit Masks for Axi Ethernet TPF and IFGP registers */
 #define XAE_TPF_TPFV_MASK		0x0000FFFF /* Tx pause frame value */
-#define XAE_IFGP0_IFGP_MASK		0x0000007F /* Transmit inter-frame
-						    * gap adjustment value */
+/* Transmit inter-frame gap adjustment value */
+#define XAE_IFGP0_IFGP_MASK		0x0000007F
 
 /* Bit Masks for Axi Ethernet IS, IE and IP registers, Same masks apply
- * for all 3 registers. */
-#define XAE_INT_HARDACSCMPLT_MASK	0x00000001 /* Hard register access
-						    * complete */
-#define XAE_INT_AUTONEG_MASK		0x00000002 /* Auto negotiation
-						    * complete */
+ * for all 3 registers.
+ */
+/* Hard register access complete */
+#define XAE_INT_HARDACSCMPLT_MASK	0x00000001
+/* Auto negotiation complete */
+#define XAE_INT_AUTONEG_MASK		0x00000002
 #define XAE_INT_RXCMPIT_MASK		0x00000004 /* Rx complete */
 #define XAE_INT_RXRJECT_MASK		0x00000008 /* Rx frame rejected */
 #define XAE_INT_RXFIFOOVR_MASK		0x00000010 /* Rx fifo overrun */
@@ -215,10 +218,9 @@
 #define XAE_INT_PHYRSTCMPLT_MASK	0x00000100 /* Phy Reset complete */
 #define XAE_INT_ALL_MASK		0x0000003F /* All the ints */
 
+/* INT bits that indicate receive errors */
 #define XAE_INT_RECV_ERROR_MASK				\
-	(XAE_INT_RXRJECT_MASK | XAE_INT_RXFIFOOVR_MASK) /* INT bits that
-							 * indicate receive
-							 * errors */
+	(XAE_INT_RXRJECT_MASK | XAE_INT_RXFIFOOVR_MASK)
 
 /* Bit masks for Axi Ethernet VLAN TPID Word 0 register */
 #define XAE_TPID_0_MASK		0x0000FFFF /* TPID 0 */
@@ -231,27 +233,28 @@
 /* Bit masks for Axi Ethernet RCW1 register */
 #define XAE_RCW1_RST_MASK	0x80000000 /* Reset */
 #define XAE_RCW1_JUM_MASK	0x40000000 /* Jumbo frame enable */
-#define XAE_RCW1_FCS_MASK	0x20000000 /* In-Band FCS enable
-					    * (FCS not stripped) */
+/* In-Band FCS enable (FCS not stripped) */
+#define XAE_RCW1_FCS_MASK	0x20000000
 #define XAE_RCW1_RX_MASK	0x10000000 /* Receiver enable */
 #define XAE_RCW1_VLAN_MASK	0x08000000 /* VLAN frame enable */
-#define XAE_RCW1_LT_DIS_MASK	0x02000000 /* Length/type field valid check
-					    * disable */
-#define XAE_RCW1_CL_DIS_MASK	0x01000000 /* Control frame Length check
-					    * disable */
-#define XAE_RCW1_PAUSEADDR_MASK 0x0000FFFF /* Pause frame source address
-					    * bits [47:32]. Bits [31:0] are
-					    * stored in register RCW0 */
+/* Length/type field valid check disable */
+#define XAE_RCW1_LT_DIS_MASK	0x02000000
+/* Control frame Length check disable */
+#define XAE_RCW1_CL_DIS_MASK	0x01000000
+/* Pause frame source address bits [47:32]. Bits [31:0] are
+ * stored in register RCW0
+ */
+#define XAE_RCW1_PAUSEADDR_MASK 0x0000FFFF
 
 /* Bit masks for Axi Ethernet TC register */
 #define XAE_TC_RST_MASK		0x80000000 /* Reset */
 #define XAE_TC_JUM_MASK		0x40000000 /* Jumbo frame enable */
-#define XAE_TC_FCS_MASK		0x20000000 /* In-Band FCS enable
-					    * (FCS not generated) */
+/* In-Band FCS enable (FCS not generated) */
+#define XAE_TC_FCS_MASK		0x20000000
 #define XAE_TC_TX_MASK		0x10000000 /* Transmitter enable */
 #define XAE_TC_VLAN_MASK	0x08000000 /* VLAN frame enable */
-#define XAE_TC_IFG_MASK		0x02000000 /* Inter-frame gap adjustment
-					    * enable */
+/* Inter-frame gap adjustment enable */
+#define XAE_TC_IFG_MASK		0x02000000
 
 /* Bit masks for Axi Ethernet FCC register */
 #define XAE_FCC_FCRX_MASK	0x20000000 /* Rx flow control enable */
@@ -301,10 +304,10 @@
 #define XAE_MDIO_INT_MIIM_RDY_MASK	0x00000001 /* MIIM Interrupt */
 
 /* Bit masks for Axi Ethernet UAW1 register */
-#define XAE_UAW1_UNICASTADDR_MASK	0x0000FFFF /* Station address bits
-						    * [47:32]; Station address
-						    * bits [31:0] are stored in
-						    * register UAW0 */
+/* Station address bits [47:32]; Station address
+ * bits [31:0] are stored in register UAW0
+ */
+#define XAE_UAW1_UNICASTADDR_MASK	0x0000FFFF
 
 /* Bit masks for Axi Ethernet FMI register */
 #define XAE_FMI_PM_MASK			0x80000000 /* Promis. mode enable */
@@ -320,8 +323,8 @@
 #define XAE_PHY_TYPE_SGMII		4
 #define XAE_PHY_TYPE_1000BASE_X		5
 
-#define XAE_MULTICAST_CAM_TABLE_NUM	4 /* Total number of entries in the
-					   * hardware multicast table. */
+/* Total number of entries in the hardware multicast table. */
+#define XAE_MULTICAST_CAM_TABLE_NUM	4
 
 /* Axi Ethernet Synthesis features */
 #define XAE_FEATURE_PARTIAL_RX_CSUM	(1 << 0)
@@ -407,8 +410,11 @@
  *		  Txed/Rxed in the existing hardware. If jumbo option is
  *		  supported, the maximum frame size would be 9k. Else it is
  *		  1522 bytes (assuming support for basic VLAN)
- * @jumbo_support: Stores hardware configuration for jumbo support. If hardware
- *		   can handle jumbo packets, this entry will be 1, else 0.
+ * @rxmem:	Stores rx memory size for jumbo frame handling.
+ * @csum_offload_on_tx_path:	Stores the checksum selection on TX side.
+ * @csum_offload_on_rx_path:	Stores the checksum selection on RX side.
+ * @coalesce_count_rx:	Stores the irq coalesce count on RX side.
+ * @coalesce_count_tx:	Stores the irq coalesce count on TX side.
  */
 struct axienet_local {
 	struct net_device *ndev;
@@ -446,7 +452,7 @@
 	u32 rx_bd_ci;
 
 	u32 max_frm_size;
-	u32 jumbo_support;
+	u32 rxmem;
 
 	int csum_offload_on_tx_path;
 	int csum_offload_on_rx_path;
@@ -472,7 +478,7 @@
  * @lp:         Pointer to axienet local structure
  * @offset:     Address offset from the base address of Axi Ethernet core
  *
- * returns: The contents of the Axi Ethernet register
+ * Return: The contents of the Axi Ethernet register
  *
  * This function returns the contents of the corresponding register.
  */
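[VLAN_ETH_HLEN comes from <linux/if_vlan.h> (14-byte Ethernet header plus a 4-byte 802.1Q tag), so dropping the private XAE_HDR_VLAN_SIZE leaves the frame-size arithmetic unchanged:

	XAE_MAX_VLAN_FRAME_SIZE = XAE_MTU + VLAN_ETH_HLEN + XAE_TRL_SIZE
	                        = 1500   + 18            + 4
	                        = 1522 bytes
]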
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 28b7e7d..4208dd7 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -117,7 +117,7 @@
  * @lp:		Pointer to axienet local structure
  * @reg:	Address offset from the base address of the Axi DMA core
  *
- * returns: The contents of the Axi DMA register
+ * Return: The contents of the Axi DMA register
  *
  * This function returns the contents of the corresponding Axi DMA register.
  */
@@ -179,8 +179,7 @@
  * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
  * @ndev:	Pointer to the net_device structure
  *
- * returns: 0, on success
- *	    -ENOMEM, on failure
+ * Return: 0 on success, -ENOMEM on failure
  *
  * This function is called to initialize the Rx and Tx DMA descriptor
  * rings. This initializes the descriptors with required default values
@@ -198,9 +197,7 @@
 	lp->tx_bd_tail = 0;
 	lp->rx_bd_ci = 0;
 
-	/*
-	 * Allocate the Tx and Rx buffer descriptors.
-	 */
+	/* Allocate the Tx and Rx buffer descriptors. */
 	lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
 					  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
 					  &lp->tx_bd_p, GFP_KERNEL);
@@ -263,7 +260,8 @@
 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 
 	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
-	 * halted state. This will make the Rx side ready for reception.*/
+	 * halted state. This will make the Rx side ready for reception.
+	 */
 	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
 	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
@@ -273,7 +271,8 @@
 
 	/* Write to the RS (Run-stop) bit in the Tx channel control register.
 	 * Tx channel is now ready to run. But only after we write to the
-	 * tail pointer register that the Tx channel will start transmitting */
+	 * tail pointer register that the Tx channel will start transmitting.
+	 */
 	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
 	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
@@ -320,7 +319,7 @@
  * @ndev:	Pointer to the net_device structure
  * @p:		6 byte Address to be written as MAC address
  *
- * returns: 0 for all conditions. Presently, there is no failure case.
+ * Return: 0 for all conditions. Presently, there is no failure case.
  *
  * This function is called to initialize the MAC address of the Axi Ethernet
  * core. It calls the core specific axienet_set_mac_address. This is the
@@ -354,7 +353,8 @@
 	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
 		/* We must make the kernel realize we had to move into
 		 * promiscuous mode. If it was a promiscuous mode request
-		 * the flag is already set. If not we set it. */
+		 * the flag is already set. If not we set it.
+		 */
 		ndev->flags |= IFF_PROMISC;
 		reg = axienet_ior(lp, XAE_FMI_OFFSET);
 		reg |= XAE_FMI_PM_MASK;
@@ -438,14 +438,15 @@
 	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
 	 * process of Axi DMA takes a while to complete as all pending
 	 * commands/transfers will be flushed or completed during this
-	 * reset process. */
+	 * reset process.
+	 */
 	axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
 	timeout = DELAY_OF_ONE_MILLISEC;
 	while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
 		udelay(1);
 		if (--timeout == 0) {
-			dev_err(dev, "axienet_device_reset DMA "
-				"reset timeout!\n");
+			netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
+				   __func__);
 			break;
 		}
 	}
@@ -471,19 +472,21 @@
 	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
 
 	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
+	lp->options |= XAE_OPTION_VLAN;
 	lp->options &= (~XAE_OPTION_JUMBO);
 
 	if ((ndev->mtu > XAE_MTU) &&
-	    (ndev->mtu <= XAE_JUMBO_MTU) &&
-	    (lp->jumbo_support)) {
-		lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
-				   XAE_TRL_SIZE;
-		lp->options |= XAE_OPTION_JUMBO;
+		(ndev->mtu <= XAE_JUMBO_MTU)) {
+		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
+					XAE_TRL_SIZE;
+
+		if (lp->max_frm_size <= lp->rxmem)
+			lp->options |= XAE_OPTION_JUMBO;
 	}
 
 	if (axienet_dma_bd_init(ndev)) {
-		dev_err(&ndev->dev, "axienet_device_reset descriptor "
-			"allocation failed\n");
+		netdev_err(ndev, "%s: descriptor allocation failed\n",
+			   __func__);
 	}
 
 	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
@@ -497,7 +500,8 @@
 	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
 
 	/* Sync default options with HW but leave receiver and
-	 * transmitter disabled.*/
+	 * transmitter disabled.
+	 */
 	axienet_setoptions(ndev, lp->options &
 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 	axienet_set_mac_address(ndev, NULL);
@@ -558,8 +562,8 @@
 			lp->last_link = link_state;
 			phy_print_status(phy);
 		} else {
-			dev_err(&ndev->dev, "Error setting Axi Ethernet "
-				"mac speed\n");
+			netdev_err(ndev,
+				   "Error setting Axi Ethernet mac speed\n");
 		}
 	}
 }
@@ -617,7 +621,7 @@
  * @lp:		Pointer to the axienet_local structure
  * @num_frag:	The number of BDs to check for
  *
- * returns: 0, on success
+ * Return: 0, on success
  *	    NETDEV_TX_BUSY, if any of the descriptors are not free
  *
  * This function is invoked before BDs are allocated and transmission starts.
@@ -640,7 +644,7 @@
  * @skb:	sk_buff pointer that contains data to be Txed.
  * @ndev:	Pointer to net_device structure.
  *
- * returns: NETDEV_TX_OK, on success
+ * Return: NETDEV_TX_OK, on success
  *	    NETDEV_TX_BUSY, if any of the descriptors are not free
  *
  * This function is invoked from upper layers to initiate transmission. The
@@ -726,15 +730,15 @@
 	u32 csumstatus;
 	u32 size = 0;
 	u32 packets = 0;
-	dma_addr_t tail_p;
+	dma_addr_t tail_p = 0;
 	struct axienet_local *lp = netdev_priv(ndev);
 	struct sk_buff *skb, *new_skb;
 	struct axidma_bd *cur_p;
 
-	tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
 	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
 
 	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
 		skb = (struct sk_buff *) (cur_p->sw_id_offset);
 		length = cur_p->app4 & 0x0000FFFF;
 
@@ -786,7 +790,8 @@
 	ndev->stats.rx_packets += packets;
 	ndev->stats.rx_bytes += size;
 
-	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+	if (tail_p)
+		axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
 }
 
 /**
@@ -794,7 +799,7 @@
  * @irq:	irq number
  * @_ndev:	net_device pointer
  *
- * returns: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED for all cases.
  *
  * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
  * to complete the BD processing.
@@ -808,6 +813,7 @@
 
 	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
 		axienet_start_xmit_done(lp->ndev);
 		goto out;
 	}
@@ -831,9 +837,9 @@
 		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 
 		tasklet_schedule(&lp->dma_err_tasklet);
+		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
 	}
 out:
-	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
 	return IRQ_HANDLED;
 }
 
@@ -842,7 +848,7 @@
  * @irq:	irq number
  * @_ndev:	net_device pointer
  *
- * returns: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED for all cases.
  *
  * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
  * processing.
@@ -856,6 +862,7 @@
 
 	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
 	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
 		axienet_recv(lp->ndev);
 		goto out;
 	}
@@ -879,9 +886,9 @@
 		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 
 		tasklet_schedule(&lp->dma_err_tasklet);
+		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
 	}
 out:
-	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
 	return IRQ_HANDLED;
 }
 
@@ -891,7 +898,7 @@
  * axienet_open - Driver open routine.
  * @ndev:	Pointer to net_device structure
  *
- * returns: 0, on success.
+ * Return: 0, on success.
  *	    -ENODEV, if PHY cannot be connected to
  *	    non-zero error value on failure
  *
@@ -914,7 +921,8 @@
 	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
 	 * When we do an Axi Ethernet reset, it resets the complete core
 	 * including the MDIO. If MDIO is not disabled when the reset
-	 * process is started, MDIO will be broken afterwards. */
+	 * process is started, MDIO will be broken afterwards.
+	 */
 	axienet_iow(lp, XAE_MDIO_MC_OFFSET,
 		    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
 	axienet_device_reset(ndev);
@@ -925,14 +933,20 @@
 		return ret;
 
 	if (lp->phy_node) {
-		lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+		if (lp->phy_type == XAE_PHY_TYPE_GMII) {
+			lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
 					     axienet_adjust_link, 0,
 					     PHY_INTERFACE_MODE_GMII);
-		if (!lp->phy_dev) {
-			dev_err(lp->dev, "of_phy_connect() failed\n");
-			return -ENODEV;
+		} else if (lp->phy_type == XAE_PHY_TYPE_RGMII_2_0) {
+			lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+					     axienet_adjust_link, 0,
+					     PHY_INTERFACE_MODE_RGMII_ID);
 		}
-		phy_start(lp->phy_dev);
+
+		if (!lp->phy_dev)
+			dev_err(lp->dev, "of_phy_connect() failed\n");
+		else
+			phy_start(lp->phy_dev);
 	}
 
 	/* Enable tasklets for Axi DMA error handling */
@@ -965,7 +979,7 @@
  * axienet_stop - Driver stop routine.
  * @ndev:	Pointer to net_device structure
  *
- * returns: 0, on success.
+ * Return: 0, on success.
  *
  * This is the driver stop routine. It calls phy_disconnect to stop the PHY
  * device. It also removes the interrupt handlers and disables the interrupts.
@@ -1005,7 +1019,7 @@
  * @ndev:	Pointer to net_device structure
  * @new_mtu:	New mtu value to be applied
  *
- * returns: Always returns 0 (success).
+ * Return: Always returns 0 (success).
  *
  * This is the change mtu driver routine. It checks if the Axi Ethernet
  * hardware supports jumbo frames before changing the mtu. This can be
@@ -1017,15 +1031,15 @@
 
 	if (netif_running(ndev))
 		return -EBUSY;
-	if (lp->jumbo_support) {
-		if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
-			return -EINVAL;
-		ndev->mtu = new_mtu;
-	} else {
-		if ((new_mtu > XAE_MTU) || (new_mtu < 64))
-			return -EINVAL;
-		ndev->mtu = new_mtu;
-	}
+
+	if ((new_mtu + VLAN_ETH_HLEN +
+		XAE_TRL_SIZE) > lp->rxmem)
+		return -EINVAL;
+
+	if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
+		return -EINVAL;
+
+	ndev->mtu = new_mtu;
 
 	return 0;
 }
@@ -1072,6 +1086,8 @@
  * not be found, the function returns -ENODEV. This function calls the
  * relevant PHY ethtool API to get the PHY settings.
  * Issue "ethtool ethX" under linux prompt to execute this function.
+ *
+ * Return: 0 on success, -ENODEV if PHY doesn't exist
  */
 static int axienet_ethtools_get_settings(struct net_device *ndev,
 					 struct ethtool_cmd *ecmd)
@@ -1093,6 +1109,8 @@
  * relevant PHY ethtool API to set the PHY.
  * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
  * function.
+ *
+ * Return: 0 on success, -ENODEV if PHY doesn't exist
  */
 static int axienet_ethtools_set_settings(struct net_device *ndev,
 					 struct ethtool_cmd *ecmd)
@@ -1127,6 +1145,8 @@
  *
  * This implements ethtool command for getting the total register length
  * information.
+ *
+ * Return: the total regs length
  */
 static int axienet_ethtools_get_regs_len(struct net_device *ndev)
 {
@@ -1213,11 +1233,13 @@
  * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
  *				     settings.
  * @ndev:	Pointer to net_device structure
- * @epauseparam:Pointer to ethtool_pauseparam structure
+ * @epauseparm: Pointer to ethtool_pauseparam structure
  *
  * This implements ethtool command for enabling flow control on Rx and Tx
  * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
  * function.
+ *
+ * Return: 0 on success, -EFAULT if device is running
  */
 static int
 axienet_ethtools_set_pauseparam(struct net_device *ndev,
@@ -1227,8 +1249,8 @@
 	struct axienet_local *lp = netdev_priv(ndev);
 
 	if (netif_running(ndev)) {
-		printk(KERN_ERR	"%s: Please stop netif before applying "
-		       "configruation\n", ndev->name);
+		netdev_err(ndev,
+			   "Please stop netif before applying configuration\n");
 		return -EFAULT;
 	}
 
@@ -1254,6 +1276,8 @@
  * This implements ethtool command for getting the DMA interrupt coalescing
  * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
  * execute this function.
+ *
+ * Return: 0 always
  */
 static int axienet_ethtools_get_coalesce(struct net_device *ndev,
 					 struct ethtool_coalesce *ecoalesce)
@@ -1277,6 +1301,8 @@
  * This implements ethtool command for setting the DMA interrupt coalescing
  * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
  * prompt to execute this function.
+ *
+ * Return: 0 on success, non-zero error value on failure.
  */
 static int axienet_ethtools_set_coalesce(struct net_device *ndev,
 					 struct ethtool_coalesce *ecoalesce)
@@ -1284,8 +1310,8 @@
 	struct axienet_local *lp = netdev_priv(ndev);
 
 	if (netif_running(ndev)) {
-		printk(KERN_ERR	"%s: Please stop netif before applying "
-		       "configruation\n", ndev->name);
+		netdev_err(ndev,
+			   "Please stop netif before applying configuration\n");
 		return -EFAULT;
 	}
 
@@ -1354,7 +1380,8 @@
 	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
 	 * When we do an Axi Ethernet reset, it resets the complete core
 	 * including the MDIO. So if MDIO is not disabled when the reset
-	 * process is started, MDIO will be broken afterwards. */
+	 * process is started, MDIO will be broken afterwards.
+	 */
 	axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
 		    ~XAE_MDIO_MC_MDIOEN_MASK));
 
@@ -1425,7 +1452,8 @@
 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 
 	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
-	 * halted state. This will make the Rx side ready for reception.*/
+	 * halted state. This will make the Rx side ready for reception.
+	 */
 	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
 	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
@@ -1435,7 +1463,8 @@
 
 	/* Write to the RS (Run-stop) bit in the Tx channel control register.
 	 * Tx channel is now ready to run. But only after we write to the
-	 * tail pointer register that the Tx channel will start transmitting */
+	 * tail pointer register that the Tx channel will start transmitting.
+	 */
 	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
 	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
@@ -1451,7 +1480,8 @@
 	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
 
 	/* Sync default options with HW but leave receiver and
-	 * transmitter disabled.*/
+	 * transmitter disabled.
+	 */
 	axienet_setoptions(ndev, lp->options &
 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 	axienet_set_mac_address(ndev, NULL);
@@ -1460,11 +1490,10 @@
 }
 
 /**
- * axienet_of_probe - Axi Ethernet probe function.
- * @op:		Pointer to platform device structure.
- * @match:	Pointer to device id structure
+ * axienet_probe - Axi Ethernet probe function.
+ * @pdev:	Pointer to platform device structure.
  *
- * returns: 0, on success
+ * Return: 0, on success
  *	    Non-zero error value on failure.
  *
  * This is the probe routine for Axi Ethernet driver. This is called before
@@ -1472,22 +1501,23 @@
  * device. Parses through device tree and populates fields of
  * axienet_local. It registers the Ethernet device.
  */
-static int axienet_of_probe(struct platform_device *op)
+static int axienet_probe(struct platform_device *pdev)
 {
-	__be32 *p;
-	int size, ret = 0;
+	int ret;
 	struct device_node *np;
 	struct axienet_local *lp;
 	struct net_device *ndev;
-	const void *addr;
+	u8 mac_addr[6];
+	struct resource *ethres, dmares;
+	u32 value;
 
 	ndev = alloc_etherdev(sizeof(*lp));
 	if (!ndev)
 		return -ENOMEM;
 
-	platform_set_drvdata(op, ndev);
+	platform_set_drvdata(pdev, ndev);
 
-	SET_NETDEV_DEV(ndev, &op->dev);
+	SET_NETDEV_DEV(ndev, &pdev->dev);
 	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
 	ndev->features = NETIF_F_SG;
 	ndev->netdev_ops = &axienet_netdev_ops;
@@ -1495,21 +1525,23 @@
 
 	lp = netdev_priv(ndev);
 	lp->ndev = ndev;
-	lp->dev = &op->dev;
+	lp->dev = &pdev->dev;
 	lp->options = XAE_OPTION_DEFAULTS;
 	/* Map device registers */
-	lp->regs = of_iomap(op->dev.of_node, 0);
+	ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
-	if (!lp->regs) {
-		dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
-		ret = -ENOMEM;
+	if (IS_ERR(lp->regs)) {
+		dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
+		ret = PTR_ERR(lp->regs);
-		goto nodev;
+		goto free_netdev;
 	}
+
 	/* Setup checksum offload, but default to off if not specified */
 	lp->features = 0;
 
-	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
-	if (p) {
-		switch (be32_to_cpup(p)) {
+	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
+	if (!ret) {
+		switch (value) {
 		case 1:
 			lp->csum_offload_on_tx_path =
 				XAE_FEATURE_PARTIAL_TX_CSUM;
@@ -1528,9 +1560,9 @@
 			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
 		}
 	}
-	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
-	if (p) {
-		switch (be32_to_cpup(p)) {
+	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
+	if (!ret) {
+		switch (value) {
 		case 1:
 			lp->csum_offload_on_rx_path =
 				XAE_FEATURE_PARTIAL_RX_CSUM;
@@ -1546,82 +1578,77 @@
 		}
 	}
 	/* For supporting jumbo frames, the Axi Ethernet hardware must have
-	 * a larger Rx/Tx Memory. Typically, the size must be more than or
-	 * equal to 16384 bytes, so that we can enable jumbo option and start
-	 * supporting jumbo frames. Here we check for memory allocated for
-	 * Rx/Tx in the hardware from the device-tree and accordingly set
-	 * flags. */
-	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL);
-	if (p) {
-		if ((be32_to_cpup(p)) >= 0x4000)
-			lp->jumbo_support = 1;
-	}
-	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
-	if (p)
-		lp->phy_type = be32_to_cpup(p);
+	 * a larger Rx/Tx Memory: the jumbo option can only be enabled when
+	 * the provisioned memory can hold a maximum-sized frame. Here we
+	 * read the Rx/Tx memory size in the hardware from the device-tree
+	 * and set the flags accordingly.
+	 */
+	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
+	of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &lp->phy_type);
 
 	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
-	np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
-	if (!np) {
-		dev_err(&op->dev, "could not find DMA node\n");
-		ret = -ENODEV;
-		goto err_iounmap;
+	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
+	if (!np) {
+		dev_err(&pdev->dev, "could not find DMA node\n");
+		ret = -ENODEV;
+		goto free_netdev;
 	}
-	lp->dma_regs = of_iomap(np, 0);
-	if (lp->dma_regs) {
-		dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs);
-	} else {
-		dev_err(&op->dev, "unable to map DMA registers\n");
-		of_node_put(np);
+	ret = of_address_to_resource(np, 0, &dmares);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to get DMA resource\n");
+		goto free_netdev;
+	}
+	lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
+	if (IS_ERR(lp->dma_regs)) {
+		dev_err(&pdev->dev, "could not map DMA regs\n");
+		ret = PTR_ERR(lp->dma_regs);
+		goto free_netdev;
 	}
 	lp->rx_irq = irq_of_parse_and_map(np, 1);
 	lp->tx_irq = irq_of_parse_and_map(np, 0);
 	of_node_put(np);
 	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
-		dev_err(&op->dev, "could not determine irqs\n");
+		dev_err(&pdev->dev, "could not determine irqs\n");
 		ret = -ENOMEM;
-		goto err_iounmap_2;
+		goto free_netdev;
 	}
 
 	/* Retrieve the MAC address */
-	addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
-	if ((!addr) || (size != 6)) {
-		dev_err(&op->dev, "could not find MAC address\n");
-		ret = -ENODEV;
-		goto err_iounmap_2;
+	ret = of_property_read_u8_array(pdev->dev.of_node,
+					"local-mac-address", mac_addr, 6);
+	if (ret) {
+		dev_err(&pdev->dev, "could not find MAC address\n");
+		goto free_netdev;
 	}
-	axienet_set_mac_address(ndev, (void *) addr);
+	axienet_set_mac_address(ndev, (void *)mac_addr);
 
 	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
 	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
 
-	lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
-	ret = axienet_mdio_setup(lp, op->dev.of_node);
-	if (ret)
-		dev_warn(&op->dev, "error registering MDIO bus\n");
+	lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+	if (lp->phy_node) {
+		ret = axienet_mdio_setup(lp, pdev->dev.of_node);
+		if (ret)
+			dev_warn(&pdev->dev, "error registering MDIO bus\n");
+	}
 
 	ret = register_netdev(lp->ndev);
 	if (ret) {
 		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
-		goto err_iounmap_2;
+		goto free_netdev;
 	}
 
 	return 0;
 
-err_iounmap_2:
-	if (lp->dma_regs)
-		iounmap(lp->dma_regs);
-err_iounmap:
-	iounmap(lp->regs);
-nodev:
+free_netdev:
 	free_netdev(ndev);
-	ndev = NULL;
+
 	return ret;
 }
 
-static int axienet_of_remove(struct platform_device *op)
+static int axienet_remove(struct platform_device *pdev)
 {
-	struct net_device *ndev = platform_get_drvdata(op);
+	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct axienet_local *lp = netdev_priv(ndev);
 
 	axienet_mdio_teardown(lp);
@@ -1630,24 +1657,21 @@
 	of_node_put(lp->phy_node);
 	lp->phy_node = NULL;
 
-	iounmap(lp->regs);
-	if (lp->dma_regs)
-		iounmap(lp->dma_regs);
 	free_netdev(ndev);
 
 	return 0;
 }
 
-static struct platform_driver axienet_of_driver = {
-	.probe = axienet_of_probe,
-	.remove = axienet_of_remove,
+static struct platform_driver axienet_driver = {
+	.probe = axienet_probe,
+	.remove = axienet_remove,
 	.driver = {
 		 .name = "xilinx_axienet",
 		 .of_match_table = axienet_of_match,
 	},
 };
 
-module_platform_driver(axienet_of_driver);
+module_platform_driver(axienet_driver);
 
 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
 MODULE_AUTHOR("Xilinx");
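[The probe conversion above moves to the managed devm_* APIs, which is why the iounmap() unwind labels disappear: devm-managed mappings are released automatically on probe failure or device removal. One convention worth keeping straight is that devm_ioremap_resource() reports failure with an ERR_PTR() value rather than NULL, while of_parse_phandle() returns NULL rather than an ERR_PTR(). A minimal sketch of the pattern; example_probe() and "example,prop" are hypothetical:

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *regs;
		u32 value = 0;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		regs = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(regs))		/* ERR_PTR(), never NULL */
			return PTR_ERR(regs);

		/* returns 0 on success, leaves "value" alone when absent */
		of_property_read_u32(pdev->dev.of_node, "example,prop",
				     &value);

		return 0;
	}
]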
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 3b67d60..2a5a168 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -37,7 +37,7 @@
  * @phy_id:	Address of the PHY device
  * @reg:	PHY register to read
  *
- * returns:	The register contents on success, -ETIMEDOUT on a timeout
+ * Return:	The register contents on success, -ETIMEDOUT on a timeout
  *
  * Reads the contents of the requested register from the requested PHY
  * address by first writing the details into MCR register. After a while
@@ -80,7 +80,7 @@
  * @reg:	PHY register to write to
  * @val:	Value to be written into the register
  *
- * returns:	0 on success, -ETIMEDOUT on a timeout
+ * Return:	0 on success, -ETIMEDOUT on a timeout
  *
  * Writes the value to the requested register by first writing the value
  * into MWD register. The the MCR register is then appropriately setup
@@ -119,7 +119,7 @@
  * @lp:		Pointer to axienet local data structure.
  * @np:		Pointer to device node
  *
- * returns:	0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
+ * Return:	0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
  *		mdiobus_alloc (to allocate memory for mii bus structure) fails.
  *
  * Sets up the MDIO interface by initializing the MDIO clock and enabling the
@@ -161,19 +161,19 @@
 
 	np1 = of_find_node_by_name(NULL, "cpu");
 	if (!np1) {
-		printk(KERN_WARNING "%s(): Could not find CPU device node.",
-		       __func__);
-		printk(KERN_WARNING "Setting MDIO clock divisor to "
-		       "default %d\n", DEFAULT_CLOCK_DIVISOR);
+		netdev_warn(lp->ndev, "Could not find CPU device node.\n");
+		netdev_warn(lp->ndev,
+			    "Setting MDIO clock divisor to default %d\n",
+			    DEFAULT_CLOCK_DIVISOR);
 		clk_div = DEFAULT_CLOCK_DIVISOR;
 		goto issue;
 	}
 	property_p = (u32 *) of_get_property(np1, "clock-frequency", NULL);
 	if (!property_p) {
-		printk(KERN_WARNING "%s(): Could not find CPU property: "
-		       "clock-frequency.", __func__);
-		printk(KERN_WARNING "Setting MDIO clock divisor to "
-		       "default %d\n", DEFAULT_CLOCK_DIVISOR);
+		netdev_warn(lp->ndev, "clock-frequency property not found.\n");
+		netdev_warn(lp->ndev,
+			    "Setting MDIO clock divisor to default %d\n",
+			    DEFAULT_CLOCK_DIVISOR);
 		clk_div = DEFAULT_CLOCK_DIVISOR;
 		of_node_put(np1);
 		goto issue;
@@ -183,12 +183,14 @@
 	clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
 	/* If there is any remainder from the division of
 	 * fHOST / (MAX_MDIO_FREQ * 2), then we need to add
-	 * 1 to the clock divisor or we will surely be above 2.5 MHz */
+	 * 1 to the clock divisor or we will surely be above 2.5 MHz
+	 */
 	if (host_clock % (MAX_MDIO_FREQ * 2))
 		clk_div++;
 
-	printk(KERN_DEBUG "%s(): Setting MDIO clock divisor to %u based "
-	       "on %u Hz host clock.\n", __func__, clk_div, host_clock);
+	netdev_dbg(lp->ndev,
+		   "Setting MDIO clock divisor to %u/%u Hz host clock.\n",
+		   clk_div, host_clock);
 
 	of_node_put(np1);
 issue:
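[The divisor math above keeps MDC at or below the 2.5 MHz ceiling: MDC = host_clock / ((clk_div + 1) * 2), and any remainder bumps the divisor up by one so the bound is never exceeded. A standalone restatement; mdio_clk_div() is illustrative, and MAX_MDIO_FREQ is assumed to be 2500000 as in the driver:

	#include <linux/types.h>

	#define MAX_MDIO_FREQ	2500000		/* 2.5 MHz MDC ceiling */

	static u32 mdio_clk_div(u32 host_clock)
	{
		u32 clk_div = host_clock / (MAX_MDIO_FREQ * 2) - 1;

		if (host_clock % (MAX_MDIO_FREQ * 2))
			clk_div++;		/* round up, never exceed */
		return clk_div;
	}

	/* e.g. host_clock = 200000000: 200000000 / 5000000 - 1 = 39,
	 * so MDC = 200000000 / ((39 + 1) * 2) = 2.5 MHz exactly. */
]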
diff --git a/drivers/net/ethernet/xircom/Kconfig b/drivers/net/ethernet/xircom/Kconfig
index 69f56a6..d6208a4 100644
--- a/drivers/net/ethernet/xircom/Kconfig
+++ b/drivers/net/ethernet/xircom/Kconfig
@@ -7,9 +7,7 @@
 	default y
 	depends on PCMCIA
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index b81bc9f..af3432f 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -8,9 +8,7 @@
 	depends on NET_VENDOR_INTEL && (ARM && ARCH_IXP4XX && \
 		   IXP4XX_NPE && IXP4XX_QMGR)
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question does not directly affect the
 	  kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/fddi/skfp/srf.c b/drivers/net/fddi/skfp/srf.c
index cc27dea..9956680 100644
--- a/drivers/net/fddi/skfp/srf.c
+++ b/drivers/net/fddi/skfp/srf.c
@@ -414,7 +414,7 @@
 	smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ;
 	mb->sm_len = smt->smt_len + sizeof(struct smt_header) ;
 
-	DB_SMT("SRF: sending SRF at %x, len %d\n",smt,mb->sm_len) ;
+	DB_SMT("SRF: sending SRF at %p, len %d\n",smt,mb->sm_len) ;
 	DB_SMT("SRF: state SR%d Threshold %d\n",
 		smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ;
 #ifdef	DEBUG
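[Printing a pointer with %x truncates it on 64-bit and draws a -Wformat warning; %p is the conversion for pointers, which is all the skfp hunk above changes. A trivial illustration (show_frame() is hypothetical):

	#include <linux/printk.h>

	static void show_frame(struct smt_header *smt, int len)
	{
		pr_debug("SRF at %p, len %d\n", smt, len);	/* not %x */
	}
]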
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
new file mode 100644
index 0000000..78d49d1
--- /dev/null
+++ b/drivers/net/geneve.c
@@ -0,0 +1,523 @@
+/*
+ * GENEVE: Generic Network Virtualization Encapsulation
+ *
+ * Copyright (c) 2015 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/hash.h>
+#include <net/rtnetlink.h>
+#include <net/geneve.h>
+
+#define GENEVE_NETDEV_VER	"0.6"
+
+#define GENEVE_UDP_PORT		6081
+
+#define GENEVE_N_VID		(1u << 24)
+#define GENEVE_VID_MASK		(GENEVE_N_VID - 1)
+
+#define VNI_HASH_BITS		10
+#define VNI_HASH_SIZE		(1<<VNI_HASH_BITS)
+
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
+/* per-network namespace private data for this module */
+struct geneve_net {
+	struct list_head  geneve_list;
+	struct hlist_head vni_list[VNI_HASH_SIZE];
+};
+
+/* Pseudo network device */
+struct geneve_dev {
+	struct hlist_node  hlist;	/* vni hash table */
+	struct net	   *net;	/* netns for packet i/o */
+	struct net_device  *dev;	/* netdev for geneve tunnel */
+	struct geneve_sock *sock;	/* socket used for geneve tunnel */
+	u8                 vni[3];	/* virtual network ID for tunnel */
+	u8                 ttl;		/* TTL override */
+	u8                 tos;		/* TOS override */
+	struct sockaddr_in remote;	/* IPv4 address for link partner */
+	struct list_head   next;	/* geneve's per namespace list */
+};
+
+static int geneve_net_id;
+
+static inline __u32 geneve_net_vni_hash(u8 vni[3])
+{
+	__u32 vnid;
+
+	vnid = (vni[0] << 16) | (vni[1] << 8) | vni[2];
+	return hash_32(vnid, VNI_HASH_BITS);
+}
+
+/* geneve receive/decap routine */
+static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
+{
+	struct genevehdr *gnvh = geneve_hdr(skb);
+	struct geneve_dev *dummy, *geneve = NULL;
+	struct geneve_net *gn;
+	struct iphdr *iph = NULL;
+	struct pcpu_sw_netstats *stats;
+	struct hlist_head *vni_list_head;
+	int err = 0;
+	__u32 hash;
+
+	iph = ip_hdr(skb); /* Still outer IP header... */
+
+	gn = gs->rcv_data;
+
+	/* Find the device for this VNI */
+	hash = geneve_net_vni_hash(gnvh->vni);
+	vni_list_head = &gn->vni_list[hash];
+	hlist_for_each_entry_rcu(dummy, vni_list_head, hlist) {
+		if (!memcmp(gnvh->vni, dummy->vni, sizeof(dummy->vni)) &&
+		    iph->saddr == dummy->remote.sin_addr.s_addr) {
+			geneve = dummy;
+			break;
+		}
+	}
+	if (!geneve)
+		goto drop;
+
+	/* Drop packets with critical options,
+	 * since we don't support any...
+	 */
+	if (gnvh->critical)
+		goto drop;
+
+	skb_reset_mac_header(skb);
+	skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev)));
+	skb->protocol = eth_type_trans(skb, geneve->dev);
+	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+
+	/* Ignore packet loops (and multicast echo) */
+	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
+		goto drop;
+
+	skb_reset_network_header(skb);
+
+	iph = ip_hdr(skb); /* Now inner IP header... */
+	err = IP_ECN_decapsulate(iph, skb);
+
+	if (unlikely(err)) {
+		if (log_ecn_error)
+			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+					     &iph->saddr, iph->tos);
+		if (err > 1) {
+			++geneve->dev->stats.rx_frame_errors;
+			++geneve->dev->stats.rx_errors;
+			goto drop;
+		}
+	}
+
+	stats = this_cpu_ptr(geneve->dev->tstats);
+	u64_stats_update_begin(&stats->syncp);
+	stats->rx_packets++;
+	stats->rx_bytes += skb->len;
+	u64_stats_update_end(&stats->syncp);
+
+	netif_rx(skb);
+
+	return;
+drop:
+	/* Consume bad packet */
+	kfree_skb(skb);
+}
+
+/* Setup stats when device is created */
+static int geneve_init(struct net_device *dev)
+{
+	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+	if (!dev->tstats)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void geneve_uninit(struct net_device *dev)
+{
+	free_percpu(dev->tstats);
+}
+
+static int geneve_open(struct net_device *dev)
+{
+	struct geneve_dev *geneve = netdev_priv(dev);
+	struct net *net = geneve->net;
+	struct geneve_net *gn = net_generic(geneve->net, geneve_net_id);
+	struct geneve_sock *gs;
+
+	gs = geneve_sock_add(net, htons(GENEVE_UDP_PORT), geneve_rx, gn,
+	                     false, false);
+	if (IS_ERR(gs))
+		return PTR_ERR(gs);
+
+	geneve->sock = gs;
+
+	return 0;
+}
+
+static int geneve_stop(struct net_device *dev)
+{
+	struct geneve_dev *geneve = netdev_priv(dev);
+	struct geneve_sock *gs = geneve->sock;
+
+	geneve_sock_release(gs);
+
+	return 0;
+}
+
+static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct geneve_dev *geneve = netdev_priv(dev);
+	struct geneve_sock *gs = geneve->sock;
+	struct rtable *rt = NULL;
+	const struct iphdr *iip; /* interior IP header */
+	struct flowi4 fl4;
+	int err;
+	__be16 sport;
+	__u8 tos, ttl;
+
+	iip = ip_hdr(skb);
+
+	skb_reset_mac_header(skb);
+
+	/* TODO: port min/max limits should be configurable */
+	sport = udp_flow_src_port(dev_net(dev), skb, 0, 0, true);
+
+	tos = geneve->tos;
+	if (tos == 1)
+		tos = ip_tunnel_get_dsfield(iip, skb);
+
+	memset(&fl4, 0, sizeof(fl4));
+	fl4.flowi4_tos = RT_TOS(tos);
+	fl4.daddr = geneve->remote.sin_addr.s_addr;
+	rt = ip_route_output_key(geneve->net, &fl4);
+	if (IS_ERR(rt)) {
+		netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
+		dev->stats.tx_carrier_errors++;
+		goto tx_error;
+	}
+	if (rt->dst.dev == dev) { /* is this necessary? */
+		netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
+		dev->stats.collisions++;
+		goto rt_tx_error;
+	}
+
+	tos = ip_tunnel_ecn_encap(tos, iip, skb);
+
+	ttl = geneve->ttl;
+	if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
+		ttl = 1;
+
+	ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+
+	/* no need to handle local destination and encap bypass...yet... */
+
+	err = geneve_xmit_skb(gs, rt, skb, fl4.saddr, fl4.daddr,
+	                      tos, ttl, 0, sport, htons(GENEVE_UDP_PORT), 0,
+	                      geneve->vni, 0, NULL, false,
+	                      !net_eq(geneve->net, dev_net(geneve->dev)));
+	if (err < 0)
+		ip_rt_put(rt);
+
+	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+
+	return NETDEV_TX_OK;
+
+rt_tx_error:
+	ip_rt_put(rt);
+tx_error:
+	dev->stats.tx_errors++;
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops geneve_netdev_ops = {
+	.ndo_init		= geneve_init,
+	.ndo_uninit		= geneve_uninit,
+	.ndo_open		= geneve_open,
+	.ndo_stop		= geneve_stop,
+	.ndo_start_xmit		= geneve_xmit,
+	.ndo_get_stats64	= ip_tunnel_get_stats64,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= eth_mac_addr,
+};
+
+static void geneve_get_drvinfo(struct net_device *dev,
+			       struct ethtool_drvinfo *drvinfo)
+{
+	strlcpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
+	strlcpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
+}
+
+static const struct ethtool_ops geneve_ethtool_ops = {
+	.get_drvinfo	= geneve_get_drvinfo,
+	.get_link	= ethtool_op_get_link,
+};
+
+/* Info for udev: this is a virtual tunnel endpoint */
+static struct device_type geneve_type = {
+	.name = "geneve",
+};
+
+/* Initialize the device structure. */
+static void geneve_setup(struct net_device *dev)
+{
+	ether_setup(dev);
+
+	dev->netdev_ops = &geneve_netdev_ops;
+	dev->ethtool_ops = &geneve_ethtool_ops;
+	dev->destructor = free_netdev;
+
+	SET_NETDEV_DEVTYPE(dev, &geneve_type);
+
+	dev->tx_queue_len = 0;
+	dev->features    |= NETIF_F_LLTX;
+	dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM;
+	dev->features    |= NETIF_F_RXCSUM;
+	dev->features    |= NETIF_F_GSO_SOFTWARE;
+
+	dev->vlan_features = dev->features;
+	dev->features    |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+
+	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+
+	netif_keep_dst(dev);
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+}
+
+static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
+	[IFLA_GENEVE_ID]		= { .type = NLA_U32 },
+	[IFLA_GENEVE_REMOTE]		= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+	[IFLA_GENEVE_TTL]		= { .type = NLA_U8 },
+	[IFLA_GENEVE_TOS]		= { .type = NLA_U8 },
+};
+
+static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+	if (tb[IFLA_ADDRESS]) {
+		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+			return -EINVAL;
+
+		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+			return -EADDRNOTAVAIL;
+	}
+
+	if (!data)
+		return -EINVAL;
+
+	if (data[IFLA_GENEVE_ID]) {
+		__u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]);
+
+		if (vni >= GENEVE_VID_MASK)
+			return -ERANGE;
+	}
+
+	return 0;
+}
+
+static int geneve_newlink(struct net *net, struct net_device *dev,
+			 struct nlattr *tb[], struct nlattr *data[])
+{
+	struct geneve_net *gn = net_generic(net, geneve_net_id);
+	struct geneve_dev *dummy, *geneve = netdev_priv(dev);
+	struct hlist_head *vni_list_head;
+	struct sockaddr_in remote;	/* IPv4 address for link partner */
+	__u32 vni, hash;
+	int err;
+
+	if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE])
+		return -EINVAL;
+
+	geneve->net = net;
+	geneve->dev = dev;
+
+	vni = nla_get_u32(data[IFLA_GENEVE_ID]);
+	geneve->vni[0] = (vni & 0x00ff0000) >> 16;
+	geneve->vni[1] = (vni & 0x0000ff00) >> 8;
+	geneve->vni[2] =  vni & 0x000000ff;
+
+	geneve->remote.sin_addr.s_addr =
+		nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
+	if (IN_MULTICAST(ntohl(geneve->remote.sin_addr.s_addr)))
+		return -EINVAL;
+
+	remote = geneve->remote;
+	hash = geneve_net_vni_hash(geneve->vni);
+	vni_list_head = &gn->vni_list[hash];
+	hlist_for_each_entry_rcu(dummy, vni_list_head, hlist) {
+		if (!memcmp(geneve->vni, dummy->vni, sizeof(dummy->vni)) &&
+		    !memcmp(&remote, &dummy->remote, sizeof(dummy->remote)))
+			return -EBUSY;
+	}
+
+	if (tb[IFLA_ADDRESS] == NULL)
+		eth_hw_addr_random(dev);
+
+	err = register_netdevice(dev);
+	if (err)
+		return err;
+
+	if (data[IFLA_GENEVE_TTL])
+		geneve->ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
+
+	if (data[IFLA_GENEVE_TOS])
+		geneve->tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
+
+	list_add(&geneve->next, &gn->geneve_list);
+
+	hlist_add_head_rcu(&geneve->hlist, &gn->vni_list[hash]);
+
+	return 0;
+}
+
+static void geneve_dellink(struct net_device *dev, struct list_head *head)
+{
+	struct geneve_dev *geneve = netdev_priv(dev);
+
+	if (!hlist_unhashed(&geneve->hlist))
+		hlist_del_rcu(&geneve->hlist);
+
+	list_del(&geneve->next);
+	unregister_netdevice_queue(dev, head);
+}
+
+static size_t geneve_get_size(const struct net_device *dev)
+{
+	return nla_total_size(sizeof(__u32)) +	/* IFLA_GENEVE_ID */
+		nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
+		nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TTL */
+		nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TOS */
+		0;
+}
+
+static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+	struct geneve_dev *geneve = netdev_priv(dev);
+	__u32 vni;
+
+	vni = (geneve->vni[0] << 16) | (geneve->vni[1] << 8) | geneve->vni[2];
+	if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
+		goto nla_put_failure;
+
+	if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
+			    geneve->remote.sin_addr.s_addr))
+		goto nla_put_failure;
+
+	if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) ||
+	    nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static struct rtnl_link_ops geneve_link_ops __read_mostly = {
+	.kind		= "geneve",
+	.maxtype	= IFLA_GENEVE_MAX,
+	.policy		= geneve_policy,
+	.priv_size	= sizeof(struct geneve_dev),
+	.setup		= geneve_setup,
+	.validate	= geneve_validate,
+	.newlink	= geneve_newlink,
+	.dellink	= geneve_dellink,
+	.get_size	= geneve_get_size,
+	.fill_info	= geneve_fill_info,
+};
+
+static __net_init int geneve_init_net(struct net *net)
+{
+	struct geneve_net *gn = net_generic(net, geneve_net_id);
+	unsigned int h;
+
+	INIT_LIST_HEAD(&gn->geneve_list);
+
+	for (h = 0; h < VNI_HASH_SIZE; ++h)
+		INIT_HLIST_HEAD(&gn->vni_list[h]);
+
+	return 0;
+}
+
+static void __net_exit geneve_exit_net(struct net *net)
+{
+	struct geneve_net *gn = net_generic(net, geneve_net_id);
+	struct geneve_dev *geneve, *next;
+	struct net_device *dev, *aux;
+	LIST_HEAD(list);
+
+	rtnl_lock();
+
+	/* gather any geneve devices that were moved into this ns */
+	for_each_netdev_safe(net, dev, aux)
+		if (dev->rtnl_link_ops == &geneve_link_ops)
+			unregister_netdevice_queue(dev, &list);
+
+	/* now gather any other geneve devices that were created in this ns */
+	list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
+		/* If geneve->dev is in the same netns, it was already added
+		 * to the list by the previous loop.
+		 */
+		if (!net_eq(dev_net(geneve->dev), net))
+			unregister_netdevice_queue(geneve->dev, &list);
+	}
+
+	/* unregister the devices gathered above */
+	unregister_netdevice_many(&list);
+	rtnl_unlock();
+}
+
+static struct pernet_operations geneve_net_ops = {
+	.init = geneve_init_net,
+	.exit = geneve_exit_net,
+	.id   = &geneve_net_id,
+	.size = sizeof(struct geneve_net),
+};
+
+static int __init geneve_init_module(void)
+{
+	int rc;
+
+	rc = register_pernet_subsys(&geneve_net_ops);
+	if (rc)
+		goto out1;
+
+	rc = rtnl_link_register(&geneve_link_ops);
+	if (rc)
+		goto out2;
+
+	return 0;
+out2:
+	unregister_pernet_subsys(&geneve_net_ops);
+out1:
+	return rc;
+}
+late_initcall(geneve_init_module);
+
+static void __exit geneve_cleanup_module(void)
+{
+	rtnl_link_unregister(&geneve_link_ops);
+	unregister_pernet_subsys(&geneve_net_ops);
+}
+module_exit(geneve_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(GENEVE_NETDEV_VER);
+MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>");
+MODULE_DESCRIPTION("Interface driver for GENEVE encapsulated traffic");
+MODULE_ALIAS_RTNL_LINK("geneve");
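
For reference, the 24-bit VNI handling in geneve_newlink() and geneve_fill_info() above round-trips between the u32 netlink attribute and three on-wire bytes, most significant byte first. A small stand-alone sketch of that packing (function names are illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a 24-bit virtual network ID into the wire format used by the
 * geneve header: big-endian, three bytes. */
static void vni_pack(uint8_t vni[3], uint32_t id)
{
	vni[0] = (id & 0x00ff0000) >> 16;
	vni[1] = (id & 0x0000ff00) >> 8;
	vni[2] =  id & 0x000000ff;
}

static uint32_t vni_unpack(const uint8_t vni[3])
{
	return (vni[0] << 16) | (vni[1] << 8) | vni[2];
}

int main(void)
{
	uint8_t vni[3];

	vni_pack(vni, 0xabcdef);
	assert(vni_unpack(vni) == 0xabcdef);
	printf("VNI round-trips: %06x\n", (unsigned)vni_unpack(vni));
	return 0;
}

With an iproute2 recent enough to know the geneve link type, the IFLA_GENEVE_* attributes above map onto something like `ip link add gnv0 type geneve id 42 remote 192.0.2.1`.
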
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 63ff08a..7856b6c 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -76,7 +76,6 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/stat.h>
-#include <linux/netfilter.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/rtnetlink.h>
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 41071d3..dd45440 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -161,6 +161,7 @@
 	unsigned char mac_adr[ETH_ALEN];
 	bool link_state;	/* 0 - link up, 1 - link down */
 	int  ring_size;
+	u32  max_num_vrss_chns;
 };
 
 enum rndis_device_state {
@@ -611,6 +612,12 @@
 	u32 count; /* counter of batched packets */
 };
 
+struct netvsc_stats {
+	u64 packets;
+	u64 bytes;
+	struct u64_stats_sync syncp;
+};
+
 /* The context of the netvsc device  */
 struct net_device_context {
 	/* point back to our device context */
@@ -618,6 +625,9 @@
 	struct delayed_work dwork;
 	struct work_struct work;
 	u32 msg_enable; /* debug level */
+
+	struct netvsc_stats __percpu *tx_stats;
+	struct netvsc_stats __percpu *rx_stats;
 };
 
 /* Per netvsc device */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 1e09243..23126a7 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -228,13 +228,18 @@
 	struct netvsc_device *net_device;
 	struct nvsp_message *init_packet;
 	struct net_device *ndev;
+	int node;
 
 	net_device = get_outbound_net_device(device);
 	if (!net_device)
 		return -ENODEV;
 	ndev = net_device->ndev;
 
-	net_device->recv_buf = vzalloc(net_device->recv_buf_size);
+	node = cpu_to_node(device->channel->target_cpu);
+	net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
+	if (!net_device->recv_buf)
+		net_device->recv_buf = vzalloc(net_device->recv_buf_size);
+
 	if (!net_device->recv_buf) {
 		netdev_err(ndev, "unable to allocate receive "
 			"buffer of size %d\n", net_device->recv_buf_size);
@@ -322,7 +327,9 @@
 
 	/* Now setup the send buffer.
 	 */
-	net_device->send_buf = vzalloc(net_device->send_buf_size);
+	net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
+	if (!net_device->send_buf)
+		net_device->send_buf = vzalloc(net_device->send_buf_size);
 	if (!net_device->send_buf) {
 		netdev_err(ndev, "unable to allocate send "
 			   "buffer of size %d\n", net_device->send_buf_size);
@@ -744,6 +751,7 @@
 	u64 req_id;
 	int ret;
 	struct hv_page_buffer *pgbuf;
+	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
 
 	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
 	if (packet->is_data_pkt) {
@@ -770,32 +778,42 @@
 	if (out_channel->rescind)
 		return -ENODEV;
 
+	/*
+	 * It is possible that once we successfully place this packet
+	 * on the ringbuffer, we may stop the queue. In that case, we want
+	 * to notify the host independently of the xmit_more flag. We don't
+	 * need to be precise here; in the worst case we may signal the host
+	 * unnecessarily.
+	 */
+	if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
+		packet->xmit_more = false;
+
 	if (packet->page_buf_cnt) {
 		pgbuf = packet->cp_partial ? packet->page_buf +
 			packet->rmsg_pgcnt : packet->page_buf;
-		ret = vmbus_sendpacket_pagebuffer(out_channel,
-						  pgbuf,
-						  packet->page_buf_cnt,
-						  &nvmsg,
-						  sizeof(struct nvsp_message),
-						  req_id);
+		ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
+						      pgbuf,
+						      packet->page_buf_cnt,
+						      &nvmsg,
+						      sizeof(struct nvsp_message),
+						      req_id,
+						      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
+						      !packet->xmit_more);
 	} else {
-		ret = vmbus_sendpacket(
-				out_channel, &nvmsg,
-				sizeof(struct nvsp_message),
-				req_id,
-				VM_PKT_DATA_INBAND,
-				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+		ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
+					   sizeof(struct nvsp_message),
+					   req_id,
+					   VM_PKT_DATA_INBAND,
+					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
+					   !packet->xmit_more);
 	}
 
 	if (ret == 0) {
 		atomic_inc(&net_device->num_outstanding_sends);
 		atomic_inc(&net_device->queue_sends[q_idx]);
 
-		if (hv_ringbuf_avail_percent(&out_channel->outbound) <
-			RING_AVAIL_PERCENT_LOWATER) {
-			netif_tx_stop_queue(netdev_get_tx_queue(
-					    ndev, q_idx));
+		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
+			netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
 
 			if (atomic_read(&net_device->
 				queue_sends[q_idx]) < 1)
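
The batching policy above is worth restating: ring_avail is sampled once before queuing, and if the send could drop the ring below the low watermark (where the queue gets stopped), the host is signaled immediately rather than deferring under xmit_more. A compact sketch of that decision, with RING_AVAIL_PERCENT_LOWATER assumed at 10 purely for illustration:

#include <stdbool.h>
#include <stdio.h>

#define RING_AVAIL_PERCENT_LOWATER 10	/* assumed value, for illustration */

/* Decide whether to kick the host for this packet: near the watermark we
 * always signal (the queue may stop and nobody would flush the batch);
 * otherwise we defer only while the stack promises more packets. */
static bool must_signal_host(unsigned int ring_avail, bool xmit_more)
{
	if (ring_avail < RING_AVAIL_PERCENT_LOWATER + 1)
		return true;
	return !xmit_more;
}

int main(void)
{
	printf("%d\n", must_signal_host(5, true));   /* 1: near watermark */
	printf("%d\n", must_signal_host(50, true));  /* 0: batching allowed */
	printf("%d\n", must_signal_host(50, false)); /* 1: last packet */
	return 0;
}
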
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5993c7e..358475e 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -46,6 +46,8 @@
 module_param(ring_size, int, S_IRUGO);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
 
+static int max_num_vrss_chns = 8;
+
 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
 				NETIF_MSG_LINK | NETIF_MSG_IFUP |
 				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
@@ -196,12 +198,12 @@
 	struct flow_keys flow;
 	int data_len;
 
-	if (!skb_flow_dissect(skb, &flow) ||
-	    !(flow.n_proto == htons(ETH_P_IP) ||
-	      flow.n_proto == htons(ETH_P_IPV6)))
+	if (!skb_flow_dissect_flow_keys(skb, &flow) ||
+	    !(flow.basic.n_proto == htons(ETH_P_IP) ||
+	      flow.basic.n_proto == htons(ETH_P_IPV6)))
 		return false;
 
-	if (flow.ip_proto == IPPROTO_TCP)
+	if (flow.basic.ip_proto == IPPROTO_TCP)
 		data_len = 12;
 	else
 		data_len = 8;
@@ -391,7 +393,7 @@
 	u32 skb_length;
 	u32 pkt_sz;
 	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
-
+	struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
 
 	/* We will need at most two pages to describe the rndis
 	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
@@ -580,8 +582,10 @@
 
 drop:
 	if (ret == 0) {
-		net->stats.tx_bytes += skb_length;
-		net->stats.tx_packets++;
+		u64_stats_update_begin(&tx_stats->syncp);
+		tx_stats->packets++;
+		tx_stats->bytes += skb_length;
+		u64_stats_update_end(&tx_stats->syncp);
 	} else {
 		if (ret != -EAGAIN) {
 			dev_kfree_skb_any(skb);
@@ -644,13 +648,17 @@
 				struct ndis_tcp_ip_checksum_info *csum_info)
 {
 	struct net_device *net;
+	struct net_device_context *net_device_ctx;
 	struct sk_buff *skb;
+	struct netvsc_stats *rx_stats;
 
 	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
 	if (!net || net->reg_state != NETREG_REGISTERED) {
 		packet->status = NVSP_STAT_FAIL;
 		return 0;
 	}
+	net_device_ctx = netdev_priv(net);
+	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
 
 	/* Allocate a skb - TODO direct I/O to pages? */
 	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
@@ -686,8 +694,10 @@
 	skb_record_rx_queue(skb, packet->channel->
 			    offermsg.offer.sub_channel_index);
 
-	net->stats.rx_packets++;
-	net->stats.rx_bytes += packet->total_data_buflen;
+	u64_stats_update_begin(&rx_stats->syncp);
+	rx_stats->packets++;
+	rx_stats->bytes += packet->total_data_buflen;
+	u64_stats_update_end(&rx_stats->syncp);
 
 	/*
 	 * Pass the skb back up. Network stack will deallocate the skb when it
@@ -747,12 +757,53 @@
 	ndevctx->device_ctx = hdev;
 	hv_set_drvdata(hdev, ndev);
 	device_info.ring_size = ring_size;
+	device_info.max_num_vrss_chns = max_num_vrss_chns;
 	rndis_filter_device_add(hdev, &device_info);
 	netif_tx_wake_all_queues(ndev);
 
 	return 0;
 }
 
+static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
+						    struct rtnl_link_stats64 *t)
+{
+	struct net_device_context *ndev_ctx = netdev_priv(net);
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
+							    cpu);
+		struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
+							    cpu);
+		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+		unsigned int start;
+
+		do {
+			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+			tx_packets = tx_stats->packets;
+			tx_bytes = tx_stats->bytes;
+		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+
+		do {
+			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+			rx_packets = rx_stats->packets;
+			rx_bytes = rx_stats->bytes;
+		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
+
+		t->tx_bytes	+= tx_bytes;
+		t->tx_packets	+= tx_packets;
+		t->rx_bytes	+= rx_bytes;
+		t->rx_packets	+= rx_packets;
+	}
+
+	t->tx_dropped	= net->stats.tx_dropped;
+	t->tx_errors	= net->stats.tx_errors;
+
+	t->rx_dropped	= net->stats.rx_dropped;
+	t->rx_errors	= net->stats.rx_errors;
+
+	return t;
+}
 
 static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
 {
@@ -804,6 +855,7 @@
 	.ndo_validate_addr =		eth_validate_addr,
 	.ndo_set_mac_address =		netvsc_set_mac_addr,
 	.ndo_select_queue =		netvsc_select_queue,
+	.ndo_get_stats64 =		netvsc_get_stats64,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller =		netvsc_poll_controller,
 #endif
@@ -855,6 +907,14 @@
 		netdev_notify_peers(net);
 }
 
+static void netvsc_free_netdev(struct net_device *netdev)
+{
+	struct net_device_context *net_device_ctx = netdev_priv(netdev);
+
+	free_percpu(net_device_ctx->tx_stats);
+	free_percpu(net_device_ctx->rx_stats);
+	free_netdev(netdev);
+}
 
 static int netvsc_probe(struct hv_device *dev,
 			const struct hv_vmbus_device_id *dev_id)
@@ -883,6 +943,18 @@
 		netdev_dbg(net, "netvsc msg_enable: %d\n",
 			   net_device_ctx->msg_enable);
 
+	net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
+	if (!net_device_ctx->tx_stats) {
+		free_netdev(net);
+		return -ENOMEM;
+	}
+	net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
+	if (!net_device_ctx->rx_stats) {
+		free_percpu(net_device_ctx->tx_stats);
+		free_netdev(net);
+		return -ENOMEM;
+	}
+
 	hv_set_drvdata(dev, net);
 	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
 	INIT_WORK(&net_device_ctx->work, do_set_multicast);
@@ -906,10 +978,11 @@
 
 	/* Notify the netvsc driver of the new device */
 	device_info.ring_size = ring_size;
+	device_info.max_num_vrss_chns = max_num_vrss_chns;
 	ret = rndis_filter_device_add(dev, &device_info);
 	if (ret != 0) {
 		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
-		free_netdev(net);
+		netvsc_free_netdev(net);
 		hv_set_drvdata(dev, NULL);
 		return ret;
 	}
@@ -923,7 +996,7 @@
 	if (ret != 0) {
 		pr_err("Unable to register netdev.\n");
 		rndis_filter_device_remove(dev);
-		free_netdev(net);
+		netvsc_free_netdev(net);
 	} else {
 		schedule_delayed_work(&net_device_ctx->dwork, 0);
 	}
@@ -962,7 +1035,7 @@
 	 */
 	rndis_filter_device_remove(dev);
 
-	free_netdev(net);
+	netvsc_free_netdev(net);
 	return 0;
 }
 
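
netvsc_get_stats64() above uses the standard per-CPU u64_stats pattern: each writer bumps a sequence count around its update, and readers retry until they observe a stable, even count, which yields a consistent {packets, bytes} snapshot even where 64-bit loads are not atomic. A single-threaded user-space analog of the protocol; it deliberately omits the memory barriers and per-CPU placement the kernel primitives provide:

#include <stdint.h>
#include <stdio.h>

struct stats {
	uint64_t packets;
	uint64_t bytes;
	unsigned int seq;
};

/* Writer side: odd seq means an update is in flight. */
static void stats_update(struct stats *s, uint64_t len)
{
	s->seq++;
	s->packets++;
	s->bytes += len;
	s->seq++;
}

/* Reader side: retry until seq is even and unchanged across the copy. */
static void stats_read(const struct stats *s, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = s->seq;
		*pkts = s->packets;
		*bytes = s->bytes;
	} while ((start & 1) || start != s->seq);
}

int main(void)
{
	struct stats s = { 0, 0, 0 };
	uint64_t p, b;

	stats_update(&s, 1500);
	stats_read(&s, &p, &b);
	printf("%llu packets, %llu bytes\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}
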
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 35a482d..236aeb7 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1014,6 +1014,9 @@
 	struct ndis_recv_scale_cap rsscap;
 	u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
 	u32 mtu, size;
+	u32 num_rss_qs;
+	const struct cpumask *node_cpu_mask;
+	u32 num_possible_rss_qs;
 
 	rndis_device = get_rndis_device();
 	if (!rndis_device)
@@ -1101,9 +1104,18 @@
 	if (ret || rsscap.num_recv_que < 2)
 		goto out;
 
+	num_rss_qs = min(device_info->max_num_vrss_chns, rsscap.num_recv_que);
+
 	net_device->max_chn = rsscap.num_recv_que;
-	net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ?
-			       num_online_cpus() : rsscap.num_recv_que;
+
+	/*
+	 * Limit the VRSS channels to the number of CPUs in the NUMA node
+	 * that the primary channel is currently bound to.
+	 */
+	node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
+	num_possible_rss_qs = cpumask_weight(node_cpu_mask);
+	net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+
 	if (net_device->num_chn == 1)
 		goto out;
 
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 1a3c3e5..1dd5ab8 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -53,3 +53,13 @@
 
 	  This driver can also be built as a module. To do so, say M here.
 	  The module will be called 'cc2520'.
+
+config IEEE802154_ATUSB
+	tristate "ATUSB transceiver driver"
+	depends on IEEE802154_DRIVERS && MAC802154 && USB
+	---help---
+	  Say Y here to enable the ATUSB IEEE 802.15.4 wireless
+	  controller.
+
+	  This driver can also be built as a module. To do so, say M here.
+	  The module will be called 'atusb'.
diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile
index d77fa4d..cf1d2a6 100644
--- a/drivers/net/ieee802154/Makefile
+++ b/drivers/net/ieee802154/Makefile
@@ -2,3 +2,4 @@
 obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
 obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o
 obj-$(CONFIG_IEEE802154_CC2520) += cc2520.o
+obj-$(CONFIG_IEEE802154_ATUSB) += atusb.o
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 67d00fb..f7bd9f3 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -35,6 +35,8 @@
 #include <net/mac802154.h>
 #include <net/cfg802154.h>
 
+#include "at86rf230.h"
+
 struct at86rf230_local;
 /* at86rf2xx chip depend data.
  * All timings are in us.
@@ -45,12 +47,14 @@
 	u16 t_reset_to_off;
 	u16 t_off_to_aack;
 	u16 t_off_to_tx_on;
+	u16 t_off_to_sleep;
+	u16 t_sleep_to_off;
 	u16 t_frame;
 	u16 t_p_ack;
 	int rssi_base_val;
 
 	int (*set_channel)(struct at86rf230_local *, u8, u8);
-	int (*get_desense_steps)(struct at86rf230_local *, s32);
+	int (*set_txpower)(struct at86rf230_local *, s32);
 };
 
 #define AT86RF2XX_MAX_BUF		(127 + 3)
@@ -86,6 +90,7 @@
 	struct at86rf2xx_chip_data *data;
 	struct regmap *regmap;
 	int slp_tr;
+	bool sleep;
 
 	struct completion state_complete;
 	struct at86rf230_state_change state;
@@ -102,200 +107,6 @@
 	struct at86rf230_state_change tx;
 };
 
-#define RG_TRX_STATUS	(0x01)
-#define SR_TRX_STATUS		0x01, 0x1f, 0
-#define SR_RESERVED_01_3	0x01, 0x20, 5
-#define SR_CCA_STATUS		0x01, 0x40, 6
-#define SR_CCA_DONE		0x01, 0x80, 7
-#define RG_TRX_STATE	(0x02)
-#define SR_TRX_CMD		0x02, 0x1f, 0
-#define SR_TRAC_STATUS		0x02, 0xe0, 5
-#define RG_TRX_CTRL_0	(0x03)
-#define SR_CLKM_CTRL		0x03, 0x07, 0
-#define SR_CLKM_SHA_SEL		0x03, 0x08, 3
-#define SR_PAD_IO_CLKM		0x03, 0x30, 4
-#define SR_PAD_IO		0x03, 0xc0, 6
-#define RG_TRX_CTRL_1	(0x04)
-#define SR_IRQ_POLARITY		0x04, 0x01, 0
-#define SR_IRQ_MASK_MODE	0x04, 0x02, 1
-#define SR_SPI_CMD_MODE		0x04, 0x0c, 2
-#define SR_RX_BL_CTRL		0x04, 0x10, 4
-#define SR_TX_AUTO_CRC_ON	0x04, 0x20, 5
-#define SR_IRQ_2_EXT_EN		0x04, 0x40, 6
-#define SR_PA_EXT_EN		0x04, 0x80, 7
-#define RG_PHY_TX_PWR	(0x05)
-#define SR_TX_PWR		0x05, 0x0f, 0
-#define SR_PA_LT		0x05, 0x30, 4
-#define SR_PA_BUF_LT		0x05, 0xc0, 6
-#define RG_PHY_RSSI	(0x06)
-#define SR_RSSI			0x06, 0x1f, 0
-#define SR_RND_VALUE		0x06, 0x60, 5
-#define SR_RX_CRC_VALID		0x06, 0x80, 7
-#define RG_PHY_ED_LEVEL	(0x07)
-#define SR_ED_LEVEL		0x07, 0xff, 0
-#define RG_PHY_CC_CCA	(0x08)
-#define SR_CHANNEL		0x08, 0x1f, 0
-#define SR_CCA_MODE		0x08, 0x60, 5
-#define SR_CCA_REQUEST		0x08, 0x80, 7
-#define RG_CCA_THRES	(0x09)
-#define SR_CCA_ED_THRES		0x09, 0x0f, 0
-#define SR_RESERVED_09_1	0x09, 0xf0, 4
-#define RG_RX_CTRL	(0x0a)
-#define SR_PDT_THRES		0x0a, 0x0f, 0
-#define SR_RESERVED_0a_1	0x0a, 0xf0, 4
-#define RG_SFD_VALUE	(0x0b)
-#define SR_SFD_VALUE		0x0b, 0xff, 0
-#define RG_TRX_CTRL_2	(0x0c)
-#define SR_OQPSK_DATA_RATE	0x0c, 0x03, 0
-#define SR_SUB_MODE		0x0c, 0x04, 2
-#define SR_BPSK_QPSK		0x0c, 0x08, 3
-#define SR_OQPSK_SUB1_RC_EN	0x0c, 0x10, 4
-#define SR_RESERVED_0c_5	0x0c, 0x60, 5
-#define SR_RX_SAFE_MODE		0x0c, 0x80, 7
-#define RG_ANT_DIV	(0x0d)
-#define SR_ANT_CTRL		0x0d, 0x03, 0
-#define SR_ANT_EXT_SW_EN	0x0d, 0x04, 2
-#define SR_ANT_DIV_EN		0x0d, 0x08, 3
-#define SR_RESERVED_0d_2	0x0d, 0x70, 4
-#define SR_ANT_SEL		0x0d, 0x80, 7
-#define RG_IRQ_MASK	(0x0e)
-#define SR_IRQ_MASK		0x0e, 0xff, 0
-#define RG_IRQ_STATUS	(0x0f)
-#define SR_IRQ_0_PLL_LOCK	0x0f, 0x01, 0
-#define SR_IRQ_1_PLL_UNLOCK	0x0f, 0x02, 1
-#define SR_IRQ_2_RX_START	0x0f, 0x04, 2
-#define SR_IRQ_3_TRX_END	0x0f, 0x08, 3
-#define SR_IRQ_4_CCA_ED_DONE	0x0f, 0x10, 4
-#define SR_IRQ_5_AMI		0x0f, 0x20, 5
-#define SR_IRQ_6_TRX_UR		0x0f, 0x40, 6
-#define SR_IRQ_7_BAT_LOW	0x0f, 0x80, 7
-#define RG_VREG_CTRL	(0x10)
-#define SR_RESERVED_10_6	0x10, 0x03, 0
-#define SR_DVDD_OK		0x10, 0x04, 2
-#define SR_DVREG_EXT		0x10, 0x08, 3
-#define SR_RESERVED_10_3	0x10, 0x30, 4
-#define SR_AVDD_OK		0x10, 0x40, 6
-#define SR_AVREG_EXT		0x10, 0x80, 7
-#define RG_BATMON	(0x11)
-#define SR_BATMON_VTH		0x11, 0x0f, 0
-#define SR_BATMON_HR		0x11, 0x10, 4
-#define SR_BATMON_OK		0x11, 0x20, 5
-#define SR_RESERVED_11_1	0x11, 0xc0, 6
-#define RG_XOSC_CTRL	(0x12)
-#define SR_XTAL_TRIM		0x12, 0x0f, 0
-#define SR_XTAL_MODE		0x12, 0xf0, 4
-#define RG_RX_SYN	(0x15)
-#define SR_RX_PDT_LEVEL		0x15, 0x0f, 0
-#define SR_RESERVED_15_2	0x15, 0x70, 4
-#define SR_RX_PDT_DIS		0x15, 0x80, 7
-#define RG_XAH_CTRL_1	(0x17)
-#define SR_RESERVED_17_8	0x17, 0x01, 0
-#define SR_AACK_PROM_MODE	0x17, 0x02, 1
-#define SR_AACK_ACK_TIME	0x17, 0x04, 2
-#define SR_RESERVED_17_5	0x17, 0x08, 3
-#define SR_AACK_UPLD_RES_FT	0x17, 0x10, 4
-#define SR_AACK_FLTR_RES_FT	0x17, 0x20, 5
-#define SR_CSMA_LBT_MODE	0x17, 0x40, 6
-#define SR_RESERVED_17_1	0x17, 0x80, 7
-#define RG_FTN_CTRL	(0x18)
-#define SR_RESERVED_18_2	0x18, 0x7f, 0
-#define SR_FTN_START		0x18, 0x80, 7
-#define RG_PLL_CF	(0x1a)
-#define SR_RESERVED_1a_2	0x1a, 0x7f, 0
-#define SR_PLL_CF_START		0x1a, 0x80, 7
-#define RG_PLL_DCU	(0x1b)
-#define SR_RESERVED_1b_3	0x1b, 0x3f, 0
-#define SR_RESERVED_1b_2	0x1b, 0x40, 6
-#define SR_PLL_DCU_START	0x1b, 0x80, 7
-#define RG_PART_NUM	(0x1c)
-#define SR_PART_NUM		0x1c, 0xff, 0
-#define RG_VERSION_NUM	(0x1d)
-#define SR_VERSION_NUM		0x1d, 0xff, 0
-#define RG_MAN_ID_0	(0x1e)
-#define SR_MAN_ID_0		0x1e, 0xff, 0
-#define RG_MAN_ID_1	(0x1f)
-#define SR_MAN_ID_1		0x1f, 0xff, 0
-#define RG_SHORT_ADDR_0	(0x20)
-#define SR_SHORT_ADDR_0		0x20, 0xff, 0
-#define RG_SHORT_ADDR_1	(0x21)
-#define SR_SHORT_ADDR_1		0x21, 0xff, 0
-#define RG_PAN_ID_0	(0x22)
-#define SR_PAN_ID_0		0x22, 0xff, 0
-#define RG_PAN_ID_1	(0x23)
-#define SR_PAN_ID_1		0x23, 0xff, 0
-#define RG_IEEE_ADDR_0	(0x24)
-#define SR_IEEE_ADDR_0		0x24, 0xff, 0
-#define RG_IEEE_ADDR_1	(0x25)
-#define SR_IEEE_ADDR_1		0x25, 0xff, 0
-#define RG_IEEE_ADDR_2	(0x26)
-#define SR_IEEE_ADDR_2		0x26, 0xff, 0
-#define RG_IEEE_ADDR_3	(0x27)
-#define SR_IEEE_ADDR_3		0x27, 0xff, 0
-#define RG_IEEE_ADDR_4	(0x28)
-#define SR_IEEE_ADDR_4		0x28, 0xff, 0
-#define RG_IEEE_ADDR_5	(0x29)
-#define SR_IEEE_ADDR_5		0x29, 0xff, 0
-#define RG_IEEE_ADDR_6	(0x2a)
-#define SR_IEEE_ADDR_6		0x2a, 0xff, 0
-#define RG_IEEE_ADDR_7	(0x2b)
-#define SR_IEEE_ADDR_7		0x2b, 0xff, 0
-#define RG_XAH_CTRL_0	(0x2c)
-#define SR_SLOTTED_OPERATION	0x2c, 0x01, 0
-#define SR_MAX_CSMA_RETRIES	0x2c, 0x0e, 1
-#define SR_MAX_FRAME_RETRIES	0x2c, 0xf0, 4
-#define RG_CSMA_SEED_0	(0x2d)
-#define SR_CSMA_SEED_0		0x2d, 0xff, 0
-#define RG_CSMA_SEED_1	(0x2e)
-#define SR_CSMA_SEED_1		0x2e, 0x07, 0
-#define SR_AACK_I_AM_COORD	0x2e, 0x08, 3
-#define SR_AACK_DIS_ACK		0x2e, 0x10, 4
-#define SR_AACK_SET_PD		0x2e, 0x20, 5
-#define SR_AACK_FVN_MODE	0x2e, 0xc0, 6
-#define RG_CSMA_BE	(0x2f)
-#define SR_MIN_BE		0x2f, 0x0f, 0
-#define SR_MAX_BE		0x2f, 0xf0, 4
-
-#define CMD_REG		0x80
-#define CMD_REG_MASK	0x3f
-#define CMD_WRITE	0x40
-#define CMD_FB		0x20
-
-#define IRQ_BAT_LOW	(1 << 7)
-#define IRQ_TRX_UR	(1 << 6)
-#define IRQ_AMI		(1 << 5)
-#define IRQ_CCA_ED	(1 << 4)
-#define IRQ_TRX_END	(1 << 3)
-#define IRQ_RX_START	(1 << 2)
-#define IRQ_PLL_UNL	(1 << 1)
-#define IRQ_PLL_LOCK	(1 << 0)
-
-#define IRQ_ACTIVE_HIGH	0
-#define IRQ_ACTIVE_LOW	1
-
-#define STATE_P_ON		0x00	/* BUSY */
-#define STATE_BUSY_RX		0x01
-#define STATE_BUSY_TX		0x02
-#define STATE_FORCE_TRX_OFF	0x03
-#define STATE_FORCE_TX_ON	0x04	/* IDLE */
-/* 0x05 */				/* INVALID_PARAMETER */
-#define STATE_RX_ON		0x06
-/* 0x07 */				/* SUCCESS */
-#define STATE_TRX_OFF		0x08
-#define STATE_TX_ON		0x09
-/* 0x0a - 0x0e */			/* 0x0a - UNSUPPORTED_ATTRIBUTE */
-#define STATE_SLEEP		0x0F
-#define STATE_PREP_DEEP_SLEEP	0x10
-#define STATE_BUSY_RX_AACK	0x11
-#define STATE_BUSY_TX_ARET	0x12
-#define STATE_RX_AACK_ON	0x16
-#define STATE_TX_ARET_ON	0x19
-#define STATE_RX_ON_NOCLK	0x1C
-#define STATE_RX_AACK_ON_NOCLK	0x1D
-#define STATE_BUSY_RX_AACK_NOCLK 0x1E
-#define STATE_TRANSITION_IN_PROGRESS 0x1F
-
-#define TRX_STATE_MASK		(0x1F)
-
 #define AT86RF2XX_NUMREGS 0x3F
 
 static void
@@ -304,18 +115,66 @@
 			     const u8 state, void (*complete)(void *context),
 			     const bool irq_enable);
 
+static inline void
+at86rf230_sleep(struct at86rf230_local *lp)
+{
+	if (gpio_is_valid(lp->slp_tr)) {
+		gpio_set_value(lp->slp_tr, 1);
+		usleep_range(lp->data->t_off_to_sleep,
+			     lp->data->t_off_to_sleep + 10);
+		lp->sleep = true;
+	}
+}
+
+static inline void
+at86rf230_awake(struct at86rf230_local *lp)
+{
+	if (gpio_is_valid(lp->slp_tr)) {
+		gpio_set_value(lp->slp_tr, 0);
+		usleep_range(lp->data->t_sleep_to_off,
+			     lp->data->t_sleep_to_off + 100);
+		lp->sleep = false;
+	}
+}
+
 static inline int
 __at86rf230_write(struct at86rf230_local *lp,
 		  unsigned int addr, unsigned int data)
 {
-	return regmap_write(lp->regmap, addr, data);
+	bool sleep = lp->sleep;
+	int ret;
+
+	/* wake the chip up for the register access if it is sleeping */
+	if (sleep)
+		at86rf230_awake(lp);
+
+	ret = regmap_write(lp->regmap, addr, data);
+
+	/* put it back to sleep if it was sleeping */
+	if (sleep)
+		at86rf230_sleep(lp);
+
+	return ret;
 }
 
 static inline int
 __at86rf230_read(struct at86rf230_local *lp,
 		 unsigned int addr, unsigned int *data)
 {
-	return regmap_read(lp->regmap, addr, data);
+	bool sleep = lp->sleep;
+	int ret;
+
+	/* wake the chip up for the register access if it is sleeping */
+	if (sleep)
+		at86rf230_awake(lp);
+
+	ret = regmap_read(lp->regmap, addr, data);
+
+	/* put it back to sleep if it was sleeping */
+	if (sleep)
+		at86rf230_sleep(lp);
+
+	return ret;
 }
 
 static inline int
@@ -337,7 +196,20 @@
 		       unsigned int addr, unsigned int mask,
 		       unsigned int shift, unsigned int data)
 {
-	return regmap_update_bits(lp->regmap, addr, mask, data << shift);
+	bool sleep = lp->sleep;
+	int ret;
+
+	/* wake the chip up for the register access if it is sleeping */
+	if (sleep)
+		at86rf230_awake(lp);
+
+	ret = regmap_update_bits(lp->regmap, addr, mask, data << shift);
+
+	/* put it back to sleep if it was sleeping */
+	if (sleep)
+		at86rf230_sleep(lp);
+
+	return ret;
 }
 
 static inline void
@@ -1010,7 +882,7 @@
 		if (lp->is_tx_from_off) {
 			lp->is_tx_from_off = false;
 			at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
-						     at86rf230_xmit_tx_on,
+						     at86rf230_write_frame,
 						     false);
 		} else {
 			at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
@@ -1061,13 +933,34 @@
 static int
 at86rf230_start(struct ieee802154_hw *hw)
 {
-	return at86rf230_sync_state_change(hw->priv, STATE_RX_AACK_ON);
+	struct at86rf230_local *lp = hw->priv;
+
+	at86rf230_awake(lp);
+	enable_irq(lp->spi->irq);
+
+	return at86rf230_sync_state_change(lp, STATE_RX_AACK_ON);
 }
 
 static void
 at86rf230_stop(struct ieee802154_hw *hw)
 {
-	at86rf230_sync_state_change(hw->priv, STATE_FORCE_TRX_OFF);
+	struct at86rf230_local *lp = hw->priv;
+	u8 csma_seed[2];
+
+	at86rf230_sync_state_change(lp, STATE_FORCE_TRX_OFF);
+
+	disable_irq(lp->spi->irq);
+
+	/* It's recommended to set new random csma_seeds before entering the
+	 * sleep state. This only makes sense in the stop callback, not inside
+	 * at86rf230_sleep, because sleep is also entered when we won't
+	 * transmit afterwards, i.e. before the start callback runs again.
+	 */
+	get_random_bytes(csma_seed, ARRAY_SIZE(csma_seed));
+	at86rf230_write_subreg(lp, SR_CSMA_SEED_0, csma_seed[0]);
+	at86rf230_write_subreg(lp, SR_CSMA_SEED_1, csma_seed[1]);
+
+	at86rf230_sleep(lp);
 }
 
 static int
@@ -1076,6 +969,50 @@
 	return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
 }
 
+#define AT86RF2XX_MAX_ED_LEVELS 0xF
+static const s32 at86rf23x_ed_levels[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+	-9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300,
+	-7100, -6900, -6700, -6500, -6300, -6100,
+};
+
+static const s32 at86rf212_ed_levels_100[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+	-10000, -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200,
+	-8000, -7800, -7600, -7400, -7200, -7000,
+};
+
+static const s32 at86rf212_ed_levels_98[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+	-9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, -8000,
+	-7800, -7600, -7400, -7200, -7000, -6800,
+};
+
+static inline int
+at86rf212_update_cca_ed_level(struct at86rf230_local *lp, int rssi_base_val)
+{
+	unsigned int cca_ed_thres;
+	int rc;
+
+	rc = at86rf230_read_subreg(lp, SR_CCA_ED_THRES, &cca_ed_thres);
+	if (rc < 0)
+		return rc;
+
+	switch (rssi_base_val) {
+	case -98:
+		lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_98;
+		lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_98);
+		lp->hw->phy->cca_ed_level = at86rf212_ed_levels_98[cca_ed_thres];
+		break;
+	case -100:
+		lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100;
+		lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100);
+		lp->hw->phy->cca_ed_level = at86rf212_ed_levels_100[cca_ed_thres];
+		break;
+	default:
+		WARN_ON(1);
+	}
+
+	return 0;
+}
+
 static int
 at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
 {
@@ -1098,6 +1035,10 @@
 	if (rc < 0)
 		return rc;
 
+	rc = at86rf212_update_cca_ed_level(lp, lp->data->rssi_base_val);
+	if (rc < 0)
+		return rc;
+
 	/* This sets the symbol_duration according to the frequency on the 212.
 	 * TODO: move this handling into the channel/page setting in cfg802154.
 	 * We can do that; these timings are specified by the 802.15.4 standard.
@@ -1193,23 +1134,56 @@
 	return 0;
 }
 
+#define AT86RF23X_MAX_TX_POWERS 0xF
+static const s32 at86rf233_powers[AT86RF23X_MAX_TX_POWERS + 1] = {
+	400, 370, 340, 300, 250, 200, 100, 0, -100, -200, -300, -400, -600,
+	-800, -1200, -1700,
+};
+
+static const s32 at86rf231_powers[AT86RF23X_MAX_TX_POWERS + 1] = {
+	300, 280, 230, 180, 130, 70, 0, -100, -200, -300, -400, -500, -700,
+	-900, -1200, -1700,
+};
+
+#define AT86RF212_MAX_TX_POWERS 0x1F
+static const s32 at86rf212_powers[AT86RF212_MAX_TX_POWERS + 1] = {
+	500, 400, 300, 200, 100, 0, -100, -200, -300, -400, -500, -600, -700,
+	-800, -900, -1000, -1100, -1200, -1300, -1400, -1500, -1600, -1700,
+	-1800, -1900, -2000, -2100, -2200, -2300, -2400, -2500, -2600,
+};
+
 static int
-at86rf230_set_txpower(struct ieee802154_hw *hw, s8 db)
+at86rf23x_set_txpower(struct at86rf230_local *lp, s32 mbm)
+{
+	u32 i;
+
+	for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
+		if (lp->hw->phy->supported.tx_powers[i] == mbm)
+			return at86rf230_write_subreg(lp, SR_TX_PWR_23X, i);
+	}
+
+	return -EINVAL;
+}
+
+static int
+at86rf212_set_txpower(struct at86rf230_local *lp, s32 mbm)
+{
+	u32 i;
+
+	for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
+		if (lp->hw->phy->supported.tx_powers[i] == mbm)
+			return at86rf230_write_subreg(lp, SR_TX_PWR_212, i);
+	}
+
+	return -EINVAL;
+}
+
+static int
+at86rf230_set_txpower(struct ieee802154_hw *hw, s32 mbm)
 {
 	struct at86rf230_local *lp = hw->priv;
 
-	/* typical maximum output is 5dBm with RG_PHY_TX_PWR 0x60, lower five
-	 * bits decrease power in 1dB steps. 0x60 represents extra PA gain of
-	 * 0dB.
-	 * thus, supported values for db range from -26 to 5, for 31dB of
-	 * reduction to 0dB of reduction.
-	 */
-	if (db > 5 || db < -26)
-		return -EINVAL;
-
-	db = -(db - 5);
-
-	return __at86rf230_write(lp, RG_PHY_TX_PWR, 0x60 | db);
+	return lp->data->set_txpower(lp, mbm);
 }
 
 static int
@@ -1254,28 +1228,19 @@
 	return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
 }
 
-static int
-at86rf212_get_desens_steps(struct at86rf230_local *lp, s32 level)
-{
-	return (level - lp->data->rssi_base_val) * 100 / 207;
-}
 
 static int
-at86rf23x_get_desens_steps(struct at86rf230_local *lp, s32 level)
-{
-	return (level - lp->data->rssi_base_val) / 2;
-}
-
-static int
-at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 level)
+at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
 {
 	struct at86rf230_local *lp = hw->priv;
+	u32 i;
 
-	if (level < lp->data->rssi_base_val || level > 30)
-		return -EINVAL;
+	for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) {
+		if (hw->phy->supported.cca_ed_levels[i] == mbm)
+			return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, i);
+	}
 
-	return at86rf230_write_subreg(lp, SR_CCA_ED_THRES,
-				      lp->data->get_desense_steps(lp, level));
+	return -EINVAL;
 }
 
 static int
@@ -1361,11 +1326,13 @@
 	.t_reset_to_off = 26,
 	.t_off_to_aack = 80,
 	.t_off_to_tx_on = 80,
+	.t_off_to_sleep = 35,
+	.t_sleep_to_off = 210,
 	.t_frame = 4096,
 	.t_p_ack = 545,
 	.rssi_base_val = -91,
 	.set_channel = at86rf23x_set_channel,
-	.get_desense_steps = at86rf23x_get_desens_steps
+	.set_txpower = at86rf23x_set_txpower,
 };
 
 static struct at86rf2xx_chip_data at86rf231_data = {
@@ -1374,11 +1341,13 @@
 	.t_reset_to_off = 37,
 	.t_off_to_aack = 110,
 	.t_off_to_tx_on = 110,
+	.t_off_to_sleep = 35,
+	.t_sleep_to_off = 380,
 	.t_frame = 4096,
 	.t_p_ack = 545,
 	.rssi_base_val = -91,
 	.set_channel = at86rf23x_set_channel,
-	.get_desense_steps = at86rf23x_get_desens_steps
+	.set_txpower = at86rf23x_set_txpower,
 };
 
 static struct at86rf2xx_chip_data at86rf212_data = {
@@ -1387,11 +1356,13 @@
 	.t_reset_to_off = 26,
 	.t_off_to_aack = 200,
 	.t_off_to_tx_on = 200,
+	.t_off_to_sleep = 35,
+	.t_sleep_to_off = 380,
 	.t_frame = 4096,
 	.t_p_ack = 545,
 	.rssi_base_val = -100,
 	.set_channel = at86rf212_set_channel,
-	.get_desense_steps = at86rf212_get_desens_steps
+	.set_txpower = at86rf212_set_txpower,
 };
 
 static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim)
@@ -1563,9 +1534,22 @@
 		return -EINVAL;
 	}
 
-	lp->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AACK |
-			IEEE802154_HW_TXPOWER | IEEE802154_HW_ARET |
-			IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS;
+	lp->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM |
+			IEEE802154_HW_CSMA_PARAMS |
+			IEEE802154_HW_FRAME_RETRIES | IEEE802154_HW_AFILT |
+			IEEE802154_HW_PROMISCUOUS;
+
+	lp->hw->phy->flags = WPAN_PHY_FLAG_TXPOWER |
+			     WPAN_PHY_FLAG_CCA_ED_LEVEL |
+			     WPAN_PHY_FLAG_CCA_MODE;
+
+	lp->hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
+		BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER);
+	lp->hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
+		BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
+
+	lp->hw->phy->supported.cca_ed_levels = at86rf23x_ed_levels;
+	lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf23x_ed_levels);
 
 	lp->hw->phy->cca.mode = NL802154_CCA_ENERGY;
 
@@ -1573,36 +1557,49 @@
 	case 2:
 		chip = "at86rf230";
 		rc = -ENOTSUPP;
-		break;
+		goto not_supp;
 	case 3:
 		chip = "at86rf231";
 		lp->data = &at86rf231_data;
-		lp->hw->phy->channels_supported[0] = 0x7FFF800;
+		lp->hw->phy->supported.channels[0] = 0x7FFF800;
 		lp->hw->phy->current_channel = 11;
 		lp->hw->phy->symbol_duration = 16;
+		lp->hw->phy->supported.tx_powers = at86rf231_powers;
+		lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf231_powers);
 		break;
 	case 7:
 		chip = "at86rf212";
 		lp->data = &at86rf212_data;
 		lp->hw->flags |= IEEE802154_HW_LBT;
-		lp->hw->phy->channels_supported[0] = 0x00007FF;
-		lp->hw->phy->channels_supported[2] = 0x00007FF;
+		lp->hw->phy->supported.channels[0] = 0x00007FF;
+		lp->hw->phy->supported.channels[2] = 0x00007FF;
 		lp->hw->phy->current_channel = 5;
 		lp->hw->phy->symbol_duration = 25;
+		lp->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH;
+		lp->hw->phy->supported.tx_powers = at86rf212_powers;
+		lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers);
+		lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100;
+		lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100);
 		break;
 	case 11:
 		chip = "at86rf233";
 		lp->data = &at86rf233_data;
-		lp->hw->phy->channels_supported[0] = 0x7FFF800;
+		lp->hw->phy->supported.channels[0] = 0x7FFF800;
 		lp->hw->phy->current_channel = 13;
 		lp->hw->phy->symbol_duration = 16;
+		lp->hw->phy->supported.tx_powers = at86rf233_powers;
+		lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf233_powers);
 		break;
 	default:
 		chip = "unknown";
 		rc = -ENOTSUPP;
-		break;
+		goto not_supp;
 	}
 
+	lp->hw->phy->cca_ed_level = lp->hw->phy->supported.cca_ed_levels[7];
+	lp->hw->phy->transmit_power = lp->hw->phy->supported.tx_powers[0];
+
+not_supp:
 	dev_info(&lp->spi->dev, "Detected %s chip version %d\n", chip, version);
 
 	return rc;
@@ -1696,7 +1693,6 @@
 	lp->spi = spi;
 	lp->slp_tr = slp_tr;
 	hw->parent = &spi->dev;
-	hw->vif_data_size = sizeof(*lp);
 	ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
 
 	lp->regmap = devm_regmap_init_spi(spi, &at86rf230_regmap_spi_config);
@@ -1728,13 +1724,19 @@
 
 	irq_type = irq_get_trigger_type(spi->irq);
 	if (!irq_type)
-		irq_type = IRQF_TRIGGER_RISING;
+		irq_type = IRQF_TRIGGER_HIGH;
 
 	rc = devm_request_irq(&spi->dev, spi->irq, at86rf230_isr,
 			      IRQF_SHARED | irq_type, dev_name(&spi->dev), lp);
 	if (rc)
 		goto free_dev;
 
+	/* disable the IRQ by default; it is enabled when the hardware starts */
+	disable_irq(spi->irq);
+
+	/* put the transceiver to sleep by default */
+	at86rf230_sleep(lp);
+
 	rc = ieee802154_register_hw(lp->hw);
 	if (rc)
 		goto free_dev;
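
Throughout the rework above, transmit power and CCA-ED levels move from whole dB (s8) to mbm, i.e. 1/100 dBm (s32), which is why the at86rf2xx tables read 500 for 5 dBm and -1700 for -17 dBm. A minimal sketch of the conversion; the macro names here are illustrative, not a cfg802154 API:

#include <stdio.h>

/* Illustrative mbm <-> dBm helpers: mbm is simply dBm scaled by 100,
 * which lets fractional dBm levels be carried in an integer. */
#define DBM_TO_MBM(dbm)	((dbm) * 100)
#define MBM_TO_DBM(mbm)	((mbm) / 100)

int main(void)
{
	printf("5 dBm = %d mbm\n", DBM_TO_MBM(5));
	printf("-1700 mbm = %d dBm\n", MBM_TO_DBM(-1700));
	return 0;
}
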
diff --git a/drivers/net/ieee802154/at86rf230.h b/drivers/net/ieee802154/at86rf230.h
new file mode 100644
index 0000000..1e6d1cc
--- /dev/null
+++ b/drivers/net/ieee802154/at86rf230.h
@@ -0,0 +1,220 @@
+/*
+ * AT86RF230/RF231 driver
+ *
+ * Copyright (C) 2009-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#ifndef _AT86RF230_H
+#define _AT86RF230_H
+
+#define RG_TRX_STATUS	(0x01)
+#define SR_TRX_STATUS		0x01, 0x1f, 0
+#define SR_RESERVED_01_3	0x01, 0x20, 5
+#define SR_CCA_STATUS		0x01, 0x40, 6
+#define SR_CCA_DONE		0x01, 0x80, 7
+#define RG_TRX_STATE	(0x02)
+#define SR_TRX_CMD		0x02, 0x1f, 0
+#define SR_TRAC_STATUS		0x02, 0xe0, 5
+#define RG_TRX_CTRL_0	(0x03)
+#define SR_CLKM_CTRL		0x03, 0x07, 0
+#define SR_CLKM_SHA_SEL		0x03, 0x08, 3
+#define SR_PAD_IO_CLKM		0x03, 0x30, 4
+#define SR_PAD_IO		0x03, 0xc0, 6
+#define RG_TRX_CTRL_1	(0x04)
+#define SR_IRQ_POLARITY		0x04, 0x01, 0
+#define SR_IRQ_MASK_MODE	0x04, 0x02, 1
+#define SR_SPI_CMD_MODE		0x04, 0x0c, 2
+#define SR_RX_BL_CTRL		0x04, 0x10, 4
+#define SR_TX_AUTO_CRC_ON	0x04, 0x20, 5
+#define SR_IRQ_2_EXT_EN		0x04, 0x40, 6
+#define SR_PA_EXT_EN		0x04, 0x80, 7
+#define RG_PHY_TX_PWR	(0x05)
+#define SR_TX_PWR_23X		0x05, 0x0f, 0
+#define SR_PA_LT_230		0x05, 0x30, 4
+#define SR_PA_BUF_LT_230	0x05, 0xc0, 6
+#define SR_TX_PWR_212		0x05, 0x1f, 0
+#define SR_GC_PA_212		0x05, 0x60, 5
+#define SR_PA_BOOST_LT_212	0x05, 0x80, 7
+#define RG_PHY_RSSI	(0x06)
+#define SR_RSSI			0x06, 0x1f, 0
+#define SR_RND_VALUE		0x06, 0x60, 5
+#define SR_RX_CRC_VALID		0x06, 0x80, 7
+#define RG_PHY_ED_LEVEL	(0x07)
+#define SR_ED_LEVEL		0x07, 0xff, 0
+#define RG_PHY_CC_CCA	(0x08)
+#define SR_CHANNEL		0x08, 0x1f, 0
+#define SR_CCA_MODE		0x08, 0x60, 5
+#define SR_CCA_REQUEST		0x08, 0x80, 7
+#define RG_CCA_THRES	(0x09)
+#define SR_CCA_ED_THRES		0x09, 0x0f, 0
+#define SR_RESERVED_09_1	0x09, 0xf0, 4
+#define RG_RX_CTRL	(0x0a)
+#define SR_PDT_THRES		0x0a, 0x0f, 0
+#define SR_RESERVED_0a_1	0x0a, 0xf0, 4
+#define RG_SFD_VALUE	(0x0b)
+#define SR_SFD_VALUE		0x0b, 0xff, 0
+#define RG_TRX_CTRL_2	(0x0c)
+#define SR_OQPSK_DATA_RATE	0x0c, 0x03, 0
+#define SR_SUB_MODE		0x0c, 0x04, 2
+#define SR_BPSK_QPSK		0x0c, 0x08, 3
+#define SR_OQPSK_SUB1_RC_EN	0x0c, 0x10, 4
+#define SR_RESERVED_0c_5	0x0c, 0x60, 5
+#define SR_RX_SAFE_MODE		0x0c, 0x80, 7
+#define RG_ANT_DIV	(0x0d)
+#define SR_ANT_CTRL		0x0d, 0x03, 0
+#define SR_ANT_EXT_SW_EN	0x0d, 0x04, 2
+#define SR_ANT_DIV_EN		0x0d, 0x08, 3
+#define SR_RESERVED_0d_2	0x0d, 0x70, 4
+#define SR_ANT_SEL		0x0d, 0x80, 7
+#define RG_IRQ_MASK	(0x0e)
+#define SR_IRQ_MASK		0x0e, 0xff, 0
+#define RG_IRQ_STATUS	(0x0f)
+#define SR_IRQ_0_PLL_LOCK	0x0f, 0x01, 0
+#define SR_IRQ_1_PLL_UNLOCK	0x0f, 0x02, 1
+#define SR_IRQ_2_RX_START	0x0f, 0x04, 2
+#define SR_IRQ_3_TRX_END	0x0f, 0x08, 3
+#define SR_IRQ_4_CCA_ED_DONE	0x0f, 0x10, 4
+#define SR_IRQ_5_AMI		0x0f, 0x20, 5
+#define SR_IRQ_6_TRX_UR		0x0f, 0x40, 6
+#define SR_IRQ_7_BAT_LOW	0x0f, 0x80, 7
+#define RG_VREG_CTRL	(0x10)
+#define SR_RESERVED_10_6	0x10, 0x03, 0
+#define SR_DVDD_OK		0x10, 0x04, 2
+#define SR_DVREG_EXT		0x10, 0x08, 3
+#define SR_RESERVED_10_3	0x10, 0x30, 4
+#define SR_AVDD_OK		0x10, 0x40, 6
+#define SR_AVREG_EXT		0x10, 0x80, 7
+#define RG_BATMON	(0x11)
+#define SR_BATMON_VTH		0x11, 0x0f, 0
+#define SR_BATMON_HR		0x11, 0x10, 4
+#define SR_BATMON_OK		0x11, 0x20, 5
+#define SR_RESERVED_11_1	0x11, 0xc0, 6
+#define RG_XOSC_CTRL	(0x12)
+#define SR_XTAL_TRIM		0x12, 0x0f, 0
+#define SR_XTAL_MODE		0x12, 0xf0, 4
+#define RG_RX_SYN	(0x15)
+#define SR_RX_PDT_LEVEL		0x15, 0x0f, 0
+#define SR_RESERVED_15_2	0x15, 0x70, 4
+#define SR_RX_PDT_DIS		0x15, 0x80, 7
+#define RG_XAH_CTRL_1	(0x17)
+#define SR_RESERVED_17_8	0x17, 0x01, 0
+#define SR_AACK_PROM_MODE	0x17, 0x02, 1
+#define SR_AACK_ACK_TIME	0x17, 0x04, 2
+#define SR_RESERVED_17_5	0x17, 0x08, 3
+#define SR_AACK_UPLD_RES_FT	0x17, 0x10, 4
+#define SR_AACK_FLTR_RES_FT	0x17, 0x20, 5
+#define SR_CSMA_LBT_MODE	0x17, 0x40, 6
+#define SR_RESERVED_17_1	0x17, 0x80, 7
+#define RG_FTN_CTRL	(0x18)
+#define SR_RESERVED_18_2	0x18, 0x7f, 0
+#define SR_FTN_START		0x18, 0x80, 7
+#define RG_PLL_CF	(0x1a)
+#define SR_RESERVED_1a_2	0x1a, 0x7f, 0
+#define SR_PLL_CF_START		0x1a, 0x80, 7
+#define RG_PLL_DCU	(0x1b)
+#define SR_RESERVED_1b_3	0x1b, 0x3f, 0
+#define SR_RESERVED_1b_2	0x1b, 0x40, 6
+#define SR_PLL_DCU_START	0x1b, 0x80, 7
+#define RG_PART_NUM	(0x1c)
+#define SR_PART_NUM		0x1c, 0xff, 0
+#define RG_VERSION_NUM	(0x1d)
+#define SR_VERSION_NUM		0x1d, 0xff, 0
+#define RG_MAN_ID_0	(0x1e)
+#define SR_MAN_ID_0		0x1e, 0xff, 0
+#define RG_MAN_ID_1	(0x1f)
+#define SR_MAN_ID_1		0x1f, 0xff, 0
+#define RG_SHORT_ADDR_0	(0x20)
+#define SR_SHORT_ADDR_0		0x20, 0xff, 0
+#define RG_SHORT_ADDR_1	(0x21)
+#define SR_SHORT_ADDR_1		0x21, 0xff, 0
+#define RG_PAN_ID_0	(0x22)
+#define SR_PAN_ID_0		0x22, 0xff, 0
+#define RG_PAN_ID_1	(0x23)
+#define SR_PAN_ID_1		0x23, 0xff, 0
+#define RG_IEEE_ADDR_0	(0x24)
+#define SR_IEEE_ADDR_0		0x24, 0xff, 0
+#define RG_IEEE_ADDR_1	(0x25)
+#define SR_IEEE_ADDR_1		0x25, 0xff, 0
+#define RG_IEEE_ADDR_2	(0x26)
+#define SR_IEEE_ADDR_2		0x26, 0xff, 0
+#define RG_IEEE_ADDR_3	(0x27)
+#define SR_IEEE_ADDR_3		0x27, 0xff, 0
+#define RG_IEEE_ADDR_4	(0x28)
+#define SR_IEEE_ADDR_4		0x28, 0xff, 0
+#define RG_IEEE_ADDR_5	(0x29)
+#define SR_IEEE_ADDR_5		0x29, 0xff, 0
+#define RG_IEEE_ADDR_6	(0x2a)
+#define SR_IEEE_ADDR_6		0x2a, 0xff, 0
+#define RG_IEEE_ADDR_7	(0x2b)
+#define SR_IEEE_ADDR_7		0x2b, 0xff, 0
+#define RG_XAH_CTRL_0	(0x2c)
+#define SR_SLOTTED_OPERATION	0x2c, 0x01, 0
+#define SR_MAX_CSMA_RETRIES	0x2c, 0x0e, 1
+#define SR_MAX_FRAME_RETRIES	0x2c, 0xf0, 4
+#define RG_CSMA_SEED_0	(0x2d)
+#define SR_CSMA_SEED_0		0x2d, 0xff, 0
+#define RG_CSMA_SEED_1	(0x2e)
+#define SR_CSMA_SEED_1		0x2e, 0x07, 0
+#define SR_AACK_I_AM_COORD	0x2e, 0x08, 3
+#define SR_AACK_DIS_ACK		0x2e, 0x10, 4
+#define SR_AACK_SET_PD		0x2e, 0x20, 5
+#define SR_AACK_FVN_MODE	0x2e, 0xc0, 6
+#define RG_CSMA_BE	(0x2f)
+#define SR_MIN_BE		0x2f, 0x0f, 0
+#define SR_MAX_BE		0x2f, 0xf0, 4
+
+#define CMD_REG		0x80
+#define CMD_REG_MASK	0x3f
+#define CMD_WRITE	0x40
+#define CMD_FB		0x20
+
+#define IRQ_BAT_LOW	BIT(7)
+#define IRQ_TRX_UR	BIT(6)
+#define IRQ_AMI		BIT(5)
+#define IRQ_CCA_ED	BIT(4)
+#define IRQ_TRX_END	BIT(3)
+#define IRQ_RX_START	BIT(2)
+#define IRQ_PLL_UNL	BIT(1)
+#define IRQ_PLL_LOCK	BIT(0)
+
+#define IRQ_ACTIVE_HIGH	0
+#define IRQ_ACTIVE_LOW	1
+
+#define STATE_P_ON		0x00	/* BUSY */
+#define STATE_BUSY_RX		0x01
+#define STATE_BUSY_TX		0x02
+#define STATE_FORCE_TRX_OFF	0x03
+#define STATE_FORCE_TX_ON	0x04	/* IDLE */
+/* 0x05 */				/* INVALID_PARAMETER */
+#define STATE_RX_ON		0x06
+/* 0x07 */				/* SUCCESS */
+#define STATE_TRX_OFF		0x08
+#define STATE_TX_ON		0x09
+/* 0x0a - 0x0e */			/* 0x0a - UNSUPPORTED_ATTRIBUTE */
+#define STATE_SLEEP		0x0F
+#define STATE_PREP_DEEP_SLEEP	0x10
+#define STATE_BUSY_RX_AACK	0x11
+#define STATE_BUSY_TX_ARET	0x12
+#define STATE_RX_AACK_ON	0x16
+#define STATE_TX_ARET_ON	0x19
+#define STATE_RX_ON_NOCLK	0x1C
+#define STATE_RX_AACK_ON_NOCLK	0x1D
+#define STATE_BUSY_RX_AACK_NOCLK 0x1E
+#define STATE_TRANSITION_IN_PROGRESS 0x1F
+
+#define TRX_STATE_MASK		(0x1F)
+
+#endif /* !_AT86RF230_H */
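
The SR_* definitions above encode each subregister as a (register address, bit mask, shift) triplet, so a single macro argument can feed a read-modify-write helper, as at86rf230.c and atusb.c do. A stand-alone sketch with a fake register file standing in for the SPI/USB transport:

#include <stdint.h>
#include <stdio.h>

static uint8_t regs[0x40];	/* fake register file, zero-initialized */

/* Read-modify-write one subregister: clear the field selected by mask,
 * then shift the new value into place, leaving neighboring fields intact. */
static void write_subreg(uint8_t addr, uint8_t mask, uint8_t shift,
			 uint8_t value)
{
	uint8_t tmp = regs[addr] & ~mask;

	regs[addr] = tmp | ((value << shift) & mask);
}

/* Triplets taken from RG_PHY_CC_CCA above: both fields share register 0x08. */
#define SR_CHANNEL	0x08, 0x1f, 0
#define SR_CCA_MODE	0x08, 0x60, 5

int main(void)
{
	write_subreg(SR_CHANNEL, 13);	/* set channel, keep CCA mode */
	write_subreg(SR_CCA_MODE, 1);	/* set CCA mode, keep channel */
	printf("RG_PHY_CC_CCA = 0x%02x\n", regs[0x08]);
	return 0;
}
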
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
new file mode 100644
index 0000000..80dfc72
--- /dev/null
+++ b/drivers/net/ieee802154/atusb.c
@@ -0,0 +1,762 @@
+/*
+ * atusb.c - Driver for the ATUSB IEEE 802.15.4 dongle
+ *
+ * Written 2013 by Werner Almesberger <werner@almesberger.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2
+ *
+ * Based on at86rf230.c and spi_atusb.c.
+ * at86rf230.c is
+ * Copyright (C) 2009 Siemens AG
+ * Written by: Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
+ *
+ * spi_atusb.c is
+ * Copyright (c) 2011 Richard Sharpe <realrichardsharpe@gmail.com>
+ * Copyright (c) 2011 Stefan Schmidt <stefan@datenfreihafen.org>
+ * Copyright (c) 2011 Werner Almesberger <werner@almesberger.net>
+ *
+ * USB initialization is
+ * Copyright (c) 2013 Alexander Aring <alex.aring@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#include <net/cfg802154.h>
+#include <net/mac802154.h>
+
+#include "at86rf230.h"
+#include "atusb.h"
+
+#define ATUSB_JEDEC_ATMEL	0x1f	/* JEDEC manufacturer ID */
+
+#define ATUSB_NUM_RX_URBS	4	/* allow for a bit of local latency */
+#define ATUSB_ALLOC_DELAY_MS	100	/* delay after failed allocation */
+#define ATUSB_TX_TIMEOUT_MS	200	/* on the air timeout */
+
+struct atusb {
+	struct ieee802154_hw *hw;
+	struct usb_device *usb_dev;
+	int shutdown;			/* non-zero if shutting down */
+	int err;			/* set by first error */
+
+	/* RX variables */
+	struct delayed_work work;	/* memory allocations */
+	struct usb_anchor idle_urbs;	/* URBs waiting to be submitted */
+	struct usb_anchor rx_urbs;	/* URBs waiting for reception */
+
+	/* TX variables */
+	struct usb_ctrlrequest tx_dr;
+	struct urb *tx_urb;
+	struct sk_buff *tx_skb;
+	uint8_t tx_ack_seq;		/* current TX ACK sequence number */
+};
+
+/* ----- USB commands without data ----------------------------------------- */
+
+/* To reduce the number of error checks in the code, we record the first error
+ * in atusb->err and reject all subsequent requests until the error is cleared.
+ */
+
+static int atusb_control_msg(struct atusb *atusb, unsigned int pipe,
+			     __u8 request, __u8 requesttype,
+			     __u16 value, __u16 index,
+			     void *data, __u16 size, int timeout)
+{
+	struct usb_device *usb_dev = atusb->usb_dev;
+	int ret;
+
+	if (atusb->err)
+		return atusb->err;
+
+	ret = usb_control_msg(usb_dev, pipe, request, requesttype,
+			      value, index, data, size, timeout);
+	if (ret < 0) {
+		atusb->err = ret;
+		dev_err(&usb_dev->dev,
+			"atusb_control_msg: req 0x%02x val 0x%x idx 0x%x, error %d\n",
+			request, value, index, ret);
+	}
+	return ret;
+}
+
+static int atusb_command(struct atusb *atusb, uint8_t cmd, uint8_t arg)
+{
+	struct usb_device *usb_dev = atusb->usb_dev;
+
+	dev_dbg(&usb_dev->dev, "atusb_command: cmd = 0x%x\n", cmd);
+	return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
+				 cmd, ATUSB_REQ_TO_DEV, arg, 0, NULL, 0, 1000);
+}
+
+static int atusb_write_reg(struct atusb *atusb, uint8_t reg, uint8_t value)
+{
+	struct usb_device *usb_dev = atusb->usb_dev;
+
+	dev_dbg(&usb_dev->dev, "atusb_write_reg: 0x%02x <- 0x%02x\n",
+		reg, value);
+	return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
+				 ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+				 value, reg, NULL, 0, 1000);
+}
+
+static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
+{
+	struct usb_device *usb_dev = atusb->usb_dev;
+	int ret;
+	uint8_t value;
+
+	dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg);
+	ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+				ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+				0, reg, &value, 1, 1000);
+	return ret >= 0 ? value : ret;
+}
+
+static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask,
+			      uint8_t shift, uint8_t value)
+{
+	struct usb_device *usb_dev = atusb->usb_dev;
+	uint8_t orig, tmp;
+	int ret = 0;
+
+	dev_dbg(&usb_dev->dev, "atusb_write_subreg: 0x%02x <- 0x%02x\n",
+		reg, value);
+
+	orig = atusb_read_reg(atusb, reg);
+
+	/* Write the value only into that part of the register which is allowed
+	 * by the mask. All other bits stay as before.
+	 */
+	tmp = orig & ~mask;
+	tmp |= (value << shift) & mask;
+
+	if (tmp != orig)
+		ret = atusb_write_reg(atusb, reg, tmp);
+
+	return ret;
+}
+
+static int atusb_get_and_clear_error(struct atusb *atusb)
+{
+	int err = atusb->err;
+
+	atusb->err = 0;
+	return err;
+}
+
+/* ----- skb allocation ---------------------------------------------------- */
+
+#define MAX_PSDU	127
+#define MAX_RX_XFER	(1 + MAX_PSDU + 2 + 1)	/* PHR+PSDU+CRC+LQI */
+
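+/* Stash a pointer to the owning atusb device in the skb's control buffer. */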
+#define SKB_ATUSB(skb)	(*(struct atusb **)(skb)->cb)
+
+static void atusb_in(struct urb *urb);
+
+static int atusb_submit_rx_urb(struct atusb *atusb, struct urb *urb)
+{
+	struct usb_device *usb_dev = atusb->usb_dev;
+	struct sk_buff *skb = urb->context;
+	int ret;
+
+	if (!skb) {
+		skb = alloc_skb(MAX_RX_XFER, GFP_KERNEL);
+		if (!skb) {
+			dev_warn_ratelimited(&usb_dev->dev,
+					     "atusb_submit_rx_urb: can't allocate skb\n");
+			return -ENOMEM;
+		}
+		skb_put(skb, MAX_RX_XFER);
+		SKB_ATUSB(skb) = atusb;
+	}
+
+	usb_fill_bulk_urb(urb, usb_dev, usb_rcvbulkpipe(usb_dev, 1),
+			  skb->data, MAX_RX_XFER, atusb_in, skb);
+	usb_anchor_urb(urb, &atusb->rx_urbs);
+
+	ret = usb_submit_urb(urb, GFP_KERNEL);
+	if (ret) {
+		usb_unanchor_urb(urb);
+		kfree_skb(skb);
+		urb->context = NULL;
+	}
+	return ret;
+}
+
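+/* Refill the RX URB pool from the idle anchor. If an allocation or submission
+ * fails, park the URB back on the idle anchor and retry after a short delay.
+ */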
+static void atusb_work_urbs(struct work_struct *work)
+{
+	struct atusb *atusb =
+	    container_of(to_delayed_work(work), struct atusb, work);
+	struct usb_device *usb_dev = atusb->usb_dev;
+	struct urb *urb;
+	int ret;
+
+	if (atusb->shutdown)
+		return;
+
+	do {
+		urb = usb_get_from_anchor(&atusb->idle_urbs);
+		if (!urb)
+			return;
+		ret = atusb_submit_rx_urb(atusb, urb);
+	} while (!ret);
+
+	usb_anchor_urb(urb, &atusb->idle_urbs);
+	dev_warn_ratelimited(&usb_dev->dev,
+			     "atusb_work_urbs: can't allocate/submit URB (%d)\n",
+			     ret);
+	schedule_delayed_work(&atusb->work,
+			      msecs_to_jiffies(ATUSB_ALLOC_DELAY_MS) + 1);
+}
+
+/* ----- Asynchronous USB -------------------------------------------------- */
+
+static void atusb_tx_done(struct atusb *atusb, uint8_t seq)
+{
+	struct usb_device *usb_dev = atusb->usb_dev;
+	uint8_t expect = atusb->tx_ack_seq;
+
+	dev_dbg(&usb_dev->dev, "atusb_tx_done (0x%02x/0x%02x)\n", seq, expect);
+	if (seq == expect) {
+		/* TODO: check IFS (inter-frame spacing) handling in firmware */
+		ieee802154_xmit_complete(atusb->hw, atusb->tx_skb, false);
+	} else {
+		/* TODO: This case is hit when atusb raises a TX-complete
+		 * interrupt before probing has finished; the firmware should
+		 * be fixed. It is unlikely that seq == expect is true then,
+		 * but it can happen and would then fail with tx_skb == NULL.
+		 */
+		ieee802154_wake_queue(atusb->hw);
+		if (atusb->tx_skb)
+			dev_kfree_skb_irq(atusb->tx_skb);
+	}
+}
+
+static void atusb_in_good(struct urb *urb)
+{
+	struct usb_device *usb_dev = urb->dev;
+	struct sk_buff *skb = urb->context;
+	struct atusb *atusb = SKB_ATUSB(skb);
+	uint8_t len, lqi;
+
+	if (!urb->actual_length) {
+		dev_dbg(&usb_dev->dev, "atusb_in: zero-sized URB?\n");
+		return;
+	}
+
+	len = *skb->data;
+
+	if (urb->actual_length == 1) {
+		atusb_tx_done(atusb, len);
+		return;
+	}
+
+	if (len + 1 > urb->actual_length - 1) {
+		dev_dbg(&usb_dev->dev, "atusb_in: frame len %d+1 > URB %u-1\n",
+			len, urb->actual_length);
+		return;
+	}
+
+	if (!ieee802154_is_valid_psdu_len(len)) {
+		dev_dbg(&usb_dev->dev, "atusb_in: frame corrupted\n");
+		return;
+	}
+
+	lqi = skb->data[len + 1];
+	dev_dbg(&usb_dev->dev, "atusb_in: rx len %d lqi 0x%02x\n", len, lqi);
+	skb_pull(skb, 1);	/* remove PHR */
+	skb_trim(skb, len);	/* get payload only */
+	ieee802154_rx_irqsafe(atusb->hw, skb, lqi);
+	urb->context = NULL;	/* skb is gone */
+}
+
+static void atusb_in(struct urb *urb)
+{
+	struct usb_device *usb_dev = urb->dev;
+	struct sk_buff *skb = urb->context;
+	struct atusb *atusb = SKB_ATUSB(skb);
+
+	dev_dbg(&usb_dev->dev, "atusb_in: status %d len %d\n",
+		urb->status, urb->actual_length);
+	if (urb->status) {
+		if (urb->status == -ENOENT) { /* being killed */
+			kfree_skb(skb);
+			urb->context = NULL;
+			return;
+		}
+		dev_dbg(&usb_dev->dev, "atusb_in: URB error %d\n", urb->status);
+	} else {
+		atusb_in_good(urb);
+	}
+
+	usb_anchor_urb(urb, &atusb->idle_urbs);
+	if (!atusb->shutdown)
+		schedule_delayed_work(&atusb->work, 0);
+}
+
+/* ----- URB allocation/deallocation --------------------------------------- */
+
+static void atusb_free_urbs(struct atusb *atusb)
+{
+	struct urb *urb;
+
+	while (1) {
+		urb = usb_get_from_anchor(&atusb->idle_urbs);
+		if (!urb)
+			break;
+		if (urb->context)
+			kfree_skb(urb->context);
+		usb_free_urb(urb);
+	}
+}
+
+static int atusb_alloc_urbs(struct atusb *atusb, int n)
+{
+	struct urb *urb;
+
+	while (n) {
+		urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!urb) {
+			atusb_free_urbs(atusb);
+			return -ENOMEM;
+		}
+		usb_anchor_urb(urb, &atusb->idle_urbs);
+		n--;
+	}
+	return 0;
+}
+
+/* ----- IEEE 802.15.4 interface operations -------------------------------- */
+
+static void atusb_xmit_complete(struct urb *urb)
+{
+	dev_dbg(&urb->dev->dev, "atusb_xmit urb completed\n");
+}
+
+static int atusb_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
+{
+	struct atusb *atusb = hw->priv;
+	struct usb_device *usb_dev = atusb->usb_dev;
+	int ret;
+
+	dev_dbg(&usb_dev->dev, "atusb_xmit (%d)\n", skb->len);
+	atusb->tx_skb = skb;
+	atusb->tx_ack_seq++;
+	atusb->tx_dr.wIndex = cpu_to_le16(atusb->tx_ack_seq);
+	atusb->tx_dr.wLength = cpu_to_le16(skb->len);
+
+	usb_fill_control_urb(atusb->tx_urb, usb_dev,
+			     usb_sndctrlpipe(usb_dev, 0),
+			     (unsigned char *)&atusb->tx_dr, skb->data,
+			     skb->len, atusb_xmit_complete, NULL);
+	ret = usb_submit_urb(atusb->tx_urb, GFP_ATOMIC);
+	dev_dbg(&usb_dev->dev, "atusb_xmit done (%d)\n", ret);
+	return ret;
+}
+
+static int atusb_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+{
+	struct atusb *atusb = hw->priv;
+	int ret;
+
+	/* This implicitly sets the CCA (Clear Channel Assessment) mode to 0,
+	 * "Mode 3a, Carrier sense OR energy above threshold".
+	 * We should probably make this configurable. @@@
+	 */
+	ret = atusb_write_reg(atusb, RG_PHY_CC_CCA, channel);
+	if (ret < 0)
+		return ret;
+	msleep(1);	/* @@@ ugly synchronization */
+	return 0;
+}
+
+static int atusb_ed(struct ieee802154_hw *hw, u8 *level)
+{
+	BUG_ON(!level);
+	*level = 0xbe;
+	return 0;
+}
+
+static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw,
+				  struct ieee802154_hw_addr_filt *filt,
+				  unsigned long changed)
+{
+	struct atusb *atusb = hw->priv;
+	struct device *dev = &atusb->usb_dev->dev;
+
+	if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
+		u16 addr = le16_to_cpu(filt->short_addr);
+
+		dev_vdbg(dev, "atusb_set_hw_addr_filt called for saddr\n");
+		atusb_write_reg(atusb, RG_SHORT_ADDR_0, addr);
+		atusb_write_reg(atusb, RG_SHORT_ADDR_1, addr >> 8);
+	}
+
+	if (changed & IEEE802154_AFILT_PANID_CHANGED) {
+		u16 pan = le16_to_cpu(filt->pan_id);
+
+		dev_vdbg(dev, "atusb_set_hw_addr_filt called for pan id\n");
+		atusb_write_reg(atusb, RG_PAN_ID_0, pan);
+		atusb_write_reg(atusb, RG_PAN_ID_1, pan >> 8);
+	}
+
+	if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
+		u8 i, addr[IEEE802154_EXTENDED_ADDR_LEN];
+
+		memcpy(addr, &filt->ieee_addr, IEEE802154_EXTENDED_ADDR_LEN);
+		dev_vdbg(dev, "atusb_set_hw_addr_filt called for IEEE addr\n");
+		for (i = 0; i < 8; i++)
+			atusb_write_reg(atusb, RG_IEEE_ADDR_0 + i, addr[i]);
+	}
+
+	if (changed & IEEE802154_AFILT_PANC_CHANGED) {
+		dev_vdbg(dev,
+			 "atusb_set_hw_addr_filt called for panc change\n");
+		if (filt->pan_coord)
+			atusb_write_subreg(atusb, SR_AACK_I_AM_COORD, 1);
+		else
+			atusb_write_subreg(atusb, SR_AACK_I_AM_COORD, 0);
+	}
+
+	return atusb_get_and_clear_error(atusb);
+}
+
+static int atusb_start(struct ieee802154_hw *hw)
+{
+	struct atusb *atusb = hw->priv;
+	struct usb_device *usb_dev = atusb->usb_dev;
+	int ret;
+
+	dev_dbg(&usb_dev->dev, "atusb_start\n");
+	schedule_delayed_work(&atusb->work, 0);
+	atusb_command(atusb, ATUSB_RX_MODE, 1);
+	ret = atusb_get_and_clear_error(atusb);
+	if (ret < 0)
+		usb_kill_anchored_urbs(&atusb->idle_urbs);
+	return ret;
+}
+
+static void atusb_stop(struct ieee802154_hw *hw)
+{
+	struct atusb *atusb = hw->priv;
+	struct usb_device *usb_dev = atusb->usb_dev;
+
+	dev_dbg(&usb_dev->dev, "atusb_stop\n");
+	usb_kill_anchored_urbs(&atusb->idle_urbs);
+	atusb_command(atusb, ATUSB_RX_MODE, 0);
+	atusb_get_and_clear_error(atusb);
+}
+
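+/* Transceiver TX power in mBm, indexed by the value written to the TX_PWR
+ * subregister (SR_TX_PWR_23X): 0x0 is +3 dBm, 0xF is -17 dBm.
+ */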
+#define ATUSB_MAX_TX_POWERS 0xF
+static const s32 atusb_powers[ATUSB_MAX_TX_POWERS + 1] = {
+	300, 280, 230, 180, 130, 70, 0, -100, -200, -300, -400, -500, -700,
+	-900, -1200, -1700,
+};
+
+static int
+atusb_set_txpower(struct ieee802154_hw *hw, s32 mbm)
+{
+	struct atusb *atusb = hw->priv;
+	u32 i;
+
+	for (i = 0; i < hw->phy->supported.tx_powers_size; i++) {
+		if (hw->phy->supported.tx_powers[i] == mbm)
+			return atusb_write_subreg(atusb, SR_TX_PWR_23X, i);
+	}
+
+	return -EINVAL;
+}
+
+static int
+atusb_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
+{
+	struct atusb *atusb = hw->priv;
+	int ret;
+
+	if (on) {
+		ret = atusb_write_subreg(atusb, SR_AACK_DIS_ACK, 1);
+		if (ret < 0)
+			return ret;
+
+		ret = atusb_write_subreg(atusb, SR_AACK_PROM_MODE, 1);
+		if (ret < 0)
+			return ret;
+	} else {
+		ret = atusb_write_subreg(atusb, SR_AACK_PROM_MODE, 0);
+		if (ret < 0)
+			return ret;
+
+		ret = atusb_write_subreg(atusb, SR_AACK_DIS_ACK, 0);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static struct ieee802154_ops atusb_ops = {
+	.owner			= THIS_MODULE,
+	.xmit_async		= atusb_xmit,
+	.ed			= atusb_ed,
+	.set_channel		= atusb_channel,
+	.start			= atusb_start,
+	.stop			= atusb_stop,
+	.set_hw_addr_filt	= atusb_set_hw_addr_filt,
+	.set_txpower		= atusb_set_txpower,
+	.set_promiscuous_mode	= atusb_set_promiscuous_mode,
+};
+
+/* ----- Firmware and chip version information ----------------------------- */
+
+static int atusb_get_and_show_revision(struct atusb *atusb)
+{
+	struct usb_device *usb_dev = atusb->usb_dev;
+	unsigned char buffer[3];
+	int ret;
+
+	/* Get a couple of the ATmega firmware values */
+	ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+				ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
+				buffer, 3, 1000);
+	if (ret >= 0) {
+		dev_info(&usb_dev->dev,
+			 "Firmware: major: %u, minor: %u, hardware type: %u\n",
+			 buffer[0], buffer[1], buffer[2]);
+		if (buffer[0] == 0 && buffer[1] < 2) {
+			dev_info(&usb_dev->dev,
+				 "Firmware version (%u.%u) predates our first public release.\n",
+				 buffer[0], buffer[1]);
+			dev_info(&usb_dev->dev,
+				 "Please update to version 0.2 or newer.\n");
+		}
+	}
+
+	return ret;
+}
+
+static int atusb_get_and_show_build(struct atusb *atusb)
+{
+	struct usb_device *usb_dev = atusb->usb_dev;
+	char build[ATUSB_BUILD_SIZE + 1];
+	int ret;
+
+	ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+				ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
+				build, ATUSB_BUILD_SIZE, 1000);
+	if (ret >= 0) {
+		build[ret] = 0;
+		dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
+	}
+
+	return ret;
+}
+
+static int atusb_get_and_show_chip(struct atusb *atusb)
+{
+	struct usb_device *usb_dev = atusb->usb_dev;
+	uint8_t man_id_0, man_id_1, part_num, version_num;
+
+	man_id_0 = atusb_read_reg(atusb, RG_MAN_ID_0);
+	man_id_1 = atusb_read_reg(atusb, RG_MAN_ID_1);
+	part_num = atusb_read_reg(atusb, RG_PART_NUM);
+	version_num = atusb_read_reg(atusb, RG_VERSION_NUM);
+
+	if (atusb->err)
+		return atusb->err;
+
+	if ((man_id_1 << 8 | man_id_0) != ATUSB_JEDEC_ATMEL) {
+		dev_err(&usb_dev->dev,
+			"non-Atmel transceiver xxxx%02x%02x\n",
+			man_id_1, man_id_0);
+		goto fail;
+	}
+	if (part_num != 3 && part_num != 2) {
+		dev_err(&usb_dev->dev,
+			"unexpected transceiver, part 0x%02x version 0x%02x\n",
+			part_num, version_num);
+		goto fail;
+	}
+
+	dev_info(&usb_dev->dev, "ATUSB: AT86RF231 version %d\n", version_num);
+
+	return 0;
+
+fail:
+	atusb->err = -ENODEV;
+	return -ENODEV;
+}
+
+/* ----- Setup ------------------------------------------------------------- */
+
+static int atusb_probe(struct usb_interface *interface,
+		       const struct usb_device_id *id)
+{
+	struct usb_device *usb_dev = interface_to_usbdev(interface);
+	struct ieee802154_hw *hw;
+	struct atusb *atusb = NULL;
+	int ret = -ENOMEM;
+
+	hw = ieee802154_alloc_hw(sizeof(struct atusb), &atusb_ops);
+	if (!hw)
+		return -ENOMEM;
+
+	atusb = hw->priv;
+	atusb->hw = hw;
+	atusb->usb_dev = usb_get_dev(usb_dev);
+	usb_set_intfdata(interface, atusb);
+
+	atusb->shutdown = 0;
+	atusb->err = 0;
+	INIT_DELAYED_WORK(&atusb->work, atusb_work_urbs);
+	init_usb_anchor(&atusb->idle_urbs);
+	init_usb_anchor(&atusb->rx_urbs);
+
+	if (atusb_alloc_urbs(atusb, ATUSB_NUM_RX_URBS))
+		goto fail;
+
+	atusb->tx_dr.bRequestType = ATUSB_REQ_TO_DEV;
+	atusb->tx_dr.bRequest = ATUSB_TX;
+	atusb->tx_dr.wValue = cpu_to_le16(0);
+
+	atusb->tx_urb = usb_alloc_urb(0, GFP_ATOMIC);
+	if (!atusb->tx_urb)
+		goto fail;
+
+	hw->parent = &usb_dev->dev;
+	hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
+		    IEEE802154_HW_PROMISCUOUS;
+
+	hw->phy->flags = WPAN_PHY_FLAG_TXPOWER;
+
+	hw->phy->current_page = 0;
+	hw->phy->current_channel = 11;	/* reset default */
+	hw->phy->supported.channels[0] = 0x7FFF800;
+	hw->phy->supported.tx_powers = atusb_powers;
+	hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers);
+	hw->phy->transmit_power = hw->phy->supported.tx_powers[0];
+	ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+
+	atusb_command(atusb, ATUSB_RF_RESET, 0);
+	atusb_get_and_show_chip(atusb);
+	atusb_get_and_show_revision(atusb);
+	atusb_get_and_show_build(atusb);
+	ret = atusb_get_and_clear_error(atusb);
+	if (ret) {
+		dev_err(&atusb->usb_dev->dev,
+			"%s: initialization failed, error = %d\n",
+			__func__, ret);
+		goto fail;
+	}
+
+	ret = ieee802154_register_hw(hw);
+	if (ret)
+		goto fail;
+
+	/* If we just powered on, we're now in P_ON and need to enter TRX_OFF
+	 * explicitly. Any resets after that will send us straight to TRX_OFF,
+	 * making the command below redundant.
+	 */
+	atusb_write_reg(atusb, RG_TRX_STATE, STATE_FORCE_TRX_OFF);
+	msleep(1);	/* reset => TRX_OFF, tTR13 = 37 us */
+
+#if 0
+	/* Calculating the maximum time available to empty the frame buffer
+	 * on reception:
+	 *
+	 * According to [1], the inter-frame gap is
+	 * R * 20 * 16 us + 128 us
+	 * where R is a random number from 0 to 7. Furthermore, we have 20 bit
+	 * times (80 us at 250 kbps) of SHR of the next frame before the
+	 * transceiver begins storing data in the frame buffer.
+	 *
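+	 * (Worst case for us is R = 0: 128 us of gap plus the 80 us of SHR.)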
+	 * This yields a minimum time of 208 us between the last data of a
+	 * frame and the first data of the next frame. This time is further
+	 * reduced by interrupt latency in the atusb firmware.
+	 *
+	 * atusb currently needs about 500 us to retrieve a maximum-sized
+	 * frame. We therefore have to allow reception of a new frame to begin
+	 * while we retrieve the previous frame.
+	 *
+	 * [1] "JN-AN-1035 Calculating data rates in an IEEE 802.15.4-based
+	 *      network", Jennic 2006.
+	 *     http://www.jennic.com/download_file.php?supportFile=JN-AN-1035%20Calculating%20802-15-4%20Data%20Rates-1v0.pdf
+	 */
+
+	atusb_write_subreg(atusb, SR_RX_SAFE_MODE, 1);
+#endif
+	atusb_write_reg(atusb, RG_IRQ_MASK, 0xff);
+
+	ret = atusb_get_and_clear_error(atusb);
+	if (!ret)
+		return 0;
+
+	dev_err(&atusb->usb_dev->dev,
+		"%s: setup failed, error = %d\n",
+		__func__, ret);
+
+	ieee802154_unregister_hw(hw);
+fail:
+	atusb_free_urbs(atusb);
+	usb_kill_urb(atusb->tx_urb);
+	usb_free_urb(atusb->tx_urb);
+	usb_put_dev(usb_dev);
+	ieee802154_free_hw(hw);
+	return ret;
+}
+
+static void atusb_disconnect(struct usb_interface *interface)
+{
+	struct atusb *atusb = usb_get_intfdata(interface);
+
+	dev_dbg(&atusb->usb_dev->dev, "atusb_disconnect\n");
+
+	atusb->shutdown = 1;
+	cancel_delayed_work_sync(&atusb->work);
+
+	usb_kill_anchored_urbs(&atusb->rx_urbs);
+	atusb_free_urbs(atusb);
+	usb_kill_urb(atusb->tx_urb);
+	usb_free_urb(atusb->tx_urb);
+
+	ieee802154_unregister_hw(atusb->hw);
+
+	ieee802154_free_hw(atusb->hw);
+
+	usb_set_intfdata(interface, NULL);
+	usb_put_dev(atusb->usb_dev);
+
+	pr_debug("atusb_disconnect done\n");
+}
+
+/* The devices we work with */
+static const struct usb_device_id atusb_device_table[] = {
+	{
+		.match_flags		= USB_DEVICE_ID_MATCH_DEVICE |
+					  USB_DEVICE_ID_MATCH_INT_INFO,
+		.idVendor		= ATUSB_VENDOR_ID,
+		.idProduct		= ATUSB_PRODUCT_ID,
+		.bInterfaceClass	= USB_CLASS_VENDOR_SPEC
+	},
+	/* end with null element */
+	{}
+};
+MODULE_DEVICE_TABLE(usb, atusb_device_table);
+
+static struct usb_driver atusb_driver = {
+	.name		= "atusb",
+	.probe		= atusb_probe,
+	.disconnect	= atusb_disconnect,
+	.id_table	= atusb_device_table,
+};
+module_usb_driver(atusb_driver);
+
+MODULE_AUTHOR("Alexander Aring <alex.aring@gmail.com>");
+MODULE_AUTHOR("Richard Sharpe <realrichardsharpe@gmail.com>");
+MODULE_AUTHOR("Stefan Schmidt <stefan@datenfreihafen.org>");
+MODULE_AUTHOR("Werner Almesberger <werner@almesberger.net>");
+MODULE_DESCRIPTION("ATUSB IEEE 802.15.4 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ieee802154/atusb.h b/drivers/net/ieee802154/atusb.h
new file mode 100644
index 0000000..0690edc
--- /dev/null
+++ b/drivers/net/ieee802154/atusb.h
@@ -0,0 +1,84 @@
+/*
+ * atusb.h - Definitions shared between kernel and ATUSB firmware
+ *
+ * Written 2013 by Werner Almesberger <werner@almesberger.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2, or
+ * (at your option) any later version.
+ *
+ * This file should be identical for kernel and firmware.
+ * Kernel: drivers/net/ieee802154/atusb.h
+ * Firmware: ben-wpan/atusb/fw/include/atusb/atusb.h
+ */
+
+#ifndef _ATUSB_H
+#define _ATUSB_H
+
+#define ATUSB_VENDOR_ID	0x20b7	/* Qi Hardware */
+#define ATUSB_PRODUCT_ID 0x1540	/* 802.15.4, device 0 */
+				/*     -- -         - */
+
+#define ATUSB_BUILD_SIZE 256	/* maximum build version/date message length */
+
+/* Commands to our device. Make sure this is synced with the firmware */
+enum atusb_requests {
+	ATUSB_ID			= 0x00,	/* system status/control grp */
+	ATUSB_BUILD,
+	ATUSB_RESET,
+	ATUSB_RF_RESET			= 0x10,	/* debug/test group */
+	ATUSB_POLL_INT,
+	ATUSB_TEST,			/* atusb-sil only */
+	ATUSB_TIMER,
+	ATUSB_GPIO,
+	ATUSB_SLP_TR,
+	ATUSB_GPIO_CLEANUP,
+	ATUSB_REG_WRITE			= 0x20,	/* transceiver group */
+	ATUSB_REG_READ,
+	ATUSB_BUF_WRITE,
+	ATUSB_BUF_READ,
+	ATUSB_SRAM_WRITE,
+	ATUSB_SRAM_READ,
+	ATUSB_SPI_WRITE			= 0x30,	/* SPI group */
+	ATUSB_SPI_READ1,
+	ATUSB_SPI_READ2,
+	ATUSB_SPI_WRITE2_SYNC,
+	ATUSB_RX_MODE			= 0x40, /* HardMAC group */
+	ATUSB_TX,
+};
+
+/* Direction	bRequest		wValue		wIndex	wLength
+ *
+ * ->host	ATUSB_ID		-		-	3
+ * ->host	ATUSB_BUILD		-		-	#bytes
+ * host->	ATUSB_RESET		-		-	0
+ *
+ * host->	ATUSB_RF_RESET		-		-	0
+ * ->host	ATUSB_POLL_INT		-		-	1
+ * host->	ATUSB_TEST		-		-	0
+ * ->host	ATUSB_TIMER		-		-	#bytes (6)
+ * ->host	ATUSB_GPIO		dir+data	mask+p#	3
+ * host->	ATUSB_SLP_TR		-		-	0
+ * host->	ATUSB_GPIO_CLEANUP	-		-	0
+ *
+ * host->	ATUSB_REG_WRITE		value		addr	0
+ * ->host	ATUSB_REG_READ		-		addr	1
+ * host->	ATUSB_BUF_WRITE		-		-	#bytes
+ * ->host	ATUSB_BUF_READ		-		-	#bytes
+ * host->	ATUSB_SRAM_WRITE	-		addr	#bytes
+ * ->host	ATUSB_SRAM_READ		-		addr	#bytes
+ *
+ * host->	ATUSB_SPI_WRITE		byte0		byte1	#bytes
+ * ->host	ATUSB_SPI_READ1		byte0		-	#bytes
+ * ->host	ATUSB_SPI_READ2		byte0		byte1	#bytes
+ * ->host	ATUSB_SPI_WRITE2_SYNC	byte0		byte1	0/1
+ *
+ * host->	ATUSB_RX_MODE		on		-	0
+ * host->	ATUSB_TX		flags		ack_seq	#bytes
+ */
+
+#define ATUSB_REQ_FROM_DEV	(USB_TYPE_VENDOR | USB_DIR_IN)
+#define ATUSB_REQ_TO_DEV	(USB_TYPE_VENDOR | USB_DIR_OUT)
+
+#endif /* !_ATUSB_H */
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index f833b8b..b6fc295 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -196,6 +196,7 @@
 	u8 *buf;			/* SPI TX/Rx data buffer */
 	struct mutex buffer_mutex;	/* SPI buffer mutex */
 	bool is_tx;			/* Flag for sync b/w Tx and Rx */
+	bool amplified;			/* Flag for CC2591 */
 	int fifo_pin;			/* FIFO GPIO pin number */
 	struct work_struct fifop_irqwork;/* Workqueue for FIFOP */
 	spinlock_t lock;		/* Lock for is_tx*/
@@ -589,22 +590,23 @@
 	      struct ieee802154_hw_addr_filt *filt, unsigned long changed)
 {
 	struct cc2520_private *priv = hw->priv;
+	int ret = 0;
 
 	if (changed & IEEE802154_AFILT_PANID_CHANGED) {
 		u16 panid = le16_to_cpu(filt->pan_id);
 
 		dev_vdbg(&priv->spi->dev,
 			 "cc2520_filter called for pan id\n");
-		cc2520_write_ram(priv, CC2520RAM_PANID,
-				 sizeof(panid), (u8 *)&panid);
+		ret = cc2520_write_ram(priv, CC2520RAM_PANID,
+				       sizeof(panid), (u8 *)&panid);
 	}
 
 	if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
 		dev_vdbg(&priv->spi->dev,
 			 "cc2520_filter called for IEEE addr\n");
-		cc2520_write_ram(priv, CC2520RAM_IEEEADDR,
-				 sizeof(filt->ieee_addr),
-				 (u8 *)&filt->ieee_addr);
+		ret = cc2520_write_ram(priv, CC2520RAM_IEEEADDR,
+				       sizeof(filt->ieee_addr),
+				       (u8 *)&filt->ieee_addr);
 	}
 
 	if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
@@ -612,20 +614,113 @@
 
 		dev_vdbg(&priv->spi->dev,
 			 "cc2520_filter called for saddr\n");
-		cc2520_write_ram(priv, CC2520RAM_SHORTADDR,
-				 sizeof(addr), (u8 *)&addr);
+		ret = cc2520_write_ram(priv, CC2520RAM_SHORTADDR,
+				       sizeof(addr), (u8 *)&addr);
 	}
 
 	if (changed & IEEE802154_AFILT_PANC_CHANGED) {
 		dev_vdbg(&priv->spi->dev,
 			 "cc2520_filter called for panc change\n");
 		if (filt->pan_coord)
-			cc2520_write_register(priv, CC2520_FRMFILT0, 0x02);
+			ret = cc2520_write_register(priv, CC2520_FRMFILT0,
+						    0x02);
 		else
-			cc2520_write_register(priv, CC2520_FRMFILT0, 0x00);
+			ret = cc2520_write_register(priv, CC2520_FRMFILT0,
+						    0x00);
 	}
 
-	return 0;
+	return ret;
+}
+
+static inline int cc2520_set_tx_power(struct cc2520_private *priv, s32 mbm)
+{
+	u8 power;
+
+	switch (mbm) {
+	case 500:
+		power = 0xF7;
+		break;
+	case 300:
+		power = 0xF2;
+		break;
+	case 200:
+		power = 0xAB;
+		break;
+	case 100:
+		power = 0x13;
+		break;
+	case 0:
+		power = 0x32;
+		break;
+	case -200:
+		power = 0x81;
+		break;
+	case -400:
+		power = 0x88;
+		break;
+	case -700:
+		power = 0x2C;
+		break;
+	case -1800:
+		power = 0x03;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return cc2520_write_register(priv, CC2520_TXPOWER, power);
+}
+
+static inline int cc2520_cc2591_set_tx_power(struct cc2520_private *priv,
+					     s32 mbm)
+{
+	u8 power;
+
+	switch (mbm) {
+	case 1700:
+		power = 0xF9;
+		break;
+	case 1600:
+		power = 0xF0;
+		break;
+	case 1400:
+		power = 0xA0;
+		break;
+	case 1100:
+		power = 0x2C;
+		break;
+	case -100:
+		power = 0x03;
+		break;
+	case -800:
+		power = 0x01;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return cc2520_write_register(priv, CC2520_TXPOWER, power);
+}
+
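+/* TX power levels in mBm, matching the cases handled by the two helpers
+ * above (bare CC2520, and CC2520 behind a CC2591 range extender).
+ */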
+#define CC2520_MAX_TX_POWERS 0x8
+static const s32 cc2520_powers[CC2520_MAX_TX_POWERS + 1] = {
+	500, 300, 200, 100, 0, -200, -400, -700, -1800,
+};
+
+#define CC2520_CC2591_MAX_TX_POWERS 0x5
+static const s32 cc2520_cc2591_powers[CC2520_CC2591_MAX_TX_POWERS + 1] = {
+	1700, 1600, 1400, 1100, -100, -800,
+};
+
+static int
+cc2520_set_txpower(struct ieee802154_hw *hw, s32 mbm)
+{
+	struct cc2520_private *priv = hw->priv;
+
+	if (!priv->amplified)
+		return cc2520_set_tx_power(priv, mbm);
+
+	return cc2520_cc2591_set_tx_power(priv, mbm);
 }
 
 static const struct ieee802154_ops cc2520_ops = {
@@ -636,6 +731,7 @@
 	.ed = cc2520_ed,
 	.set_channel = cc2520_set_channel,
 	.set_hw_addr_filt = cc2520_filter,
+	.set_txpower = cc2520_set_txpower,
 };
 
 static int cc2520_register(struct cc2520_private *priv)
@@ -649,13 +745,25 @@
 	priv->hw->priv = priv;
 	priv->hw->parent = &priv->spi->dev;
 	priv->hw->extra_tx_headroom = 0;
-	priv->hw->vif_data_size = sizeof(*priv);
 	ieee802154_random_extended_addr(&priv->hw->phy->perm_extended_addr);
 
 	/* We do support only 2.4 Ghz */
-	priv->hw->phy->channels_supported[0] = 0x7FFF800;
-	priv->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
-			  IEEE802154_HW_AFILT;
+	priv->hw->phy->supported.channels[0] = 0x7FFF800;
+	priv->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AFILT;
+
+	priv->hw->phy->flags = WPAN_PHY_FLAG_TXPOWER;
+
+	if (!priv->amplified) {
+		priv->hw->phy->supported.tx_powers = cc2520_powers;
+		priv->hw->phy->supported.tx_powers_size = ARRAY_SIZE(cc2520_powers);
+		priv->hw->phy->transmit_power = priv->hw->phy->supported.tx_powers[4];
+	} else {
+		priv->hw->phy->supported.tx_powers = cc2520_cc2591_powers;
+		priv->hw->phy->supported.tx_powers_size = ARRAY_SIZE(cc2520_cc2591_powers);
+		priv->hw->phy->transmit_power = priv->hw->phy->supported.tx_powers[0];
+	}
+
+	priv->hw->phy->current_channel = 11;
 
 	dev_vdbg(&priv->spi->dev, "registered cc2520\n");
 	ret = ieee802154_register_hw(priv->hw);
@@ -738,7 +846,9 @@
 	pdata->vreg = of_get_named_gpio(np, "vreg-gpio", 0);
 	pdata->reset = of_get_named_gpio(np, "reset-gpio", 0);
 
-	pdata->amplified = of_property_read_bool(np, "amplified");
+	/* CC2591 front end for CC2520 */
+	if (of_property_read_bool(np, "amplified"))
+		priv->amplified = true;
 
 	return 0;
 }
@@ -781,11 +891,7 @@
 	 * amplifier. See section 8 page 17 of TI application note AN065.
 	 * http://www.ti.com/lit/an/swra229a/swra229a.pdf
 	 */
-	if (pdata.amplified) {
-		ret = cc2520_write_register(priv, CC2520_TXPOWER, 0xF9);
-		if (ret)
-			goto err_ret;
-
+	if (priv->amplified) {
 		ret = cc2520_write_register(priv, CC2520_AGCCTRL1, 0x16);
 		if (ret)
 			goto err_ret;
@@ -806,10 +912,6 @@
 		if (ret)
 			goto err_ret;
 	} else {
-		ret = cc2520_write_register(priv, CC2520_TXPOWER, 0xF7);
-		if (ret)
-			goto err_ret;
-
 		ret = cc2520_write_register(priv, CC2520_AGCCTRL1, 0x11);
 		if (ret)
 			goto err_ret;
@@ -904,6 +1006,9 @@
 	spin_lock_init(&priv->lock);
 	init_completion(&priv->tx_complete);
 
+	/* Assume that no CC2591 is connected */
+	priv->amplified = false;
+
 	/* Request all the gpio's */
 	if (!gpio_is_valid(pdata.fifo)) {
 		dev_err(&spi->dev, "fifo gpio is not valid\n");
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index dc2bfb6..860d4ae 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -27,25 +27,27 @@
 #include <net/mac802154.h>
 #include <net/cfg802154.h>
 
-static int numlbs = 1;
+static int numlbs = 2;
 
-struct fakelb_dev_priv {
+static LIST_HEAD(fakelb_phys);
+static DEFINE_SPINLOCK(fakelb_phys_lock);
+
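+/* Every phy is on fakelb_phys; only phys whose interface is up are also on
+ * fakelb_ifup_phys and can therefore exchange frames in fakelb_hw_xmit().
+ */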
+static LIST_HEAD(fakelb_ifup_phys);
+static DEFINE_RWLOCK(fakelb_ifup_phys_lock);
+
+struct fakelb_phy {
 	struct ieee802154_hw *hw;
 
-	struct list_head list;
-	struct fakelb_priv *fake;
+	u8 page;
+	u8 channel;
 
-	spinlock_t lock;
-	bool working;
+	bool suspended;
+
+	struct list_head list;
+	struct list_head list_ifup;
 };
 
-struct fakelb_priv {
-	struct list_head list;
-	rwlock_t lock;
-};
-
-static int
-fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
+static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
 {
 	BUG_ON(!level);
 	*level = 0xbe;
@@ -53,78 +55,66 @@
 	return 0;
 }
 
-static int
-fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+static int fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
 {
-	pr_debug("set channel to %d\n", channel);
+	struct fakelb_phy *phy = hw->priv;
 
+	write_lock_bh(&fakelb_ifup_phys_lock);
+	phy->page = page;
+	phy->channel = channel;
+	write_unlock_bh(&fakelb_ifup_phys_lock);
 	return 0;
 }
 
-static void
-fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb)
+static int fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
 {
-	struct sk_buff *newskb;
+	struct fakelb_phy *current_phy = hw->priv, *phy;
 
-	spin_lock(&priv->lock);
-	if (priv->working) {
-		newskb = pskb_copy(skb, GFP_ATOMIC);
-		ieee802154_rx_irqsafe(priv->hw, newskb, 0xcc);
-	}
-	spin_unlock(&priv->lock);
-}
+	read_lock_bh(&fakelb_ifup_phys_lock);
+	WARN_ON(current_phy->suspended);
+	list_for_each_entry(phy, &fakelb_ifup_phys, list_ifup) {
+		if (current_phy == phy)
+			continue;
 
-static int
-fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
-{
-	struct fakelb_dev_priv *priv = hw->priv;
-	struct fakelb_priv *fake = priv->fake;
+		if (current_phy->page == phy->page &&
+		    current_phy->channel == phy->channel) {
+			struct sk_buff *newskb = pskb_copy(skb, GFP_ATOMIC);
 
-	read_lock_bh(&fake->lock);
-	if (priv->list.next == priv->list.prev) {
-		/* we are the only one device */
-		fakelb_hw_deliver(priv, skb);
-	} else {
-		struct fakelb_dev_priv *dp;
-		list_for_each_entry(dp, &priv->fake->list, list) {
-			if (dp != priv &&
-			    (dp->hw->phy->current_channel ==
-			     priv->hw->phy->current_channel))
-				fakelb_hw_deliver(dp, skb);
+			if (newskb)
+				ieee802154_rx_irqsafe(phy->hw, newskb, 0xcc);
 		}
 	}
-	read_unlock_bh(&fake->lock);
+	read_unlock_bh(&fakelb_ifup_phys_lock);
+
+	ieee802154_xmit_complete(hw, skb, false);
+	return 0;
+}
+
+static int fakelb_hw_start(struct ieee802154_hw *hw)
+{
+	struct fakelb_phy *phy = hw->priv;
+
+	write_lock_bh(&fakelb_ifup_phys_lock);
+	phy->suspended = false;
+	list_add(&phy->list_ifup, &fakelb_ifup_phys);
+	write_unlock_bh(&fakelb_ifup_phys_lock);
 
 	return 0;
 }
 
-static int
-fakelb_hw_start(struct ieee802154_hw *hw) {
-	struct fakelb_dev_priv *priv = hw->priv;
-	int ret = 0;
+static void fakelb_hw_stop(struct ieee802154_hw *hw)
+{
+	struct fakelb_phy *phy = hw->priv;
 
-	spin_lock(&priv->lock);
-	if (priv->working)
-		ret = -EBUSY;
-	else
-		priv->working = 1;
-	spin_unlock(&priv->lock);
-
-	return ret;
-}
-
-static void
-fakelb_hw_stop(struct ieee802154_hw *hw) {
-	struct fakelb_dev_priv *priv = hw->priv;
-
-	spin_lock(&priv->lock);
-	priv->working = 0;
-	spin_unlock(&priv->lock);
+	write_lock_bh(&fakelb_ifup_phys_lock);
+	phy->suspended = true;
+	list_del(&phy->list_ifup);
+	write_unlock_bh(&fakelb_ifup_phys_lock);
 }
 
 static const struct ieee802154_ops fakelb_ops = {
 	.owner = THIS_MODULE,
-	.xmit_sync = fakelb_hw_xmit,
+	.xmit_async = fakelb_hw_xmit,
 	.ed = fakelb_hw_ed,
 	.set_channel = fakelb_hw_channel,
 	.start = fakelb_hw_start,
@@ -135,54 +125,54 @@
 module_param(numlbs, int, 0);
 MODULE_PARM_DESC(numlbs, " number of pseudo devices");
 
-static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
+static int fakelb_add_one(struct device *dev)
 {
-	struct fakelb_dev_priv *priv;
-	int err;
 	struct ieee802154_hw *hw;
+	struct fakelb_phy *phy;
+	int err;
 
-	hw = ieee802154_alloc_hw(sizeof(*priv), &fakelb_ops);
+	hw = ieee802154_alloc_hw(sizeof(*phy), &fakelb_ops);
 	if (!hw)
 		return -ENOMEM;
 
-	priv = hw->priv;
-	priv->hw = hw;
+	phy = hw->priv;
+	phy->hw = hw;
 
 	/* 868 MHz BPSK	802.15.4-2003 */
-	hw->phy->channels_supported[0] |= 1;
+	hw->phy->supported.channels[0] |= 1;
 	/* 915 MHz BPSK	802.15.4-2003 */
-	hw->phy->channels_supported[0] |= 0x7fe;
+	hw->phy->supported.channels[0] |= 0x7fe;
 	/* 2.4 GHz O-QPSK 802.15.4-2003 */
-	hw->phy->channels_supported[0] |= 0x7FFF800;
+	hw->phy->supported.channels[0] |= 0x7FFF800;
 	/* 868 MHz ASK 802.15.4-2006 */
-	hw->phy->channels_supported[1] |= 1;
+	hw->phy->supported.channels[1] |= 1;
 	/* 915 MHz ASK 802.15.4-2006 */
-	hw->phy->channels_supported[1] |= 0x7fe;
+	hw->phy->supported.channels[1] |= 0x7fe;
 	/* 868 MHz O-QPSK 802.15.4-2006 */
-	hw->phy->channels_supported[2] |= 1;
+	hw->phy->supported.channels[2] |= 1;
 	/* 915 MHz O-QPSK 802.15.4-2006 */
-	hw->phy->channels_supported[2] |= 0x7fe;
+	hw->phy->supported.channels[2] |= 0x7fe;
 	/* 2.4 GHz CSS 802.15.4a-2007 */
-	hw->phy->channels_supported[3] |= 0x3fff;
+	hw->phy->supported.channels[3] |= 0x3fff;
 	/* UWB Sub-gigahertz 802.15.4a-2007 */
-	hw->phy->channels_supported[4] |= 1;
+	hw->phy->supported.channels[4] |= 1;
 	/* UWB Low band 802.15.4a-2007 */
-	hw->phy->channels_supported[4] |= 0x1e;
+	hw->phy->supported.channels[4] |= 0x1e;
 	/* UWB High band 802.15.4a-2007 */
-	hw->phy->channels_supported[4] |= 0xffe0;
+	hw->phy->supported.channels[4] |= 0xffe0;
 	/* 750 MHz O-QPSK 802.15.4c-2009 */
-	hw->phy->channels_supported[5] |= 0xf;
+	hw->phy->supported.channels[5] |= 0xf;
 	/* 750 MHz MPSK 802.15.4c-2009 */
-	hw->phy->channels_supported[5] |= 0xf0;
+	hw->phy->supported.channels[5] |= 0xf0;
 	/* 950 MHz BPSK 802.15.4d-2009 */
-	hw->phy->channels_supported[6] |= 0x3ff;
+	hw->phy->supported.channels[6] |= 0x3ff;
 	/* 950 MHz GFSK 802.15.4d-2009 */
-	hw->phy->channels_supported[6] |= 0x3ffc00;
+	hw->phy->supported.channels[6] |= 0x3ffc00;
 
-	INIT_LIST_HEAD(&priv->list);
-	priv->fake = fake;
-
-	spin_lock_init(&priv->lock);
+	ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+	/* default the fake phy to channel 13 */
+	hw->phy->current_channel = 13;
+	phy->channel = hw->phy->current_channel;
 
 	hw->parent = dev;
 
@@ -190,67 +180,55 @@
 	if (err)
 		goto err_reg;
 
-	write_lock_bh(&fake->lock);
-	list_add_tail(&priv->list, &fake->list);
-	write_unlock_bh(&fake->lock);
+	spin_lock(&fakelb_phys_lock);
+	list_add_tail(&phy->list, &fakelb_phys);
+	spin_unlock(&fakelb_phys_lock);
 
 	return 0;
 
 err_reg:
-	ieee802154_free_hw(priv->hw);
+	ieee802154_free_hw(phy->hw);
 	return err;
 }
 
-static void fakelb_del(struct fakelb_dev_priv *priv)
+static void fakelb_del(struct fakelb_phy *phy)
 {
-	write_lock_bh(&priv->fake->lock);
-	list_del(&priv->list);
-	write_unlock_bh(&priv->fake->lock);
+	list_del(&phy->list);
 
-	ieee802154_unregister_hw(priv->hw);
-	ieee802154_free_hw(priv->hw);
+	ieee802154_unregister_hw(phy->hw);
+	ieee802154_free_hw(phy->hw);
 }
 
 static int fakelb_probe(struct platform_device *pdev)
 {
-	struct fakelb_priv *priv;
-	struct fakelb_dev_priv *dp;
-	int err = -ENOMEM;
-	int i;
-
-	priv = devm_kzalloc(&pdev->dev, sizeof(struct fakelb_priv),
-			    GFP_KERNEL);
-	if (!priv)
-		goto err_alloc;
-
-	INIT_LIST_HEAD(&priv->list);
-	rwlock_init(&priv->lock);
+	struct fakelb_phy *phy, *tmp;
+	int err, i;
 
 	for (i = 0; i < numlbs; i++) {
-		err = fakelb_add_one(&pdev->dev, priv);
+		err = fakelb_add_one(&pdev->dev);
 		if (err < 0)
 			goto err_slave;
 	}
 
-	platform_set_drvdata(pdev, priv);
 	dev_info(&pdev->dev, "added ieee802154 hardware\n");
 	return 0;
 
 err_slave:
-	list_for_each_entry(dp, &priv->list, list)
-		fakelb_del(dp);
-err_alloc:
+	spin_lock(&fakelb_phys_lock);
+	list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
+		fakelb_del(phy);
+	spin_unlock(&fakelb_phys_lock);
 	return err;
 }
 
 static int fakelb_remove(struct platform_device *pdev)
 {
-	struct fakelb_priv *priv = platform_get_drvdata(pdev);
-	struct fakelb_dev_priv *dp, *temp;
+	struct fakelb_phy *phy, *tmp;
 
-	list_for_each_entry_safe(dp, temp, &priv->list, list)
-		fakelb_del(dp);
-
+	spin_lock(&fakelb_phys_lock);
+	list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
+		fakelb_del(phy);
+	spin_unlock(&fakelb_phys_lock);
 	return 0;
 }
 
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index fba2dfd..2549760 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -533,6 +533,7 @@
 	u8 lqi = 0;
 	u8 val;
 	int ret = 0;
+	int ret2;
 	struct sk_buff *skb;
 
 	/* Turn off reception of packets off the air. This prevents the
@@ -569,9 +570,9 @@
 
 out:
 	/* Turn back on reception of packets off the air. */
-	ret = read_short_reg(devrec, REG_BBREG1, &val);
-	if (ret)
-		return ret;
+	ret2 = read_short_reg(devrec, REG_BBREG1, &val);
+	if (ret2)
+		return ret2;
 	val &= ~0x4; /* Clear RXDECINV */
 	write_short_reg(devrec, REG_BBREG1, val);
 
@@ -750,9 +751,8 @@
 
 	devrec->hw->priv = devrec;
 	devrec->hw->parent = &devrec->spi->dev;
-	devrec->hw->phy->channels_supported[0] = CHANNEL_MASK;
-	devrec->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
-			    IEEE802154_HW_AFILT;
+	devrec->hw->phy->supported.channels[0] = CHANNEL_MASK;
+	devrec->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AFILT;
 
 	dev_dbg(printdev(devrec), "registered mrf24j40\n");
 	ret = ieee802154_register_hw(devrec->hw);
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 54549a6..953a974 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -39,6 +39,8 @@
 #define IPVLAN_MAC_FILTER_SIZE	(1 << IPVLAN_MAC_FILTER_BITS)
 #define IPVLAN_MAC_FILTER_MASK	(IPVLAN_MAC_FILTER_SIZE - 1)
 
+#define IPVLAN_QBACKLOG_LIMIT	1000
+
 typedef enum {
 	IPVL_IPV6 = 0,
 	IPVL_ICMPV6,
@@ -93,6 +95,8 @@
 	struct hlist_head	hlhead[IPVLAN_HASH_SIZE];
 	struct list_head	ipvlans;
 	struct rcu_head		rcu;
+	struct work_struct	wq;
+	struct sk_buff_head	backlog;
 	int			count;
 	u16			mode;
 };
@@ -112,6 +116,7 @@
 void ipvlan_init_secret(void);
 unsigned int ipvlan_mac_hash(const unsigned char *addr);
 rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb);
+void ipvlan_process_multicast(struct work_struct *work);
 int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev);
 void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr);
 struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index c30b5c3..8afbeda 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -189,62 +189,69 @@
 	return hash & IPVLAN_MAC_FILTER_MASK;
 }
 
-static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
-				   const struct ipvl_dev *in_dev, bool local)
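+/* Work-queue handler: drain the port backlog and deliver each queued
+ * multicast/broadcast frame to every matching slave; frames that originated
+ * locally are additionally sent out through the master device.
+ */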
+void ipvlan_process_multicast(struct work_struct *work)
 {
-	struct ethhdr *eth = eth_hdr(skb);
+	struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
+	struct ethhdr *ethh;
 	struct ipvl_dev *ipvlan;
-	struct sk_buff *nskb;
+	struct sk_buff *skb, *nskb;
+	struct sk_buff_head list;
 	unsigned int len;
 	unsigned int mac_hash;
 	int ret;
+	u8 pkt_type;
+	bool hlocal, dlocal;
 
-	if (skb->protocol == htons(ETH_P_PAUSE))
-		return;
+	__skb_queue_head_init(&list);
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
-		if (local && (ipvlan == in_dev))
-			continue;
+	spin_lock_bh(&port->backlog.lock);
+	skb_queue_splice_tail_init(&port->backlog, &list);
+	spin_unlock_bh(&port->backlog.lock);
 
-		mac_hash = ipvlan_mac_hash(eth->h_dest);
-		if (!test_bit(mac_hash, ipvlan->mac_filters))
-			continue;
+	while ((skb = __skb_dequeue(&list)) != NULL) {
+		ethh = eth_hdr(skb);
+		hlocal = ether_addr_equal(ethh->h_source, port->dev->dev_addr);
+		mac_hash = ipvlan_mac_hash(ethh->h_dest);
 
-		ret = NET_RX_DROP;
-		len = skb->len + ETH_HLEN;
-		nskb = skb_clone(skb, GFP_ATOMIC);
-		if (!nskb)
-			goto mcast_acct;
-
-		if (ether_addr_equal(eth->h_dest, ipvlan->phy_dev->broadcast))
-			nskb->pkt_type = PACKET_BROADCAST;
+		if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
+			pkt_type = PACKET_BROADCAST;
 		else
-			nskb->pkt_type = PACKET_MULTICAST;
+			pkt_type = PACKET_MULTICAST;
 
-		nskb->dev = ipvlan->dev;
-		if (local)
-			ret = dev_forward_skb(ipvlan->dev, nskb);
-		else
-			ret = netif_rx(nskb);
-mcast_acct:
-		ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
-	}
-	rcu_read_unlock();
+		dlocal = false;
+		rcu_read_lock();
+		list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
+			if (hlocal && (ipvlan->dev == skb->dev)) {
+				dlocal = true;
+				continue;
+			}
+			if (!test_bit(mac_hash, ipvlan->mac_filters))
+				continue;
 
-	/* Locally generated? ...Forward a copy to the main-device as
-	 * well. On the RX side we'll ignore it (wont give it to any
-	 * of the virtual devices.
-	 */
-	if (local) {
-		nskb = skb_clone(skb, GFP_ATOMIC);
-		if (nskb) {
-			if (ether_addr_equal(eth->h_dest, port->dev->broadcast))
-				nskb->pkt_type = PACKET_BROADCAST;
+			ret = NET_RX_DROP;
+			len = skb->len + ETH_HLEN;
+			nskb = skb_clone(skb, GFP_ATOMIC);
+			if (!nskb)
+				goto acct;
+
+			nskb->pkt_type = pkt_type;
+			nskb->dev = ipvlan->dev;
+			if (hlocal)
+				ret = dev_forward_skb(ipvlan->dev, nskb);
 			else
-				nskb->pkt_type = PACKET_MULTICAST;
+				ret = netif_rx(nskb);
+acct:
+			ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
+		}
+		rcu_read_unlock();
 
-			dev_forward_skb(port->dev, nskb);
+		if (dlocal) {
+			/* If the packet originated here, send it out. */
+			skb->dev = port->dev;
+			skb->pkt_type = pkt_type;
+			dev_queue_xmit(skb);
+		} else {
+			kfree_skb(skb);
 		}
 	}
 }
@@ -446,6 +453,26 @@
 	return ret;
 }
 
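+/* Defer multicast processing to the work queue, dropping the frame if the
+ * backlog already holds IPVLAN_QBACKLOG_LIMIT packets.
+ */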
+static void ipvlan_multicast_enqueue(struct ipvl_port *port,
+				     struct sk_buff *skb)
+{
+	if (skb->protocol == htons(ETH_P_PAUSE)) {
+		kfree_skb(skb);
+		return;
+	}
+
+	spin_lock(&port->backlog.lock);
+	if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
+		__skb_queue_tail(&port->backlog, skb);
+		spin_unlock(&port->backlog.lock);
+		schedule_work(&port->wq);
+	} else {
+		spin_unlock(&port->backlog.lock);
+		atomic_long_inc(&skb->dev->rx_dropped);
+		kfree_skb(skb);
+	}
+}
+
 static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
 {
 	const struct ipvl_dev *ipvlan = netdev_priv(dev);
@@ -493,11 +520,8 @@
 		return dev_forward_skb(ipvlan->phy_dev, skb);
 
 	} else if (is_multicast_ether_addr(eth->h_dest)) {
-		u8 ip_summed = skb->ip_summed;
-
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		ipvlan_multicast_frame(ipvlan->port, skb, ipvlan, true);
-		skb->ip_summed = ip_summed;
+		ipvlan_multicast_enqueue(ipvlan->port, skb);
+		return NET_XMIT_SUCCESS;
 	}
 
 	skb->dev = ipvlan->phy_dev;
@@ -581,8 +605,18 @@
 	int addr_type;
 
 	if (is_multicast_ether_addr(eth->h_dest)) {
-		if (ipvlan_external_frame(skb, port))
-			ipvlan_multicast_frame(port, skb, NULL, false);
+		if (ipvlan_external_frame(skb, port)) {
+			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+			/* External frames are queued for device-local
+			 * distribution, but a copy is given to the master
+			 * straight away to avoid sending duplicates later
+			 * when the work queue processes this frame. This is
+			 * achieved by returning RX_HANDLER_PASS.
+			 */
+			if (nskb)
+				ipvlan_multicast_enqueue(port, nskb);
+		}
 	} else {
 		struct ipvl_addr *addr;
 
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 77b92a0..1acc283 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -54,6 +54,9 @@
 	for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
 		INIT_HLIST_HEAD(&port->hlhead[idx]);
 
+	skb_queue_head_init(&port->backlog);
+	INIT_WORK(&port->wq, ipvlan_process_multicast);
+
 	err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port);
 	if (err)
 		goto err;
@@ -72,6 +75,8 @@
 
 	dev->priv_flags &= ~IFF_IPVLAN_MASTER;
 	netdev_rx_handler_unregister(dev);
+	cancel_work_sync(&port->wq);
+	__skb_queue_purge(&port->backlog);
 	kfree_rcu(port, rcu);
 }
 
@@ -213,17 +218,6 @@
 		dev_set_allmulti(phy_dev, dev->flags & IFF_ALLMULTI? 1 : -1);
 }
 
-static void ipvlan_set_broadcast_mac_filter(struct ipvl_dev *ipvlan, bool set)
-{
-	struct net_device *dev = ipvlan->dev;
-	unsigned int hashbit = ipvlan_mac_hash(dev->broadcast);
-
-	if (set && !test_bit(hashbit, ipvlan->mac_filters))
-		__set_bit(hashbit, ipvlan->mac_filters);
-	else if (!set && test_bit(hashbit, ipvlan->mac_filters))
-		__clear_bit(hashbit, ipvlan->mac_filters);
-}
-
 static void ipvlan_set_multicast_mac_filter(struct net_device *dev)
 {
 	struct ipvl_dev *ipvlan = netdev_priv(dev);
@@ -238,6 +232,12 @@
 		netdev_for_each_mc_addr(ha, dev)
 			__set_bit(ipvlan_mac_hash(ha->addr), mc_filters);
 
+		/* Turn on the broadcast bit irrespective of address family;
+		 * broadcast is deferred to a work queue and hence has no
+		 * impact on fast-path processing.
+		 */
+		__set_bit(ipvlan_mac_hash(dev->broadcast), mc_filters);
+
 		bitmap_copy(ipvlan->mac_filters, mc_filters,
 			    IPVLAN_MAC_FILTER_SIZE);
 	}
@@ -705,7 +705,6 @@
 	 */
 	if (netif_running(ipvlan->dev))
 		ipvlan_ht_addr_add(ipvlan, addr);
-	ipvlan_set_broadcast_mac_filter(ipvlan, true);
 
 	return 0;
 }
@@ -722,8 +721,6 @@
 	list_del(&addr->anode);
 	ipvlan->ipv4cnt--;
 	WARN_ON(ipvlan->ipv4cnt < 0);
-	if (!ipvlan->ipv4cnt)
-	    ipvlan_set_broadcast_mac_filter(ipvlan, false);
 	kfree_rcu(addr, rcu);
 
 	return;
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index f6c9163..25f2196 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -848,7 +848,9 @@
 		 * Jean II */
 		self->rx_defer_timer.function = irda_usb_rx_defer_expired;
 		self->rx_defer_timer.data = (unsigned long) urb;
-		mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000));
+		mod_timer(&self->rx_defer_timer,
+			  jiffies + msecs_to_jiffies(10));
+
 		return;
 	}
 	
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 8c350c5..6a64197f 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -263,27 +263,21 @@
 static void macvtap_del_queues(struct net_device *dev)
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
-	struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES];
-	int i, j = 0;
+	struct macvtap_queue *q, *tmp;
 
 	ASSERT_RTNL();
 	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
 		list_del_init(&q->next);
-		qlist[j++] = q;
 		RCU_INIT_POINTER(q->vlan, NULL);
 		if (q->enabled)
 			vlan->numvtaps--;
 		vlan->numqueues--;
+		sock_put(&q->sk);
 	}
-	for (i = 0; i < vlan->numvtaps; i++)
-		RCU_INIT_POINTER(vlan->taps[i], NULL);
 	BUG_ON(vlan->numvtaps);
 	BUG_ON(vlan->numqueues);
 	/* guarantee that any future macvtap_set_queue will fail */
 	vlan->numvtaps = MAX_MACVTAP_QUEUES;
-
-	for (--j; j >= 0; j--)
-		sock_put(&qlist[j]->sk);
 }
 
 static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
@@ -476,7 +470,7 @@
 
 	err = -ENOMEM;
 	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
-					     &macvtap_proto);
+					     &macvtap_proto, 0);
 	if (!q)
 		goto out;
 
@@ -1006,6 +1000,7 @@
 	unsigned int __user *up = argp;
 	unsigned short u;
 	int __user *sp = argp;
+	struct sockaddr sa;
 	int s;
 	int ret;
 
@@ -1101,6 +1096,37 @@
 		rtnl_unlock();
 		return ret;
 
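+	/* Report the underlying macvlan device's name, type and MAC address. */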
+	case SIOCGIFHWADDR:
+		rtnl_lock();
+		vlan = macvtap_get_vlan(q);
+		if (!vlan) {
+			rtnl_unlock();
+			return -ENOLINK;
+		}
+		ret = 0;
+		u = vlan->dev->type;
+		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
+		    copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) ||
+		    put_user(u, &ifr->ifr_hwaddr.sa_family))
+			ret = -EFAULT;
+		macvtap_put_vlan(vlan);
+		rtnl_unlock();
+		return ret;
+
+	case SIOCSIFHWADDR:
+		if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
+			return -EFAULT;
+		rtnl_lock();
+		vlan = macvtap_get_vlan(q);
+		if (!vlan) {
+			rtnl_unlock();
+			return -ENOLINK;
+		}
+		ret = dev_set_mac_address(vlan->dev, &sa);
+		macvtap_put_vlan(vlan);
+		rtnl_unlock();
+		return ret;
+
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 70641d2..cf18940 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -24,13 +24,6 @@
 	---help---
 	  Currently supports the am79c874
 
-config AMD_XGBE_PHY
-	tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
-	depends on (OF || ACPI) && HAS_IOMEM
-	depends on ARM64 || COMPILE_TEST
-	---help---
-	  Currently supports the AMD 10GbE PHY
-
 config MARVELL_PHY
 	tristate "Drivers for Marvell PHYs"
 	---help---
@@ -119,6 +112,11 @@
 	---help---
 	  Supports the KSZ9021, VSC8201, KS8001 PHYs.
 
+config DP83867_PHY
+	tristate "Drivers for Texas Instruments DP83867 Gigabit PHY"
+	---help---
+	  Currently supports the DP83867 PHY.
+
 config FIXED_PHY
 	tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
 	depends on PHYLIB
@@ -212,7 +210,6 @@
 	  This hardware can be found in the Broadcom GENET Ethernet MAC
 	  controllers as well as some Broadcom Ethernet switches such as the
 	  Starfighter 2 switches.
-
 endif # PHYLIB
 
 config MICREL_KS8995MA
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 501ea769..fcc25a0 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -22,6 +22,7 @@
 obj-$(CONFIG_MDIO_GPIO)		+= mdio-gpio.o
 obj-$(CONFIG_NATIONAL_PHY)	+= national.o
 obj-$(CONFIG_DP83640_PHY)	+= dp83640.o
+obj-$(CONFIG_DP83867_PHY)	+= dp83867.o
 obj-$(CONFIG_STE10XP)		+= ste10Xp.o
 obj-$(CONFIG_MICREL_PHY)	+= micrel.o
 obj-$(CONFIG_MDIO_OCTEON)	+= mdio-octeon.o
@@ -33,5 +34,4 @@
 obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
 obj-$(CONFIG_MDIO_SUN4I)	+= mdio-sun4i.o
 obj-$(CONFIG_MDIO_MOXART)	+= mdio-moxart.o
-obj-$(CONFIG_AMD_XGBE_PHY)	+= amd-xgbe-phy.o
 obj-$(CONFIG_MDIO_BCM_UNIMAC)	+= mdio-bcm-unimac.o
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
deleted file mode 100644
index 34a75cb..0000000
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ /dev/null
@@ -1,1901 +0,0 @@
-/*
- * AMD 10Gb Ethernet PHY driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of Advanced Micro Devices, Inc. nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/workqueue.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/phy.h>
-#include <linux/mdio.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_device.h>
-#include <linux/uaccess.h>
-#include <linux/bitops.h>
-#include <linux/property.h>
-#include <linux/acpi.h>
-#include <linux/jiffies.h>
-
-MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION("1.0.0-a");
-MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
-
-#define XGBE_PHY_ID	0x000162d0
-#define XGBE_PHY_MASK	0xfffffff0
-
-#define XGBE_PHY_SPEEDSET_PROPERTY	"amd,speed-set"
-#define XGBE_PHY_BLWC_PROPERTY		"amd,serdes-blwc"
-#define XGBE_PHY_CDR_RATE_PROPERTY	"amd,serdes-cdr-rate"
-#define XGBE_PHY_PQ_SKEW_PROPERTY	"amd,serdes-pq-skew"
-#define XGBE_PHY_TX_AMP_PROPERTY	"amd,serdes-tx-amp"
-#define XGBE_PHY_DFE_CFG_PROPERTY	"amd,serdes-dfe-tap-config"
-#define XGBE_PHY_DFE_ENA_PROPERTY	"amd,serdes-dfe-tap-enable"
-
-#define XGBE_PHY_SPEEDS			3
-#define XGBE_PHY_SPEED_1000		0
-#define XGBE_PHY_SPEED_2500		1
-#define XGBE_PHY_SPEED_10000		2
-
-#define XGBE_AN_MS_TIMEOUT		500
-
-#define XGBE_AN_INT_CMPLT		0x01
-#define XGBE_AN_INC_LINK		0x02
-#define XGBE_AN_PG_RCV			0x04
-#define XGBE_AN_INT_MASK		0x07
-
-#define XNP_MCF_NULL_MESSAGE		0x001
-#define XNP_ACK_PROCESSED		BIT(12)
-#define XNP_MP_FORMATTED		BIT(13)
-#define XNP_NP_EXCHANGE			BIT(15)
-
-#define XGBE_PHY_RATECHANGE_COUNT	500
-
-#define XGBE_PHY_KR_TRAINING_START	0x01
-#define XGBE_PHY_KR_TRAINING_ENABLE	0x02
-
-#define XGBE_PHY_FEC_ENABLE		0x01
-#define XGBE_PHY_FEC_FORWARD		0x02
-#define XGBE_PHY_FEC_MASK		0x03
-
-#ifndef MDIO_PMA_10GBR_PMD_CTRL
-#define MDIO_PMA_10GBR_PMD_CTRL		0x0096
-#endif
-
-#ifndef MDIO_PMA_10GBR_FEC_ABILITY
-#define MDIO_PMA_10GBR_FEC_ABILITY	0x00aa
-#endif
-
-#ifndef MDIO_PMA_10GBR_FEC_CTRL
-#define MDIO_PMA_10GBR_FEC_CTRL		0x00ab
-#endif
-
-#ifndef MDIO_AN_XNP
-#define MDIO_AN_XNP			0x0016
-#endif
-
-#ifndef MDIO_AN_LPX
-#define MDIO_AN_LPX			0x0019
-#endif
-
-#ifndef MDIO_AN_INTMASK
-#define MDIO_AN_INTMASK			0x8001
-#endif
-
-#ifndef MDIO_AN_INT
-#define MDIO_AN_INT			0x8002
-#endif
-
-#ifndef MDIO_CTRL1_SPEED1G
-#define MDIO_CTRL1_SPEED1G		(MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
-#endif
-
-/* SerDes integration register offsets */
-#define SIR0_KR_RT_1			0x002c
-#define SIR0_STATUS			0x0040
-#define SIR1_SPEED			0x0000
-
-/* SerDes integration register entry bit positions and sizes */
-#define SIR0_KR_RT_1_RESET_INDEX	11
-#define SIR0_KR_RT_1_RESET_WIDTH	1
-#define SIR0_STATUS_RX_READY_INDEX	0
-#define SIR0_STATUS_RX_READY_WIDTH	1
-#define SIR0_STATUS_TX_READY_INDEX	8
-#define SIR0_STATUS_TX_READY_WIDTH	1
-#define SIR1_SPEED_CDR_RATE_INDEX	12
-#define SIR1_SPEED_CDR_RATE_WIDTH	4
-#define SIR1_SPEED_DATARATE_INDEX	4
-#define SIR1_SPEED_DATARATE_WIDTH	2
-#define SIR1_SPEED_PLLSEL_INDEX		3
-#define SIR1_SPEED_PLLSEL_WIDTH		1
-#define SIR1_SPEED_RATECHANGE_INDEX	6
-#define SIR1_SPEED_RATECHANGE_WIDTH	1
-#define SIR1_SPEED_TXAMP_INDEX		8
-#define SIR1_SPEED_TXAMP_WIDTH		4
-#define SIR1_SPEED_WORDMODE_INDEX	0
-#define SIR1_SPEED_WORDMODE_WIDTH	3
-
-#define SPEED_10000_BLWC		0
-#define SPEED_10000_CDR			0x7
-#define SPEED_10000_PLL			0x1
-#define SPEED_10000_PQ			0x12
-#define SPEED_10000_RATE		0x0
-#define SPEED_10000_TXAMP		0xa
-#define SPEED_10000_WORD		0x7
-#define SPEED_10000_DFE_TAP_CONFIG	0x1
-#define SPEED_10000_DFE_TAP_ENABLE	0x7f
-
-#define SPEED_2500_BLWC			1
-#define SPEED_2500_CDR			0x2
-#define SPEED_2500_PLL			0x0
-#define SPEED_2500_PQ			0xa
-#define SPEED_2500_RATE			0x1
-#define SPEED_2500_TXAMP		0xf
-#define SPEED_2500_WORD			0x1
-#define SPEED_2500_DFE_TAP_CONFIG	0x3
-#define SPEED_2500_DFE_TAP_ENABLE	0x0
-
-#define SPEED_1000_BLWC			1
-#define SPEED_1000_CDR			0x2
-#define SPEED_1000_PLL			0x0
-#define SPEED_1000_PQ			0xa
-#define SPEED_1000_RATE			0x3
-#define SPEED_1000_TXAMP		0xf
-#define SPEED_1000_WORD			0x1
-#define SPEED_1000_DFE_TAP_CONFIG	0x3
-#define SPEED_1000_DFE_TAP_ENABLE	0x0
-
-/* SerDes RxTx register offsets */
-#define RXTX_REG6			0x0018
-#define RXTX_REG20			0x0050
-#define RXTX_REG22			0x0058
-#define RXTX_REG114			0x01c8
-#define RXTX_REG129			0x0204
-
-/* SerDes RxTx register entry bit positions and sizes */
-#define RXTX_REG6_RESETB_RXD_INDEX	8
-#define RXTX_REG6_RESETB_RXD_WIDTH	1
-#define RXTX_REG20_BLWC_ENA_INDEX	2
-#define RXTX_REG20_BLWC_ENA_WIDTH	1
-#define RXTX_REG114_PQ_REG_INDEX	9
-#define RXTX_REG114_PQ_REG_WIDTH	7
-#define RXTX_REG129_RXDFE_CONFIG_INDEX	14
-#define RXTX_REG129_RXDFE_CONFIG_WIDTH	2
-
-/* Bit setting and getting macros
- *  The get macro will extract the current bit field value from within
- *  the variable
- *
- *  The set macro will clear the current bit field value within the
- *  variable and then set the bit field of the variable to the
- *  specified value
- */
-#define GET_BITS(_var, _index, _width)					\
-	(((_var) >> (_index)) & ((0x1 << (_width)) - 1))
-
-#define SET_BITS(_var, _index, _width, _val)				\
-do {									\
-	(_var) &= ~(((0x1 << (_width)) - 1) << (_index));		\
-	(_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index));	\
-} while (0)
-
-#define XSIR_GET_BITS(_var, _prefix, _field)				\
-	GET_BITS((_var),						\
-		 _prefix##_##_field##_INDEX,				\
-		 _prefix##_##_field##_WIDTH)
-
-#define XSIR_SET_BITS(_var, _prefix, _field, _val)			\
-	SET_BITS((_var),						\
-		 _prefix##_##_field##_INDEX,				\
-		 _prefix##_##_field##_WIDTH, (_val))
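
(Illustrative aside, not part of the patch: with the definitions above, a
read-modify-write of the 4-bit CDR-rate field at bit 12 of a 16-bit value
looks like this.)

	u16 reg = 0;

	/* set the 4-bit field at bit 12 to 0x7 -> reg becomes 0x7000 */
	SET_BITS(reg, SIR1_SPEED_CDR_RATE_INDEX, SIR1_SPEED_CDR_RATE_WIDTH, 0x7);

	/* extract it again: yields 0x7 */
	GET_BITS(reg, SIR1_SPEED_CDR_RATE_INDEX, SIR1_SPEED_CDR_RATE_WIDTH);
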
-
-/* Macros for reading or writing SerDes integration registers
- *  The ioread macros will get bit fields or full values using the
- *  register definitions formed using the input names
- *
- *  The iowrite macros will set bit fields or full values using the
- *  register definitions formed using the input names
- */
-#define XSIR0_IOREAD(_priv, _reg)					\
-	ioread16((_priv)->sir0_regs + _reg)
-
-#define XSIR0_IOREAD_BITS(_priv, _reg, _field)				\
-	GET_BITS(XSIR0_IOREAD((_priv), _reg),				\
-		 _reg##_##_field##_INDEX,				\
-		 _reg##_##_field##_WIDTH)
-
-#define XSIR0_IOWRITE(_priv, _reg, _val)				\
-	iowrite16((_val), (_priv)->sir0_regs + _reg)
-
-#define XSIR0_IOWRITE_BITS(_priv, _reg, _field, _val)			\
-do {									\
-	u16 reg_val = XSIR0_IOREAD((_priv), _reg);			\
-	SET_BITS(reg_val,						\
-		 _reg##_##_field##_INDEX,				\
-		 _reg##_##_field##_WIDTH, (_val));			\
-	XSIR0_IOWRITE((_priv), _reg, reg_val);				\
-} while (0)
-
-#define XSIR1_IOREAD(_priv, _reg)					\
-	ioread16((_priv)->sir1_regs + _reg)
-
-#define XSIR1_IOREAD_BITS(_priv, _reg, _field)				\
-	GET_BITS(XSIR1_IOREAD((_priv), _reg),				\
-		 _reg##_##_field##_INDEX,				\
-		 _reg##_##_field##_WIDTH)
-
-#define XSIR1_IOWRITE(_priv, _reg, _val)				\
-	iowrite16((_val), (_priv)->sir1_regs + _reg)
-
-#define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val)			\
-do {									\
-	u16 reg_val = XSIR1_IOREAD((_priv), _reg);			\
-	SET_BITS(reg_val,						\
-		 _reg##_##_field##_INDEX,				\
-		 _reg##_##_field##_WIDTH, (_val));			\
-	XSIR1_IOWRITE((_priv), _reg, reg_val);				\
-} while (0)
-
-/* Macros for reading or writing SerDes RxTx registers
- *  The ioread macros will get bit fields or full values using the
- *  register definitions formed using the input names
- *
- *  The iowrite macros will set bit fields or full values using the
- *  register definitions formed using the input names
- */
-#define XRXTX_IOREAD(_priv, _reg)					\
-	ioread16((_priv)->rxtx_regs + _reg)
-
-#define XRXTX_IOREAD_BITS(_priv, _reg, _field)				\
-	GET_BITS(XRXTX_IOREAD((_priv), _reg),				\
-		 _reg##_##_field##_INDEX,				\
-		 _reg##_##_field##_WIDTH)
-
-#define XRXTX_IOWRITE(_priv, _reg, _val)				\
-	iowrite16((_val), (_priv)->rxtx_regs + _reg)
-
-#define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val)			\
-do {									\
-	u16 reg_val = XRXTX_IOREAD((_priv), _reg);			\
-	SET_BITS(reg_val,						\
-		 _reg##_##_field##_INDEX,				\
-		 _reg##_##_field##_WIDTH, (_val));			\
-	XRXTX_IOWRITE((_priv), _reg, reg_val);				\
-} while (0)
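
(Illustrative aside, not part of the patch: the _BITS variants token-paste
the register and field names, so

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, 1);

expands to a read of SIR1_SPEED from priv->sir1_regs, a SET_BITS() update of
only the SIR1_SPEED_PLLSEL_INDEX/_WIDTH bit field, and a 16-bit write back —
a read-modify-write of just that field.)
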
-
-static const u32 amd_xgbe_phy_serdes_blwc[] = {
-	SPEED_1000_BLWC,
-	SPEED_2500_BLWC,
-	SPEED_10000_BLWC,
-};
-
-static const u32 amd_xgbe_phy_serdes_cdr_rate[] = {
-	SPEED_1000_CDR,
-	SPEED_2500_CDR,
-	SPEED_10000_CDR,
-};
-
-static const u32 amd_xgbe_phy_serdes_pq_skew[] = {
-	SPEED_1000_PQ,
-	SPEED_2500_PQ,
-	SPEED_10000_PQ,
-};
-
-static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
-	SPEED_1000_TXAMP,
-	SPEED_2500_TXAMP,
-	SPEED_10000_TXAMP,
-};
-
-static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
-	SPEED_1000_DFE_TAP_CONFIG,
-	SPEED_2500_DFE_TAP_CONFIG,
-	SPEED_10000_DFE_TAP_CONFIG,
-};
-
-static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
-	SPEED_1000_DFE_TAP_ENABLE,
-	SPEED_2500_DFE_TAP_ENABLE,
-	SPEED_10000_DFE_TAP_ENABLE,
-};
-
-enum amd_xgbe_phy_an {
-	AMD_XGBE_AN_READY = 0,
-	AMD_XGBE_AN_PAGE_RECEIVED,
-	AMD_XGBE_AN_INCOMPAT_LINK,
-	AMD_XGBE_AN_COMPLETE,
-	AMD_XGBE_AN_NO_LINK,
-	AMD_XGBE_AN_ERROR,
-};
-
-enum amd_xgbe_phy_rx {
-	AMD_XGBE_RX_BPA = 0,
-	AMD_XGBE_RX_XNP,
-	AMD_XGBE_RX_COMPLETE,
-	AMD_XGBE_RX_ERROR,
-};
-
-enum amd_xgbe_phy_mode {
-	AMD_XGBE_MODE_KR,
-	AMD_XGBE_MODE_KX,
-};
-
-enum amd_xgbe_phy_speedset {
-	AMD_XGBE_PHY_SPEEDSET_1000_10000 = 0,
-	AMD_XGBE_PHY_SPEEDSET_2500_10000,
-};
-
-struct amd_xgbe_phy_priv {
-	struct platform_device *pdev;
-	struct acpi_device *adev;
-	struct device *dev;
-
-	struct phy_device *phydev;
-
-	/* SerDes related mmio resources */
-	struct resource *rxtx_res;
-	struct resource *sir0_res;
-	struct resource *sir1_res;
-
-	/* SerDes related mmio registers */
-	void __iomem *rxtx_regs;	/* SerDes Rx/Tx CSRs */
-	void __iomem *sir0_regs;	/* SerDes integration registers (1/2) */
-	void __iomem *sir1_regs;	/* SerDes integration registers (2/2) */
-
-	int an_irq;
-	char an_irq_name[IFNAMSIZ + 32];
-	struct work_struct an_irq_work;
-	unsigned int an_irq_allocated;
-
-	unsigned int speed_set;
-
-	/* SerDes UEFI configurable settings.
-	 *   Switching between modes/speeds requires new values for some
-	 *   SerDes settings.  The values can be supplied as device
-	 *   properties in array format.  The first array entry is for
-	 *   1GbE, the second for 2.5GbE and the third for 10GbE.
-	 */
-	u32 serdes_blwc[XGBE_PHY_SPEEDS];
-	u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
-	u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
-	u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
-	u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
-	u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
-
-	/* Auto-negotiation state machine support */
-	struct mutex an_mutex;
-	enum amd_xgbe_phy_an an_result;
-	enum amd_xgbe_phy_an an_state;
-	enum amd_xgbe_phy_rx kr_state;
-	enum amd_xgbe_phy_rx kx_state;
-	struct work_struct an_work;
-	struct workqueue_struct *an_workqueue;
-	unsigned int an_supported;
-	unsigned int parallel_detect;
-	unsigned int fec_ability;
-	unsigned long an_start;
-
-	unsigned int lpm_ctrl;		/* CTRL1 for resume */
-};
-
-static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
-{
-	int ret;
-
-	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
-	if (ret < 0)
-		return ret;
-
-	ret |= XGBE_PHY_KR_TRAINING_ENABLE;
-	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
-
-	return 0;
-}
-
-static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
-{
-	int ret;
-
-	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
-	if (ret < 0)
-		return ret;
-
-	ret &= ~XGBE_PHY_KR_TRAINING_ENABLE;
-	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
-
-	return 0;
-}
-
-static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev)
-{
-	int ret;
-
-	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
-	if (ret < 0)
-		return ret;
-
-	ret |= MDIO_CTRL1_LPOWER;
-	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
-	usleep_range(75, 100);
-
-	ret &= ~MDIO_CTRL1_LPOWER;
-	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
-	return 0;
-}
-
-static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
-{
-	struct amd_xgbe_phy_priv *priv = phydev->priv;
-
-	/* Assert Rx and Tx ratechange */
-	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
-}
-
-static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
-{
-	struct amd_xgbe_phy_priv *priv = phydev->priv;
-	unsigned int wait;
-	u16 status;
-
-	/* Release Rx and Tx ratechange */
-	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);
-
-	/* Wait for Rx and Tx ready */
-	wait = XGBE_PHY_RATECHANGE_COUNT;
-	while (wait--) {
-		usleep_range(50, 75);
-
-		status = XSIR0_IOREAD(priv, SIR0_STATUS);
-		if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
-		    XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
-			goto rx_reset;
-	}
-
-	netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
-		   status);
-
-rx_reset:
-	/* Perform Rx reset for the DFE changes */
-	XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
-	XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
-}
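
(Illustrative aside, not part of the patch: the ready poll above allows up to
XGBE_PHY_RATECHANGE_COUNT (500) iterations of a 50-75 us sleep, i.e. roughly
25-38 ms, before logging that the SerDes never signalled Rx/Tx ready.)
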
-
-static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
-{
-	struct amd_xgbe_phy_priv *priv = phydev->priv;
-	int ret;
-
-	/* Enable KR training */
-	ret = amd_xgbe_an_enable_kr_training(phydev);
-	if (ret < 0)
-		return ret;
-
-	/* Set PCS to KR/10G speed */
-	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
-	if (ret < 0)
-		return ret;
-
-	ret &= ~MDIO_PCS_CTRL2_TYPE;
-	ret |= MDIO_PCS_CTRL2_10GBR;
-	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
-
-	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
-	if (ret < 0)
-		return ret;
-
-	ret &= ~MDIO_CTRL1_SPEEDSEL;
-	ret |= MDIO_CTRL1_SPEED10G;
-	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
-	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
-	if (ret < 0)
-		return ret;
-
-	/* Set SerDes to 10G speed */
-	amd_xgbe_phy_serdes_start_ratechange(phydev);
-
-	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
-	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
-	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);
-
-	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
-			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_10000]);
-	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
-			   priv->serdes_tx_amp[XGBE_PHY_SPEED_10000]);
-	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
-			   priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
-	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
-			   priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
-	XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
-			   priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
-	XRXTX_IOWRITE(priv, RXTX_REG22,
-		      priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);
-
-	amd_xgbe_phy_serdes_complete_ratechange(phydev);
-
-	return 0;
-}
-
-static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
-{
-	struct amd_xgbe_phy_priv *priv = phydev->priv;
-	int ret;
-
-	/* Disable KR training */
-	ret = amd_xgbe_an_disable_kr_training(phydev);
-	if (ret < 0)
-		return ret;
-
-	/* Set PCS to KX/1G speed */
-	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
-	if (ret < 0)
-		return ret;
-
-	ret &= ~MDIO_PCS_CTRL2_TYPE;
-	ret |= MDIO_PCS_CTRL2_10GBX;
-	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
-
-	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
-	if (ret < 0)
-		return ret;
-
-	ret &= ~MDIO_CTRL1_SPEEDSEL;
-	ret |= MDIO_CTRL1_SPEED1G;
-	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
-	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
-	if (ret < 0)
-		return ret;
-
-	/* Set SerDes to 2.5G speed */
-	amd_xgbe_phy_serdes_start_ratechange(phydev);
-
-	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
-	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
-	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);
-
-	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
-			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_2500]);
-	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
-			   priv->serdes_tx_amp[XGBE_PHY_SPEED_2500]);
-	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
-			   priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
-	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
-			   priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
-	XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
-			   priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
-	XRXTX_IOWRITE(priv, RXTX_REG22,
-		      priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);
-
-	amd_xgbe_phy_serdes_complete_ratechange(phydev);
-
-	return 0;
-}
-
-static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
-{
-	struct amd_xgbe_phy_priv *priv = phydev->priv;
-	int ret;
-
-	/* Disable KR training */
-	ret = amd_xgbe_an_disable_kr_training(phydev);
-	if (ret < 0)
-		return ret;
-
-	/* Set PCS to KX/1G speed */
-	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
-	if (ret < 0)
-		return ret;
-
-	ret &= ~MDIO_PCS_CTRL2_TYPE;
-	ret |= MDIO_PCS_CTRL2_10GBX;
-	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
-
-	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
-	if (ret < 0)
-		return ret;
-
-	ret &= ~MDIO_CTRL1_SPEEDSEL;
-	ret |= MDIO_CTRL1_SPEED1G;
-	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
-	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
-	if (ret < 0)
-		return ret;
-
-	/* Set SerDes to 1G speed */
-	amd_xgbe_phy_serdes_start_ratechange(phydev);
-
-	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
-	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
-	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);
-
-	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
-			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_1000]);
-	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
-			   priv->serdes_tx_amp[XGBE_PHY_SPEED_1000]);
-	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
-			   priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
-	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
-			   priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
-	XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
-			   priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
-	XRXTX_IOWRITE(priv, RXTX_REG22,
-		      priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);
-
-	amd_xgbe_phy_serdes_complete_ratechange(phydev);
-
-	return 0;
-}
-
-static int amd_xgbe_phy_cur_mode(struct phy_device *phydev,
-				 enum amd_xgbe_phy_mode *mode)
-{
-	int ret;
-
-	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
-	if (ret < 0)
-		return ret;
-
-	if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
-		*mode = AMD_XGBE_MODE_KR;
-	else
-		*mode = AMD_XGBE_MODE_KX;
-
-	return 0;
-}
-
-static bool amd_xgbe_phy_in_kr_mode(struct phy_device *phydev)
-{
-	enum amd_xgbe_phy_mode mode;
-
-	if (amd_xgbe_phy_cur_mode(phydev, &mode))
-		return false;
-
-	return (mode == AMD_XGBE_MODE_KR);
-}
-
-static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
-{
-	struct amd_xgbe_phy_priv *priv = phydev->priv;
-	int ret;
-
-	/* If we are in KR switch to KX, and vice-versa */
-	if (amd_xgbe_phy_in_kr_mode(phydev)) {
-		if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000)
-			ret = amd_xgbe_phy_gmii_mode(phydev);
-		else
-			ret = amd_xgbe_phy_gmii_2500_mode(phydev);
-	} else {
-		ret = amd_xgbe_phy_xgmii_mode(phydev);
-	}
-
-	return ret;
-}
-
-static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
-				 enum amd_xgbe_phy_mode mode)
-{
-	enum amd_xgbe_phy_mode cur_mode;
-	int ret;
-
-	ret = amd_xgbe_phy_cur_mode(phydev, &cur_mode);
-	if (ret)
-		return ret;
-
-	if (mode != cur_mode)
-		ret = amd_xgbe_phy_switch_mode(phydev);
-
-	return ret;
-}
-
-static bool amd_xgbe_phy_use_xgmii_mode(struct phy_device *phydev)
-{
-	if (phydev->autoneg == AUTONEG_ENABLE) {
-		if (phydev->advertising & ADVERTISED_10000baseKR_Full)
-			return true;
-	} else {
-		if (phydev->speed == SPEED_10000)
-			return true;
-	}
-
-	return false;
-}
-
-static bool amd_xgbe_phy_use_gmii_2500_mode(struct phy_device *phydev)
-{
-	if (phydev->autoneg == AUTONEG_ENABLE) {
-		if (phydev->advertising & ADVERTISED_2500baseX_Full)
-			return true;
-	} else {
-		if (phydev->speed == SPEED_2500)
-			return true;
-	}
-
-	return false;
-}
-
-static bool amd_xgbe_phy_use_gmii_mode(struct phy_device *phydev)
-{
-	if (phydev->autoneg == AUTONEG_ENABLE) {
-		if (phydev->advertising & ADVERTISED_1000baseKX_Full)
-			return true;
-	} else {
-		if (phydev->speed == SPEED_1000)
-			return true;
-	}
-
-	return false;
-}
-
-static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
-			       bool restart)
-{
-	int ret;
-
-	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
-	if (ret < 0)
-		return ret;
-
-	ret &= ~MDIO_AN_CTRL1_ENABLE;
-
-	if (enable)
-		ret |= MDIO_AN_CTRL1_ENABLE;
-
-	if (restart)
-		ret |= MDIO_AN_CTRL1_RESTART;
-
-	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
-
-	return 0;
-}
-
-static int amd_xgbe_phy_restart_an(struct phy_device *phydev)
-{
-	return amd_xgbe_phy_set_an(phydev, true, true);
-}
-
-static int amd_xgbe_phy_disable_an(struct phy_device *phydev)
-{
-	return amd_xgbe_phy_set_an(phydev, false, false);
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
-						    enum amd_xgbe_phy_rx *state)
-{
-	struct amd_xgbe_phy_priv *priv = phydev->priv;
-	int ad_reg, lp_reg, ret;
-
-	*state = AMD_XGBE_RX_COMPLETE;
-
-	/* If we're not in KR mode then we're done */
-	if (!amd_xgbe_phy_in_kr_mode(phydev))
-		return AMD_XGBE_AN_PAGE_RECEIVED;
-
-	/* Enable/Disable FEC */
-	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
-	if (ad_reg < 0)
-		return AMD_XGBE_AN_ERROR;
-
-	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2);
-	if (lp_reg < 0)
-		return AMD_XGBE_AN_ERROR;
-
-	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL);
-	if (ret < 0)
-		return AMD_XGBE_AN_ERROR;
-
-	ret &= ~XGBE_PHY_FEC_MASK;
-	if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
-		ret |= priv->fec_ability;
-
-	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);
-
-	/* Start KR training */
-	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
-	if (ret < 0)
-		return AMD_XGBE_AN_ERROR;
-
-	if (ret & XGBE_PHY_KR_TRAINING_ENABLE) {
-		XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);
-
-		ret |= XGBE_PHY_KR_TRAINING_START;
-		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
-			      ret);
-
-		XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
-	}
-
-	return AMD_XGBE_AN_PAGE_RECEIVED;
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
-					       enum amd_xgbe_phy_rx *state)
-{
-	u16 msg;
-
-	*state = AMD_XGBE_RX_XNP;
-
-	msg = XNP_MCF_NULL_MESSAGE;
-	msg |= XNP_MP_FORMATTED;
-
-	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
-	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
-	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);
-
-	return AMD_XGBE_AN_PAGE_RECEIVED;
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
-					       enum amd_xgbe_phy_rx *state)
-{
-	unsigned int link_support;
-	int ret, ad_reg, lp_reg;
-
-	/* Read Base Ability register 2 first */
-	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
-	if (ret < 0)
-		return AMD_XGBE_AN_ERROR;
-
-	/* Check for a supported mode, otherwise restart in a different one */
-	link_support = amd_xgbe_phy_in_kr_mode(phydev) ? 0x80 : 0x20;
-	if (!(ret & link_support))
-		return AMD_XGBE_AN_INCOMPAT_LINK;
-
-	/* Check Extended Next Page support */
-	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
-	if (ad_reg < 0)
-		return AMD_XGBE_AN_ERROR;
-
-	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
-	if (lp_reg < 0)
-		return AMD_XGBE_AN_ERROR;
-
-	return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
-	       amd_xgbe_an_tx_xnp(phydev, state) :
-	       amd_xgbe_an_tx_training(phydev, state);
-}
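
(Illustrative aside, not part of the patch: 0x80 and 0x20 in the base-page
check above correspond to the clause-73 technology ability bits for
10GBASE-KR and 1000BASE-KX in link partner ability register 2 — i.e. "did
the partner advertise the mode we are currently running?".)
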
-
-static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
-					       enum amd_xgbe_phy_rx *state)
-{
-	int ad_reg, lp_reg;
-
-	/* Check Extended Next Page support */
-	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP);
-	if (ad_reg < 0)
-		return AMD_XGBE_AN_ERROR;
-
-	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPX);
-	if (lp_reg < 0)
-		return AMD_XGBE_AN_ERROR;
-
-	return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
-	       amd_xgbe_an_tx_xnp(phydev, state) :
-	       amd_xgbe_an_tx_training(phydev, state);
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
-{
-	struct amd_xgbe_phy_priv *priv = phydev->priv;
-	enum amd_xgbe_phy_rx *state;
-	unsigned long an_timeout;
-	int ret;
-
-	if (!priv->an_start) {
-		priv->an_start = jiffies;
-	} else {
-		an_timeout = priv->an_start +
-			     msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
-		if (time_after(jiffies, an_timeout)) {
-			/* Auto-negotiation timed out, reset state */
-			priv->kr_state = AMD_XGBE_RX_BPA;
-			priv->kx_state = AMD_XGBE_RX_BPA;
-
-			priv->an_start = jiffies;
-		}
-	}
-
-	state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
-						: &priv->kx_state;
-
-	switch (*state) {
-	case AMD_XGBE_RX_BPA:
-		ret = amd_xgbe_an_rx_bpa(phydev, state);
-		break;
-
-	case AMD_XGBE_RX_XNP:
-		ret = amd_xgbe_an_rx_xnp(phydev, state);
-		break;
-
-	default:
-		ret = AMD_XGBE_AN_ERROR;
-	}
-
-	return ret;
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
-{
-	struct amd_xgbe_phy_priv *priv = phydev->priv;
-	int ret;
-
-	/* Be sure we aren't looping trying to negotiate */
-	if (amd_xgbe_phy_in_kr_mode(phydev)) {
-		priv->kr_state = AMD_XGBE_RX_ERROR;
-
-		if (!(phydev->advertising & SUPPORTED_1000baseKX_Full) &&
-		    !(phydev->advertising & SUPPORTED_2500baseX_Full))
-			return AMD_XGBE_AN_NO_LINK;
-
-		if (priv->kx_state != AMD_XGBE_RX_BPA)
-			return AMD_XGBE_AN_NO_LINK;
-	} else {
-		priv->kx_state = AMD_XGBE_RX_ERROR;
-
-		if (!(phydev->advertising & SUPPORTED_10000baseKR_Full))
-			return AMD_XGBE_AN_NO_LINK;
-
-		if (priv->kr_state != AMD_XGBE_RX_BPA)
-			return AMD_XGBE_AN_NO_LINK;
-	}
-
-	ret = amd_xgbe_phy_disable_an(phydev);
-	if (ret)
-		return AMD_XGBE_AN_ERROR;
-
-	ret = amd_xgbe_phy_switch_mode(phydev);
-	if (ret)
-		return AMD_XGBE_AN_ERROR;
-
-	ret = amd_xgbe_phy_restart_an(phydev);
-	if (ret)
-		return AMD_XGBE_AN_ERROR;
-
-	return AMD_XGBE_AN_INCOMPAT_LINK;
-}
-
-static irqreturn_t amd_xgbe_an_isr(int irq, void *data)
-{
-	struct amd_xgbe_phy_priv *priv = (struct amd_xgbe_phy_priv *)data;
-
-	/* Interrupt reason must be read and cleared outside of IRQ context */
-	disable_irq_nosync(priv->an_irq);
-
-	queue_work(priv->an_workqueue, &priv->an_irq_work);
-
-	return IRQ_HANDLED;
-}
-
-static void amd_xgbe_an_irq_work(struct work_struct *work)
-{
-	struct amd_xgbe_phy_priv *priv = container_of(work,
-						      struct amd_xgbe_phy_priv,
-						      an_irq_work);
-
-	/* Avoid a race between enabling the IRQ and exiting the work by
-	 * waiting for the work to finish and then queueing it
-	 */
-	flush_work(&priv->an_work);
-	queue_work(priv->an_workqueue, &priv->an_work);
-}
-
-static void amd_xgbe_an_state_machine(struct work_struct *work)
-{
-	struct amd_xgbe_phy_priv *priv = container_of(work,
-						      struct amd_xgbe_phy_priv,
-						      an_work);
-	struct phy_device *phydev = priv->phydev;
-	enum amd_xgbe_phy_an cur_state = priv->an_state;
-	int int_reg, int_mask;
-
-	mutex_lock(&priv->an_mutex);
-
-	/* Read the interrupt */
-	int_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
-	if (!int_reg)
-		goto out;
-
-next_int:
-	if (int_reg < 0) {
-		priv->an_state = AMD_XGBE_AN_ERROR;
-		int_mask = XGBE_AN_INT_MASK;
-	} else if (int_reg & XGBE_AN_PG_RCV) {
-		priv->an_state = AMD_XGBE_AN_PAGE_RECEIVED;
-		int_mask = XGBE_AN_PG_RCV;
-	} else if (int_reg & XGBE_AN_INC_LINK) {
-		priv->an_state = AMD_XGBE_AN_INCOMPAT_LINK;
-		int_mask = XGBE_AN_INC_LINK;
-	} else if (int_reg & XGBE_AN_INT_CMPLT) {
-		priv->an_state = AMD_XGBE_AN_COMPLETE;
-		int_mask = XGBE_AN_INT_CMPLT;
-	} else {
-		priv->an_state = AMD_XGBE_AN_ERROR;
-		int_mask = 0;
-	}
-
-	/* Clear the interrupt to be processed */
-	int_reg &= ~int_mask;
-	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, int_reg);
-
-	priv->an_result = priv->an_state;
-
-again:
-	cur_state = priv->an_state;
-
-	switch (priv->an_state) {
-	case AMD_XGBE_AN_READY:
-		priv->an_supported = 0;
-		break;
-
-	case AMD_XGBE_AN_PAGE_RECEIVED:
-		priv->an_state = amd_xgbe_an_page_received(phydev);
-		priv->an_supported++;
-		break;
-
-	case AMD_XGBE_AN_INCOMPAT_LINK:
-		priv->an_supported = 0;
-		priv->parallel_detect = 0;
-		priv->an_state = amd_xgbe_an_incompat_link(phydev);
-		break;
-
-	case AMD_XGBE_AN_COMPLETE:
-		priv->parallel_detect = priv->an_supported ? 0 : 1;
-		netdev_dbg(phydev->attached_dev, "%s successful\n",
-			   priv->an_supported ? "Auto negotiation"
-					      : "Parallel detection");
-		break;
-
-	case AMD_XGBE_AN_NO_LINK:
-		break;
-
-	default:
-		priv->an_state = AMD_XGBE_AN_ERROR;
-	}
-
-	if (priv->an_state == AMD_XGBE_AN_NO_LINK) {
-		int_reg = 0;
-		phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
-	} else if (priv->an_state == AMD_XGBE_AN_ERROR) {
-		netdev_err(phydev->attached_dev,
-			   "error during auto-negotiation, state=%u\n",
-			   cur_state);
-
-		int_reg = 0;
-		phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
-	}
-
-	if (priv->an_state >= AMD_XGBE_AN_COMPLETE) {
-		priv->an_result = priv->an_state;
-		priv->an_state = AMD_XGBE_AN_READY;
-		priv->kr_state = AMD_XGBE_RX_BPA;
-		priv->kx_state = AMD_XGBE_RX_BPA;
-		priv->an_start = 0;
-	}
-
-	if (cur_state != priv->an_state)
-		goto again;
-
-	if (int_reg)
-		goto next_int;
-
-out:
-	enable_irq(priv->an_irq);
-
-	mutex_unlock(&priv->an_mutex);
-}
-
-static int amd_xgbe_an_init(struct phy_device *phydev)
-{
-	int ret;
-
-	/* Set up Advertisement register 3 first */
-	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
-	if (ret < 0)
-		return ret;
-
-	if (phydev->advertising & SUPPORTED_10000baseR_FEC)
-		ret |= 0xc000;
-	else
-		ret &= ~0xc000;
-
-	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
-
-	/* Set up Advertisement register 2 next */
-	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
-	if (ret < 0)
-		return ret;
-
-	if (phydev->advertising & SUPPORTED_10000baseKR_Full)
-		ret |= 0x80;
-	else
-		ret &= ~0x80;
-
-	if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
-	    (phydev->advertising & SUPPORTED_2500baseX_Full))
-		ret |= 0x20;
-	else
-		ret &= ~0x20;
-
-	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
-
-	/* Set up Advertisement register 1 last */
-	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
-	if (ret < 0)
-		return ret;
-
-	if (phydev->advertising & SUPPORTED_Pause)
-		ret |= 0x400;
-	else
-		ret &= ~0x400;
-
-	if (phydev->advertising & SUPPORTED_Asym_Pause)
-		ret |= 0x800;
-	else
-		ret &= ~0x800;
-
-	/* We don't intend to perform XNP */
-	ret &= ~XNP_NP_EXCHANGE;
-
-	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
-
-	return 0;
-}
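
(Illustrative aside, not part of the patch: the magic masks written above map
to IEEE 802.3 clause-73 advertisement bits — 0x400/0x800 are the
pause/asymmetric-pause bits of register 1, 0x80/0x20 the 10GBASE-KR and
1000BASE-KX ability bits of register 2, and 0xc000 the FEC ability/request
pair of register 3.)
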
-
-static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
-{
-	int count, ret;
-
-	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
-	if (ret < 0)
-		return ret;
-
-	ret |= MDIO_CTRL1_RESET;
-	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
-	count = 50;
-	do {
-		msleep(20);
-		ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
-		if (ret < 0)
-			return ret;
-	} while ((ret & MDIO_CTRL1_RESET) && --count);
-
-	if (ret & MDIO_CTRL1_RESET)
-		return -ETIMEDOUT;
-
-	/* Disable auto-negotiation for now */
-	ret = amd_xgbe_phy_disable_an(phydev);
-	if (ret < 0)
-		return ret;
-
-	/* Clear auto-negotiation interrupts */
-	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
-
-	return 0;
-}
-
-static int amd_xgbe_phy_config_init(struct phy_device *phydev)
-{
-	struct amd_xgbe_phy_priv *priv = phydev->priv;
-	struct net_device *netdev = phydev->attached_dev;
-	int ret;
-
-	if (!priv->an_irq_allocated) {
-		/* Allocate the auto-negotiation workqueue and interrupt */
-		snprintf(priv->an_irq_name, sizeof(priv->an_irq_name) - 1,
-			 "%s-pcs", netdev_name(netdev));
-
-		priv->an_workqueue =
-			create_singlethread_workqueue(priv->an_irq_name);
-		if (!priv->an_workqueue) {
-			netdev_err(netdev, "phy workqueue creation failed\n");
-			return -ENOMEM;
-		}
-
-		ret = devm_request_irq(priv->dev, priv->an_irq,
-				       amd_xgbe_an_isr, 0, priv->an_irq_name,
-				       priv);
-		if (ret) {
-			netdev_err(netdev, "phy irq request failed\n");
-			destroy_workqueue(priv->an_workqueue);
-			return ret;
-		}
-
-		priv->an_irq_allocated = 1;
-	}
-
-	/* Set initial mode - call the mode setting routines
-	 * directly to ensure we are properly configured
-	 */
-	if (amd_xgbe_phy_use_xgmii_mode(phydev))
-		ret = amd_xgbe_phy_xgmii_mode(phydev);
-	else if (amd_xgbe_phy_use_gmii_mode(phydev))
-		ret = amd_xgbe_phy_gmii_mode(phydev);
-	else if (amd_xgbe_phy_use_gmii_2500_mode(phydev))
-		ret = amd_xgbe_phy_gmii_2500_mode(phydev);
-	else
-		ret = -EINVAL;
-	if (ret < 0)
-		return ret;
-
-	/* Set up advertisement registers based on current settings */
-	ret = amd_xgbe_an_init(phydev);
-	if (ret)
-		return ret;
-
-	/* Enable auto-negotiation interrupts */
-	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
-
-	return 0;
-}
-
-static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
-{
-	int ret;
-
-	/* Disable auto-negotiation */
-	ret = amd_xgbe_phy_disable_an(phydev);
-	if (ret < 0)
-		return ret;
-
-	/* Validate/Set specified speed */
-	switch (phydev->speed) {
-	case SPEED_10000:
-		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
-		break;
-
-	case SPEED_2500:
-	case SPEED_1000:
-		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
-		break;
-
-	default:
-		ret = -EINVAL;
-	}
-
-	if (ret < 0)
-		return ret;
-
-	/* Validate duplex mode */
-	if (phydev->duplex != DUPLEX_FULL)
-		return -EINVAL;
-
-	phydev->pause = 0;
-	phydev->asym_pause = 0;
-
-	return 0;
-}
-
-static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev)
-{
-	struct amd_xgbe_phy_priv *priv = phydev->priv;
-	u32 mmd_mask = phydev->c45_ids.devices_in_package;
-	int ret;
-
-	if (phydev->autoneg != AUTONEG_ENABLE)
-		return amd_xgbe_phy_setup_forced(phydev);
-
-	/* Make sure we have the AN MMD present */
-	if (!(mmd_mask & MDIO_DEVS_AN))
-		return -EINVAL;
-
-	/* Disable auto-negotiation interrupt */
-	disable_irq(priv->an_irq);
-
-	/* Start auto-negotiation in a supported mode */
-	if (phydev->advertising & SUPPORTED_10000baseKR_Full)
-		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
-	else if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
-		 (phydev->advertising & SUPPORTED_2500baseX_Full))
-		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
-	else
-		ret = -EINVAL;
-	if (ret < 0) {
-		enable_irq(priv->an_irq);
-		return ret;
-	}
-
-	/* Disable and stop any in progress auto-negotiation */
-	ret = amd_xgbe_phy_disable_an(phydev);
-	if (ret < 0)
-		return ret;
-
-	/* Clear any auto-negotiation interrupts */
-	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
-
-	priv->an_result = AMD_XGBE_AN_READY;
-	priv->an_state = AMD_XGBE_AN_READY;
-	priv->kr_state = AMD_XGBE_RX_BPA;
-	priv->kx_state = AMD_XGBE_RX_BPA;
-
-	/* Re-enable auto-negotiation interrupt */
-	enable_irq(priv->an_irq);
-
-	/* Set up advertisement registers based on current settings */
-	ret = amd_xgbe_an_init(phydev);
-	if (ret)
-		return ret;
-
-	/* Enable and start auto-negotiation */
-	return amd_xgbe_phy_restart_an(phydev);
-}
-
-static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
-{
-	struct amd_xgbe_phy_priv *priv = phydev->priv;
-	int ret;
-
-	mutex_lock(&priv->an_mutex);
-
-	ret = __amd_xgbe_phy_config_aneg(phydev);
-
-	mutex_unlock(&priv->an_mutex);
-
-	return ret;
-}
-
-static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
-{
-	struct amd_xgbe_phy_priv *priv = phydev->priv;
-
-	return (priv->an_result == AMD_XGBE_AN_COMPLETE);
-}
-
-static int amd_xgbe_phy_update_link(struct phy_device *phydev)
-{
-	struct amd_xgbe_phy_priv *priv = phydev->priv;
-	int ret;
-
-	/* If we're doing auto-negotiation, don't report link down */
-	if (priv->an_state != AMD_XGBE_AN_READY) {
-		phydev->link = 1;
-		return 0;
-	}
-
-	/* Link status is latched low, so read once to clear
-	 * and then read again to get current state
-	 */
-	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
-	if (ret < 0)
-		return ret;
-
-	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
-	if (ret < 0)
-		return ret;
-
-	phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;
-
-	return 0;
-}
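
(Illustrative aside, not part of the patch: the MDIO_STAT1 link bit is
latch-low per clause 45 — a link drop sticks until the register is read — so
the first read above flushes any latched failure and the second reflects the
current link state.)
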
-
-static int amd_xgbe_phy_read_status(struct phy_device *phydev)
-{
-	struct amd_xgbe_phy_priv *priv = phydev->priv;
-	u32 mmd_mask = phydev->c45_ids.devices_in_package;
-	int ret, ad_ret, lp_ret;
-
-	ret = amd_xgbe_phy_update_link(phydev);
-	if (ret)
-		return ret;
-
-	if ((phydev->autoneg == AUTONEG_ENABLE) &&
-	    !priv->parallel_detect) {
-		if (!(mmd_mask & MDIO_DEVS_AN))
-			return -EINVAL;
-
-		if (!amd_xgbe_phy_aneg_done(phydev))
-			return 0;
-
-		/* Compare Advertisement and Link Partner register 1 */
-		ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
-		if (ad_ret < 0)
-			return ad_ret;
-		lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
-		if (lp_ret < 0)
-			return lp_ret;
-
-		ad_ret &= lp_ret;
-		phydev->pause = (ad_ret & 0x400) ? 1 : 0;
-		phydev->asym_pause = (ad_ret & 0x800) ? 1 : 0;
-
-		/* Compare Advertisement and Link Partner register 2 */
-		ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN,
-				      MDIO_AN_ADVERTISE + 1);
-		if (ad_ret < 0)
-			return ad_ret;
-		lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
-		if (lp_ret < 0)
-			return lp_ret;
-
-		ad_ret &= lp_ret;
-		if (ad_ret & 0x80) {
-			phydev->speed = SPEED_10000;
-			ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
-			if (ret)
-				return ret;
-		} else {
-			switch (priv->speed_set) {
-			case AMD_XGBE_PHY_SPEEDSET_1000_10000:
-				phydev->speed = SPEED_1000;
-				break;
-
-			case AMD_XGBE_PHY_SPEEDSET_2500_10000:
-				phydev->speed = SPEED_2500;
-				break;
-			}
-
-			ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
-			if (ret)
-				return ret;
-		}
-
-		phydev->duplex = DUPLEX_FULL;
-	} else {
-		if (amd_xgbe_phy_in_kr_mode(phydev)) {
-			phydev->speed = SPEED_10000;
-		} else {
-			switch (priv->speed_set) {
-			case AMD_XGBE_PHY_SPEEDSET_1000_10000:
-				phydev->speed = SPEED_1000;
-				break;
-
-			case AMD_XGBE_PHY_SPEEDSET_2500_10000:
-				phydev->speed = SPEED_2500;
-				break;
-			}
-		}
-		phydev->duplex = DUPLEX_FULL;
-		phydev->pause = 0;
-		phydev->asym_pause = 0;
-	}
-
-	return 0;
-}
-
-static int amd_xgbe_phy_suspend(struct phy_device *phydev)
-{
-	struct amd_xgbe_phy_priv *priv = phydev->priv;
-	int ret;
-
-	mutex_lock(&phydev->lock);
-
-	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
-	if (ret < 0)
-		goto unlock;
-
-	priv->lpm_ctrl = ret;
-
-	ret |= MDIO_CTRL1_LPOWER;
-	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
-	ret = 0;
-
-unlock:
-	mutex_unlock(&phydev->lock);
-
-	return ret;
-}
-
-static int amd_xgbe_phy_resume(struct phy_device *phydev)
-{
-	struct amd_xgbe_phy_priv *priv = phydev->priv;
-
-	mutex_lock(&phydev->lock);
-
-	priv->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
-	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, priv->lpm_ctrl);
-
-	mutex_unlock(&phydev->lock);
-
-	return 0;
-}
-
-static unsigned int amd_xgbe_phy_resource_count(struct platform_device *pdev,
-						unsigned int type)
-{
-	unsigned int count;
-	int i;
-
-	for (i = 0, count = 0; i < pdev->num_resources; i++) {
-		struct resource *r = &pdev->resource[i];
-
-		if (type == resource_type(r))
-			count++;
-	}
-
-	return count;
-}
-
-static int amd_xgbe_phy_probe(struct phy_device *phydev)
-{
-	struct amd_xgbe_phy_priv *priv;
-	struct platform_device *phy_pdev;
-	struct device *dev, *phy_dev;
-	unsigned int phy_resnum, phy_irqnum;
-	int ret;
-
-	if (!phydev->bus || !phydev->bus->parent)
-		return -EINVAL;
-
-	dev = phydev->bus->parent;
-
-	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	priv->pdev = to_platform_device(dev);
-	priv->adev = ACPI_COMPANION(dev);
-	priv->dev = dev;
-	priv->phydev = phydev;
-	mutex_init(&priv->an_mutex);
-	INIT_WORK(&priv->an_irq_work, amd_xgbe_an_irq_work);
-	INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
-
-	if (!priv->adev || acpi_disabled) {
-		struct device_node *bus_node;
-		struct device_node *phy_node;
-
-		bus_node = priv->dev->of_node;
-		phy_node = of_parse_phandle(bus_node, "phy-handle", 0);
-		if (!phy_node) {
-			dev_err(dev, "unable to parse phy-handle\n");
-			ret = -EINVAL;
-			goto err_priv;
-		}
-
-		phy_pdev = of_find_device_by_node(phy_node);
-		of_node_put(phy_node);
-
-		if (!phy_pdev) {
-			dev_err(dev, "unable to obtain phy device\n");
-			ret = -EINVAL;
-			goto err_priv;
-		}
-
-		phy_resnum = 0;
-		phy_irqnum = 0;
-	} else {
-		/* In ACPI, the XGBE and PHY resources are grouped
-		 * together, with the PHY resources at the end
-		 */
-		phy_pdev = priv->pdev;
-		phy_resnum = amd_xgbe_phy_resource_count(phy_pdev,
-							 IORESOURCE_MEM) - 3;
-		phy_irqnum = amd_xgbe_phy_resource_count(phy_pdev,
-							 IORESOURCE_IRQ) - 1;
-	}
-	phy_dev = &phy_pdev->dev;
-
-	/* Get the device mmio areas */
-	priv->rxtx_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
-					       phy_resnum++);
-	priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
-	if (IS_ERR(priv->rxtx_regs)) {
-		dev_err(dev, "rxtx ioremap failed\n");
-		ret = PTR_ERR(priv->rxtx_regs);
-		goto err_put;
-	}
-
-	priv->sir0_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
-					       phy_resnum++);
-	priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
-	if (IS_ERR(priv->sir0_regs)) {
-		dev_err(dev, "sir0 ioremap failed\n");
-		ret = PTR_ERR(priv->sir0_regs);
-		goto err_rxtx;
-	}
-
-	priv->sir1_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
-					       phy_resnum++);
-	priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
-	if (IS_ERR(priv->sir1_regs)) {
-		dev_err(dev, "sir1 ioremap failed\n");
-		ret = PTR_ERR(priv->sir1_regs);
-		goto err_sir0;
-	}
-
-	/* Get the auto-negotiation interrupt */
-	ret = platform_get_irq(phy_pdev, phy_irqnum);
-	if (ret < 0) {
-		dev_err(dev, "platform_get_irq failed\n");
-		goto err_sir1;
-	}
-	priv->an_irq = ret;
-
-	/* Get the device speed set property */
-	ret = device_property_read_u32(phy_dev, XGBE_PHY_SPEEDSET_PROPERTY,
-				       &priv->speed_set);
-	if (ret) {
-		dev_err(dev, "invalid %s property\n",
-			XGBE_PHY_SPEEDSET_PROPERTY);
-		goto err_sir1;
-	}
-
-	switch (priv->speed_set) {
-	case AMD_XGBE_PHY_SPEEDSET_1000_10000:
-	case AMD_XGBE_PHY_SPEEDSET_2500_10000:
-		break;
-	default:
-		dev_err(dev, "invalid %s property\n",
-			XGBE_PHY_SPEEDSET_PROPERTY);
-		ret = -EINVAL;
-		goto err_sir1;
-	}
-
-	if (device_property_present(phy_dev, XGBE_PHY_BLWC_PROPERTY)) {
-		ret = device_property_read_u32_array(phy_dev,
-						     XGBE_PHY_BLWC_PROPERTY,
-						     priv->serdes_blwc,
-						     XGBE_PHY_SPEEDS);
-		if (ret) {
-			dev_err(dev, "invalid %s property\n",
-				XGBE_PHY_BLWC_PROPERTY);
-			goto err_sir1;
-		}
-	} else {
-		memcpy(priv->serdes_blwc, amd_xgbe_phy_serdes_blwc,
-		       sizeof(priv->serdes_blwc));
-	}
-
-	if (device_property_present(phy_dev, XGBE_PHY_CDR_RATE_PROPERTY)) {
-		ret = device_property_read_u32_array(phy_dev,
-						     XGBE_PHY_CDR_RATE_PROPERTY,
-						     priv->serdes_cdr_rate,
-						     XGBE_PHY_SPEEDS);
-		if (ret) {
-			dev_err(dev, "invalid %s property\n",
-				XGBE_PHY_CDR_RATE_PROPERTY);
-			goto err_sir1;
-		}
-	} else {
-		memcpy(priv->serdes_cdr_rate, amd_xgbe_phy_serdes_cdr_rate,
-		       sizeof(priv->serdes_cdr_rate));
-	}
-
-	if (device_property_present(phy_dev, XGBE_PHY_PQ_SKEW_PROPERTY)) {
-		ret = device_property_read_u32_array(phy_dev,
-						     XGBE_PHY_PQ_SKEW_PROPERTY,
-						     priv->serdes_pq_skew,
-						     XGBE_PHY_SPEEDS);
-		if (ret) {
-			dev_err(dev, "invalid %s property\n",
-				XGBE_PHY_PQ_SKEW_PROPERTY);
-			goto err_sir1;
-		}
-	} else {
-		memcpy(priv->serdes_pq_skew, amd_xgbe_phy_serdes_pq_skew,
-		       sizeof(priv->serdes_pq_skew));
-	}
-
-	if (device_property_present(phy_dev, XGBE_PHY_TX_AMP_PROPERTY)) {
-		ret = device_property_read_u32_array(phy_dev,
-						     XGBE_PHY_TX_AMP_PROPERTY,
-						     priv->serdes_tx_amp,
-						     XGBE_PHY_SPEEDS);
-		if (ret) {
-			dev_err(dev, "invalid %s property\n",
-				XGBE_PHY_TX_AMP_PROPERTY);
-			goto err_sir1;
-		}
-	} else {
-		memcpy(priv->serdes_tx_amp, amd_xgbe_phy_serdes_tx_amp,
-		       sizeof(priv->serdes_tx_amp));
-	}
-
-	if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
-		ret = device_property_read_u32_array(phy_dev,
-						     XGBE_PHY_DFE_CFG_PROPERTY,
-						     priv->serdes_dfe_tap_cfg,
-						     XGBE_PHY_SPEEDS);
-		if (ret) {
-			dev_err(dev, "invalid %s property\n",
-				XGBE_PHY_DFE_CFG_PROPERTY);
-			goto err_sir1;
-		}
-	} else {
-		memcpy(priv->serdes_dfe_tap_cfg,
-		       amd_xgbe_phy_serdes_dfe_tap_cfg,
-		       sizeof(priv->serdes_dfe_tap_cfg));
-	}
-
-	if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
-		ret = device_property_read_u32_array(phy_dev,
-						     XGBE_PHY_DFE_ENA_PROPERTY,
-						     priv->serdes_dfe_tap_ena,
-						     XGBE_PHY_SPEEDS);
-		if (ret) {
-			dev_err(dev, "invalid %s property\n",
-				XGBE_PHY_DFE_ENA_PROPERTY);
-			goto err_sir1;
-		}
-	} else {
-		memcpy(priv->serdes_dfe_tap_ena,
-		       amd_xgbe_phy_serdes_dfe_tap_ena,
-		       sizeof(priv->serdes_dfe_tap_ena));
-	}
-
-	/* Initialize supported features */
-	phydev->supported = SUPPORTED_Autoneg;
-	phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-	phydev->supported |= SUPPORTED_Backplane;
-	phydev->supported |= SUPPORTED_10000baseKR_Full;
-	switch (priv->speed_set) {
-	case AMD_XGBE_PHY_SPEEDSET_1000_10000:
-		phydev->supported |= SUPPORTED_1000baseKX_Full;
-		break;
-	case AMD_XGBE_PHY_SPEEDSET_2500_10000:
-		phydev->supported |= SUPPORTED_2500baseX_Full;
-		break;
-	}
-
-	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
-	if (ret < 0)
-		return ret;
-	priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
-	if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
-		phydev->supported |= SUPPORTED_10000baseR_FEC;
-
-	phydev->advertising = phydev->supported;
-
-	phydev->priv = priv;
-
-	if (!priv->adev || acpi_disabled)
-		platform_device_put(phy_pdev);
-
-	return 0;
-
-err_sir1:
-	devm_iounmap(dev, priv->sir1_regs);
-	devm_release_mem_region(dev, priv->sir1_res->start,
-				resource_size(priv->sir1_res));
-
-err_sir0:
-	devm_iounmap(dev, priv->sir0_regs);
-	devm_release_mem_region(dev, priv->sir0_res->start,
-				resource_size(priv->sir0_res));
-
-err_rxtx:
-	devm_iounmap(dev, priv->rxtx_regs);
-	devm_release_mem_region(dev, priv->rxtx_res->start,
-				resource_size(priv->rxtx_res));
-
-err_put:
-	if (!priv->adev || acpi_disabled)
-		platform_device_put(phy_pdev);
-
-err_priv:
-	devm_kfree(dev, priv);
-
-	return ret;
-}
-
-static void amd_xgbe_phy_remove(struct phy_device *phydev)
-{
-	struct amd_xgbe_phy_priv *priv = phydev->priv;
-	struct device *dev = priv->dev;
-
-	if (priv->an_irq_allocated) {
-		devm_free_irq(dev, priv->an_irq, priv);
-
-		flush_workqueue(priv->an_workqueue);
-		destroy_workqueue(priv->an_workqueue);
-	}
-
-	/* Release resources */
-	devm_iounmap(dev, priv->sir1_regs);
-	devm_release_mem_region(dev, priv->sir1_res->start,
-				resource_size(priv->sir1_res));
-
-	devm_iounmap(dev, priv->sir0_regs);
-	devm_release_mem_region(dev, priv->sir0_res->start,
-				resource_size(priv->sir0_res));
-
-	devm_iounmap(dev, priv->rxtx_regs);
-	devm_release_mem_region(dev, priv->rxtx_res->start,
-				resource_size(priv->rxtx_res));
-
-	devm_kfree(dev, priv);
-}
-
-static int amd_xgbe_match_phy_device(struct phy_device *phydev)
-{
-	return phydev->c45_ids.device_ids[MDIO_MMD_PCS] == XGBE_PHY_ID;
-}
-
-static struct phy_driver amd_xgbe_phy_driver[] = {
-	{
-		.phy_id			= XGBE_PHY_ID,
-		.phy_id_mask		= XGBE_PHY_MASK,
-		.name			= "AMD XGBE PHY",
-		.features		= 0,
-		.flags			= PHY_IS_INTERNAL,
-		.probe			= amd_xgbe_phy_probe,
-		.remove			= amd_xgbe_phy_remove,
-		.soft_reset		= amd_xgbe_phy_soft_reset,
-		.config_init		= amd_xgbe_phy_config_init,
-		.suspend		= amd_xgbe_phy_suspend,
-		.resume			= amd_xgbe_phy_resume,
-		.config_aneg		= amd_xgbe_phy_config_aneg,
-		.aneg_done		= amd_xgbe_phy_aneg_done,
-		.read_status		= amd_xgbe_phy_read_status,
-		.match_phy_device	= amd_xgbe_match_phy_device,
-		.driver			= {
-			.owner = THIS_MODULE,
-		},
-	},
-};
-
-module_phy_driver(amd_xgbe_phy_driver);
-
-static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
-	{ XGBE_PHY_ID, XGBE_PHY_MASK },
-	{ }
-};
-MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids);
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index b5dc59d..4dea85b 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -136,8 +136,8 @@
 	/* AFE_RX_LP_COUNTER, set RX bandwidth to maximum */
 	phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
 
-	/* AFE_TX_CONFIG, set 1000BT Cfeed=110 for all ports */
-	phy_write_misc(phydev, AFE_TX_CONFIG, 0x0061);
+	/* AFE_TX_CONFIG, set 100BT Cfeed=011 to improve rise/fall time */
+	phy_write_misc(phydev, AFE_TX_CONFIG, 0x431);
 
 	/* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
 	phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
@@ -167,6 +167,9 @@
 	/* AFE_RXCONFIG_1, provide more margin for INL/DNL measurement */
 	phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
 
+	/* AFE_TX_CONFIG, set 100BT Cfeed=011 to improve rise/fall time */
+	phy_write_misc(phydev, AFE_TX_CONFIG, 0x431);
+
 	/* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
 	phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
 
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 0d16c7d..2a32870 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -158,6 +158,18 @@
 	.config_intr	= dm9161_config_intr,
 	.driver		= { .owner = THIS_MODULE,},
 }, {
+	.phy_id		= 0x0181b8b0,
+	.name		= "Davicom DM9161B/C",
+	.phy_id_mask	= 0x0ffffff0,
+	.features	= PHY_BASIC_FEATURES,
+	.flags		= PHY_HAS_INTERRUPT,
+	.config_init	= dm9161_config_init,
+	.config_aneg	= dm9161_config_aneg,
+	.read_status	= genphy_read_status,
+	.ack_interrupt	= dm9161_ack_interrupt,
+	.config_intr	= dm9161_config_intr,
+	.driver		= { .owner = THIS_MODULE,},
+}, {
 	.phy_id		= 0x0181b8a0,
 	.name		= "Davicom DM9161A",
 	.phy_id_mask	= 0x0ffffff0,
@@ -186,6 +198,7 @@
 
 static struct mdio_device_id __maybe_unused davicom_tbl[] = {
 	{ 0x0181b880, 0x0ffffff0 },
+	{ 0x0181b8b0, 0x0ffffff0 },
 	{ 0x0181b8a0, 0x0ffffff0 },
 	{ 0x00181b80, 0x0ffffff0 },
 	{ }
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
new file mode 100644
index 0000000..c7a12e2
--- /dev/null
+++ b/drivers/net/phy/dp83867.c
@@ -0,0 +1,239 @@
+/*
+ * Driver for the Texas Instruments DP83867 PHY
+ *
+ * Copyright (C) 2015 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+
+#include <dt-bindings/net/ti-dp83867.h>
+
+#define DP83867_PHY_ID		0x2000a231
+#define DP83867_DEVADDR		0x1f
+
+#define MII_DP83867_PHYCTRL	0x10
+#define MII_DP83867_MICR	0x12
+#define MII_DP83867_ISR		0x13
+#define DP83867_CTRL		0x1f
+
+/* Extended Registers */
+#define DP83867_RGMIICTL	0x0032
+#define DP83867_RGMIIDCTL	0x0086
+
+#define DP83867_SW_RESET	BIT(15)
+#define DP83867_SW_RESTART	BIT(14)
+
+/* MICR Interrupt bits */
+#define MII_DP83867_MICR_AN_ERR_INT_EN		BIT(15)
+#define MII_DP83867_MICR_SPEED_CHNG_INT_EN	BIT(14)
+#define MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN	BIT(13)
+#define MII_DP83867_MICR_PAGE_RXD_INT_EN	BIT(12)
+#define MII_DP83867_MICR_AUTONEG_COMP_INT_EN	BIT(11)
+#define MII_DP83867_MICR_LINK_STS_CHNG_INT_EN	BIT(10)
+#define MII_DP83867_MICR_FALSE_CARRIER_INT_EN	BIT(8)
+#define MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN	BIT(4)
+#define MII_DP83867_MICR_WOL_INT_EN		BIT(3)
+#define MII_DP83867_MICR_XGMII_ERR_INT_EN	BIT(2)
+#define MII_DP83867_MICR_POL_CHNG_INT_EN	BIT(1)
+#define MII_DP83867_MICR_JABBER_INT_EN		BIT(0)
+
+/* RGMIICTL bits */
+#define DP83867_RGMII_TX_CLK_DELAY_EN		BIT(1)
+#define DP83867_RGMII_RX_CLK_DELAY_EN		BIT(0)
+
+/* PHY CTRL bits */
+#define DP83867_PHYCR_FIFO_DEPTH_SHIFT		14
+
+/* RGMIIDCTL bits */
+#define DP83867_RGMII_TX_CLK_DELAY_SHIFT	4
+
+struct dp83867_private {
+	u32 rx_id_delay;
+	u32 tx_id_delay;
+	u32 fifo_depth;
+};
+
+static int dp83867_ack_interrupt(struct phy_device *phydev)
+{
+	int err = phy_read(phydev, MII_DP83867_ISR);
+
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static int dp83867_config_intr(struct phy_device *phydev)
+{
+	int micr_status;
+
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+		micr_status = phy_read(phydev, MII_DP83867_MICR);
+		if (micr_status < 0)
+			return micr_status;
+
+		micr_status |=
+			(MII_DP83867_MICR_AN_ERR_INT_EN |
+			MII_DP83867_MICR_SPEED_CHNG_INT_EN |
+			MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
+			MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
+
+		return phy_write(phydev, MII_DP83867_MICR, micr_status);
+	}
+
+	micr_status = 0x0;
+	return phy_write(phydev, MII_DP83867_MICR, micr_status);
+}
+
+#ifdef CONFIG_OF_MDIO
+static int dp83867_of_init(struct phy_device *phydev)
+{
+	struct dp83867_private *dp83867 = phydev->priv;
+	struct device *dev = &phydev->dev;
+	struct device_node *of_node = dev->of_node;
+	int ret;
+
+	if (!of_node && dev->parent->of_node)
+		of_node = dev->parent->of_node;
+
+	if (!of_node)
+		return -ENODEV;
+
+	ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
+				   &dp83867->rx_id_delay);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
+				   &dp83867->tx_id_delay);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(of_node, "ti,fifo-depth",
+				   &dp83867->fifo_depth);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+#else
+static int dp83867_of_init(struct phy_device *phydev)
+{
+	return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
+static int dp83867_config_init(struct phy_device *phydev)
+{
+	struct dp83867_private *dp83867;
+	int ret;
+	u16 val, delay;
+
+	if (!phydev->priv) {
+		dp83867 = devm_kzalloc(&phydev->dev, sizeof(*dp83867),
+				       GFP_KERNEL);
+		if (!dp83867)
+			return -ENOMEM;
+
+		phydev->priv = dp83867;
+		ret = dp83867_of_init(phydev);
+		if (ret)
+			return ret;
+	} else {
+		dp83867 = (struct dp83867_private *)phydev->priv;
+	}
+
+	if (phy_interface_is_rgmii(phydev)) {
+		ret = phy_write(phydev, MII_DP83867_PHYCTRL,
+			(dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
+		if (ret)
+			return ret;
+	}
+
+	if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
+	    (phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID)) {
+		val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL,
+					    DP83867_DEVADDR, phydev->addr);
+
+		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+			val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
+
+		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+			val |= DP83867_RGMII_TX_CLK_DELAY_EN;
+
+		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+			val |= DP83867_RGMII_RX_CLK_DELAY_EN;
+
+		phy_write_mmd_indirect(phydev, DP83867_RGMIICTL,
+				       DP83867_DEVADDR, phydev->addr, val);
+
+		delay = (dp83867->rx_id_delay |
+			(dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT));
+
+		phy_write_mmd_indirect(phydev, DP83867_RGMIIDCTL,
+				       DP83867_DEVADDR, phydev->addr, delay);
+	}
+
+	return 0;
+}
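
(Illustrative aside, not part of the patch: RGMIIDCTL packs both internal
delays into one byte; hypothetical values rx_id_delay = 0x8 and
tx_id_delay = 0xa would give delay = 0x8 | (0xa << 4) = 0xa8.)
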
+
+static int dp83867_phy_reset(struct phy_device *phydev)
+{
+	int err;
+
+	err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
+	if (err < 0)
+		return err;
+
+	return dp83867_config_init(phydev);
+}
+
+static struct phy_driver dp83867_driver[] = {
+	{
+		.phy_id		= DP83867_PHY_ID,
+		.phy_id_mask	= 0xfffffff0,
+		.name		= "TI DP83867",
+		.features	= PHY_GBIT_FEATURES,
+		.flags		= PHY_HAS_INTERRUPT,
+
+		.config_init	= dp83867_config_init,
+		.soft_reset	= dp83867_phy_reset,
+
+		/* IRQ related */
+		.ack_interrupt	= dp83867_ack_interrupt,
+		.config_intr	= dp83867_config_intr,
+
+		.config_aneg	= genphy_config_aneg,
+		.read_status	= genphy_read_status,
+		.suspend	= genphy_suspend,
+		.resume		= genphy_resume,
+
+		.driver		= {.owner = THIS_MODULE,}
+	},
+};
+module_phy_driver(dp83867_driver);
+
+static struct mdio_device_id __maybe_unused dp83867_tbl[] = {
+	{ DP83867_PHY_ID, 0xfffffff0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(mdio, dp83867_tbl);
+
+MODULE_DESCRIPTION("Texas Instruments DP83867 PHY driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 8644f03..0dbc445 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -139,10 +139,7 @@
 	if (c < 0)
 		return c;
 
-	if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+	if (phy_interface_is_rgmii(phydev)) {
 
 		c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
 		if (c < 0)
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 1b1698f..f721444 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -317,10 +317,7 @@
 	if (err < 0)
 		return err;
 
-	if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+	if (phy_interface_is_rgmii(phydev)) {
 
 		mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) &
 			MII_88E1121_PHY_MSCR_DELAY_MASK;
@@ -469,10 +466,7 @@
 	int err;
 	int temp;
 
-	if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+	if (phy_interface_is_rgmii(phydev)) {
 
 		temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
 		if (temp < 0)
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 414fdf1..fc7abc5 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -81,7 +81,13 @@
 		return -ETIMEDOUT;
 
 	cmd = __raw_readl(priv->base + MDIO_CMD);
-	if (cmd & MDIO_READ_FAIL)
+
+	/* Some broken devices are known not to release the line during
+	 * turn-around, e.g: Broadcom BCM53125 external switches, so check for
+	 * that condition here and ignore the MDIO controller read failure
+	 * indication.
+	 */
+	if (!(bus->phy_ignore_ta_mask & (1 << phy_id)) && (cmd & MDIO_READ_FAIL))
 		return -EIO;
 
 	return cmd & 0xffff;
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index daec9b0..61a543c 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -165,8 +165,11 @@
 
 	ctrl->ops->set_mdio_dir(ctrl, 0);
 
-	/* check the turnaround bit: the PHY should be driving it to zero */
-	if (mdiobb_get_bit(ctrl) != 0) {
+	/* Check the turnaround bit: the PHY should be driving it to zero.  If
+	 * this PHY is listed in phy_ignore_ta_mask as having a broken TA,
+	 * skip the check.
+	 */
+	if (mdiobb_get_bit(ctrl) != 0 &&
+	    !(bus->phy_ignore_ta_mask & (1 << phy))) {
 		/* PHY didn't drive TA low -- flush any bits it
 		 * may be trying to send.
 		 */
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 53d1815..7dc21e5 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -158,6 +158,7 @@
 	new_bus->name = "GPIO Bitbanged MDIO",
 
 	new_bus->phy_mask = pdata->phy_mask;
+	new_bus->phy_ignore_ta_mask = pdata->phy_ignore_ta_mask;
 	new_bus->irq = pdata->irqs;
 	new_bus->parent = dev;
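[ The phy_ignore_ta_mask introduced above flows from platform data onto the
  MDIO bus. A minimal sketch of how a board file might populate it for a
  device with a broken turn-around bit -- illustrative only: the PHY address
  and its pairing with a BCM53125 are assumptions, while the platform-data
  field is the one added by this series:

    #include <linux/platform_data/mdio-gpio.h>

    /* Hypothetical board file: the pseudo-PHY of an external switch sits
     * at MDIO address 4 and never drives the turn-around bit low, so
     * reads from it must not be rejected on MDIO_READ_FAIL.
     */
    static struct mdio_gpio_platform_data board_mdio_pdata = {
    	.phy_mask		= ~(1 << 4),	/* scan only address 4 */
    	.phy_ignore_ta_mask	= 1 << 4,	/* broken TA at address 4 */
    };
]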
 
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index ebdc357..499185e 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -288,9 +288,10 @@
 }
 
 static int ksz9021_load_values_from_of(struct phy_device *phydev,
-				       struct device_node *of_node, u16 reg,
-				       char *field1, char *field2,
-				       char *field3, char *field4)
+				       const struct device_node *of_node,
+				       u16 reg,
+				       const char *field1, const char *field2,
+				       const char *field3, const char *field4)
 {
 	int val1 = -1;
 	int val2 = -2;
@@ -336,8 +337,8 @@
 
 static int ksz9021_config_init(struct phy_device *phydev)
 {
-	struct device *dev = &phydev->dev;
-	struct device_node *of_node = dev->of_node;
+	const struct device *dev = &phydev->dev;
+	const struct device_node *of_node = dev->of_node;
 
 	if (!of_node && dev->parent->of_node)
 		of_node = dev->parent->of_node;
@@ -365,6 +366,11 @@
 #define KSZ9031_PS_TO_REG		60
 
 /* Extended registers */
+/* MMD Address 0x0 */
+#define MII_KSZ9031RN_FLP_BURST_TX_LO	3
+#define MII_KSZ9031RN_FLP_BURST_TX_HI	4
+
+/* MMD Address 0x2 */
 #define MII_KSZ9031RN_CONTROL_PAD_SKEW	4
 #define MII_KSZ9031RN_RX_DATA_PAD_SKEW	5
 #define MII_KSZ9031RN_TX_DATA_PAD_SKEW	6
@@ -389,9 +395,9 @@
 }
 
 static int ksz9031_of_load_skew_values(struct phy_device *phydev,
-				       struct device_node *of_node,
+				       const struct device_node *of_node,
 				       u16 reg, size_t field_sz,
-				       char *field[], u8 numfields)
+				       const char *field[], u8 numfields)
 {
 	int val[4] = {-1, -2, -3, -4};
 	int matches = 0;
@@ -425,20 +431,36 @@
 	return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
 }
 
+static int ksz9031_center_flp_timing(struct phy_device *phydev)
+{
+	int result;
+
+	/* Center KSZ9031RNX FLP timing at 16ms. */
+	result = ksz9031_extended_write(phydev, OP_DATA, 0,
+					MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006);
+	if (result)
+		return result;
+
+	result = ksz9031_extended_write(phydev, OP_DATA, 0,
+					MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80);
+	if (result)
+		return result;
+
+	return genphy_restart_aneg(phydev);
+}
+
 static int ksz9031_config_init(struct phy_device *phydev)
 {
-	struct device *dev = &phydev->dev;
-	struct device_node *of_node = dev->of_node;
-	char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
-	char *rx_data_skews[4] = {
+	const struct device *dev = &phydev->dev;
+	const struct device_node *of_node = dev->of_node;
+	static const char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
+	static const char *rx_data_skews[4] = {
 		"rxd0-skew-ps", "rxd1-skew-ps",
 		"rxd2-skew-ps", "rxd3-skew-ps"
 	};
-	char *tx_data_skews[4] = {
+	static const char *tx_data_skews[4] = {
 		"txd0-skew-ps", "txd1-skew-ps",
 		"txd2-skew-ps", "txd3-skew-ps"
 	};
-	char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
+	static const char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
 
 	if (!of_node && dev->parent->of_node)
 		of_node = dev->parent->of_node;
@@ -460,7 +482,8 @@
 				MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
 				tx_data_skews, 4);
 	}
-	return 0;
+
+	return ksz9031_center_flp_timing(phydev);
 }
 
 #define KSZ8873MLL_GLOBAL_CONTROL_4	0x06
@@ -519,7 +542,7 @@
 static int kszphy_probe(struct phy_device *phydev)
 {
 	const struct kszphy_type *type = phydev->drv->driver_data;
-	struct device_node *np = phydev->dev.of_node;
+	const struct device_node *np = phydev->dev.of_node;
 	struct kszphy_priv *priv;
 	struct clk *clk;
 	int ret;
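[ The FLP centering above goes through the driver's extended-register
  helpers, which wrap the standard clause-22 indirect MMD access. A hedged
  sketch of that underlying sequence -- the helper name and the 0x0d/0x0e
  control/data offsets are the conventional ones, not taken from this diff:

    /* Indirect MMD write via the standard clause-22 access registers;
     * this is the pattern helpers like ksz9031_extended_write() build on.
     */
    #define MMD_CTRL	0x0d	/* MMD access control register */
    #define MMD_DATA	0x0e	/* MMD access data register */

    static void mmd_indirect_write(struct phy_device *phydev,
    			       int devad, u16 regnum, u16 val)
    {
    	phy_write(phydev, MMD_CTRL, devad);		/* select MMD device */
    	phy_write(phydev, MMD_DATA, regnum);		/* register address */
    	phy_write(phydev, MMD_CTRL, devad | 0x4000);	/* data, no post-inc */
    	phy_write(phydev, MMD_DATA, val);		/* write the value */
    }
]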
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 47cd578..b2197b5 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -58,6 +58,31 @@
 	}
 }
 
+#define PHY_STATE_STR(_state)			\
+	case PHY_##_state:			\
+		return __stringify(_state);	\
+
+static const char *phy_state_to_str(enum phy_state st)
+{
+	switch (st) {
+	PHY_STATE_STR(DOWN)
+	PHY_STATE_STR(STARTING)
+	PHY_STATE_STR(READY)
+	PHY_STATE_STR(PENDING)
+	PHY_STATE_STR(UP)
+	PHY_STATE_STR(AN)
+	PHY_STATE_STR(RUNNING)
+	PHY_STATE_STR(NOLINK)
+	PHY_STATE_STR(FORCING)
+	PHY_STATE_STR(CHANGELINK)
+	PHY_STATE_STR(HALTED)
+	PHY_STATE_STR(RESUMING)
+	}
+
+	return NULL;
+}
+
 /**
  * phy_print_status - Convenience function to print out the current phy status
  * @phydev: the phy_device struct
@@ -784,10 +809,13 @@
 	struct phy_device *phydev =
 			container_of(dwork, struct phy_device, state_queue);
 	bool needs_aneg = false, do_suspend = false;
+	enum phy_state old_state;
 	int err = 0;
 
 	mutex_lock(&phydev->lock);
 
+	old_state = phydev->state;
+
 	if (phydev->drv->link_change_notify)
 		phydev->drv->link_change_notify(phydev);
 
@@ -952,6 +980,9 @@
 	if (err < 0)
 		phy_error(phydev);
 
+	dev_dbg(&phydev->dev, "PHY state change %s -> %s\n",
+		phy_state_to_str(old_state), phy_state_to_str(phydev->state));
+
 	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
 			   PHY_STATE_TIME * HZ);
 }
@@ -1062,8 +1093,7 @@
 	if ((phydev->duplex == DUPLEX_FULL) &&
 	    ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
 	    (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
-	    (phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
-	     phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) ||
+	     phy_interface_is_rgmii(phydev) ||
 	     phy_is_internal(phydev))) {
 		int eee_lp, eee_cap, eee_adv;
 		u32 lp, cap, adv;
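[ PHY_STATE_STR() keeps the printed state names in lock-step with the enum by
  stringifying the identifier itself, so a renamed state can never print stale
  text. The same pattern works for any enum; a minimal sketch with a
  hypothetical enum:

    #include <linux/stringify.h>

    enum foo_state { FOO_IDLE, FOO_BUSY };	/* hypothetical */

    #define FOO_STATE_STR(_state)		\
    	case FOO_##_state:			\
    		return __stringify(_state);

    static const char *foo_state_to_str(enum foo_state st)
    {
    	switch (st) {
    	FOO_STATE_STR(IDLE)
    	FOO_STATE_STR(BUSY)
    	}
    	return "unknown";
    }
]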
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 96a0f0f..4535361 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -22,8 +22,12 @@
 #define RTL821x_INER		0x12
 #define RTL821x_INER_INIT	0x6400
 #define RTL821x_INSR		0x13
+#define RTL8211E_INER_LINK_STATUS 0x400
 
-#define	RTL8211E_INER_LINK_STATUS	0x400
+#define RTL8211F_INER_LINK_STATUS 0x0010
+#define RTL8211F_INSR		0x1d
+#define RTL8211F_PAGE_SELECT	0x1f
+#define RTL8211F_TX_DELAY	0x100
 
 MODULE_DESCRIPTION("Realtek PHY driver");
 MODULE_AUTHOR("Johnson Leung");
@@ -38,6 +42,18 @@
 	return (err < 0) ? err : 0;
 }
 
+static int rtl8211f_ack_interrupt(struct phy_device *phydev)
+{
+	int err;
+
+	phy_write(phydev, RTL8211F_PAGE_SELECT, 0xa43);
+	err = phy_read(phydev, RTL8211F_INSR);
+	/* restore to default page 0 */
+	phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0);
+
+	return (err < 0) ? err : 0;
+}
+
 static int rtl8211b_config_intr(struct phy_device *phydev)
 {
 	int err;
@@ -64,6 +80,41 @@
 	return err;
 }
 
+static int rtl8211f_config_intr(struct phy_device *phydev)
+{
+	int err;
+
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+		err = phy_write(phydev, RTL821x_INER,
+				RTL8211F_INER_LINK_STATUS);
+	else
+		err = phy_write(phydev, RTL821x_INER, 0);
+
+	return err;
+}
+
+static int rtl8211f_config_init(struct phy_device *phydev)
+{
+	int ret;
+	u16 reg;
+
+	ret = genphy_config_init(phydev);
+	if (ret < 0)
+		return ret;
+
+	if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
+		/* enable TXDLY */
+		phy_write(phydev, RTL8211F_PAGE_SELECT, 0xd08);
+		reg = phy_read(phydev, 0x11);
+		reg |= RTL8211F_TX_DELAY;
+		phy_write(phydev, 0x11, reg);
+		/* restore to default page 0 */
+		phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0);
+	}
+
+	return 0;
+}
+
 static struct phy_driver realtek_drvs[] = {
 	{
 		.phy_id         = 0x00008201,
@@ -98,6 +149,20 @@
 		.suspend	= genphy_suspend,
 		.resume		= genphy_resume,
 		.driver		= { .owner = THIS_MODULE,},
+	}, {
+		.phy_id		= 0x001cc916,
+		.name		= "RTL8211F Gigabit Ethernet",
+		.phy_id_mask	= 0x001fffff,
+		.features	= PHY_GBIT_FEATURES,
+		.flags		= PHY_HAS_INTERRUPT,
+		.config_aneg	= &genphy_config_aneg,
+		.config_init	= &rtl8211f_config_init,
+		.read_status	= &genphy_read_status,
+		.ack_interrupt	= &rtl8211f_ack_interrupt,
+		.config_intr	= &rtl8211f_config_intr,
+		.suspend	= genphy_suspend,
+		.resume		= genphy_resume,
+		.driver		= { .owner = THIS_MODULE },
 	},
 };
 
@@ -106,6 +171,7 @@
 static struct mdio_device_id __maybe_unused realtek_tbl[] = {
 	{ 0x001cc912, 0x001fffff },
 	{ 0x001cc915, 0x001fffff },
+	{ 0x001cc916, 0x001fffff },
 	{ }
 };
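[ Both new RTL8211F paths follow the same paged-register discipline: select
  the page, touch the register, restore page 0. A hedged sketch of that
  read-modify-write pattern -- the helper name is hypothetical, and a stricter
  variant would also check each phy_read()/phy_write() return value, which
  the driver code above elides:

    static void rtl8211f_paged_set_bit(struct phy_device *phydev,
    				   u16 page, u32 regnum, u16 bit)
    {
    	int reg;

    	phy_write(phydev, RTL8211F_PAGE_SELECT, page);	/* switch page */
    	reg = phy_read(phydev, regnum);
    	if (reg >= 0)
    		phy_write(phydev, regnum, reg | bit);
    	phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0);	/* back to page 0 */
    }
]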
 
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index b62a5e3..3837ae3 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -550,11 +550,11 @@
  * Initialize a new struct sock.
  *
  **********************************************************************/
-static int pppoe_create(struct net *net, struct socket *sock)
+static int pppoe_create(struct net *net, struct socket *sock, int kern)
 {
 	struct sock *sk;
 
-	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto);
+	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto, kern);
 	if (!sk)
 		return -ENOMEM;
 
diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c
index 2940e9f..0e1b306 100644
--- a/drivers/net/ppp/pppox.c
+++ b/drivers/net/ppp/pppox.c
@@ -118,7 +118,7 @@
 	    !try_module_get(pppox_protos[protocol]->owner))
 		goto out;
 
-	rc = pppox_protos[protocol]->create(net, sock);
+	rc = pppox_protos[protocol]->create(net, sock, kern);
 
 	module_put(pppox_protos[protocol]->owner);
 out:
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index e3bfbd4d..686f37d 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -28,8 +28,6 @@
 #include <linux/file.h>
 #include <linux/in.h>
 #include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
 #include <linux/rcupdate.h>
 #include <linux/spinlock.h>
 
@@ -561,14 +559,14 @@
 	skb_queue_purge(&sk->sk_receive_queue);
 }
 
-static int pptp_create(struct net *net, struct socket *sock)
+static int pptp_create(struct net *net, struct socket *sock, int kern)
 {
 	int error = -ENOMEM;
 	struct sock *sk;
 	struct pppox_sock *po;
 	struct pptp_opt *opt;
 
-	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto);
+	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto, kern);
 	if (!sk)
 		goto out;
 
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 6928448..daa054b 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1924,7 +1924,7 @@
 	struct team *team = netdev_priv(dev);
 	netdev_features_t mask;
 
-	mask = features | NETIF_F_HW_SWITCH_OFFLOAD;
+	mask = features;
 	features &= ~NETIF_F_ONE_FOR_ALL;
 	features |= NETIF_F_ALL_FOR_ALL;
 
@@ -1977,8 +1977,12 @@
 	.ndo_del_slave		= team_del_slave,
 	.ndo_fix_features	= team_fix_features,
 	.ndo_change_carrier     = team_change_carrier,
-	.ndo_bridge_setlink     = ndo_dflt_netdev_switch_port_bridge_setlink,
-	.ndo_bridge_dellink     = ndo_dflt_netdev_switch_port_bridge_dellink,
+	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
+	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
+	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
+	.ndo_fdb_add		= switchdev_port_fdb_add,
+	.ndo_fdb_del		= switchdev_port_fdb_del,
+	.ndo_fdb_dump		= switchdev_port_fdb_dump,
 	.ndo_features_check	= passthru_features_check,
 };
 
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e470ae5..1a1c4f7 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -146,7 +146,6 @@
 	struct socket socket;
 	struct socket_wq wq;
 	struct tun_struct __rcu *tun;
-	struct net *net;
 	struct fasync_struct *fasync;
 	/* only used for fasnyc */
 	unsigned int flags;
@@ -493,10 +492,7 @@
 			    tun->dev->reg_state == NETREG_REGISTERED)
 				unregister_netdevice(tun->dev);
 		}
-
-		BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
-				 &tfile->socket.flags));
-		sk_release_kernel(&tfile->sk);
+		sock_put(&tfile->sk);
 	}
 }
 
@@ -1492,18 +1488,10 @@
 	return ret;
 }
 
-static int tun_release(struct socket *sock)
-{
-	if (sock->sk)
-		sock_put(sock->sk);
-	return 0;
-}
-
 /* Ops structure to mimic raw sockets with tun */
 static const struct proto_ops tun_socket_ops = {
 	.sendmsg = tun_sendmsg,
 	.recvmsg = tun_recvmsg,
-	.release = tun_release,
 };
 
 static struct proto tun_proto = {
@@ -1865,7 +1853,7 @@
 	if (cmd == TUNSETIFF && !tun) {
 		ifr.ifr_name[IFNAMSIZ-1] = '\0';
 
-		ret = tun_set_iff(tfile->net, file, &ifr);
+		ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr);
 
 		if (ret)
 			goto unlock;
@@ -2154,16 +2142,16 @@
 
 static int tun_chr_open(struct inode *inode, struct file * file)
 {
+	struct net *net = current->nsproxy->net_ns;
 	struct tun_file *tfile;
 
 	DBG1(KERN_INFO, "tunX: tun_chr_open\n");
 
-	tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
-					    &tun_proto);
+	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
+					    &tun_proto, 0);
 	if (!tfile)
 		return -ENOMEM;
 	RCU_INIT_POINTER(tfile->tun, NULL);
-	tfile->net = get_net(current->nsproxy->net_ns);
 	tfile->flags = 0;
 	tfile->ifindex = 0;
 
@@ -2174,13 +2162,11 @@
 	tfile->socket.ops = &tun_socket_ops;
 
 	sock_init_data(&tfile->socket, &tfile->sk);
-	sk_change_net(&tfile->sk, tfile->net);
 
 	tfile->sk.sk_write_space = tun_sock_write_space;
 	tfile->sk.sk_sndbuf = INT_MAX;
 
 	file->private_data = tfile;
-	set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
 	INIT_LIST_HEAD(&tfile->next);
 
 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
@@ -2191,10 +2177,8 @@
 static int tun_chr_close(struct inode *inode, struct file *file)
 {
 	struct tun_file *tfile = file->private_data;
-	struct net *net = tfile->net;
 
 	tun_detach(tfile, true);
-	put_net(net);
 
 	return 0;
 }
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index 3718d02..221a530 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -1,7 +1,7 @@
 /*
  * Linux driver for VMware's vmxnet3 ethernet NIC.
  *
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2015, VMware, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -277,6 +277,40 @@
 #endif  /* __BIG_ENDIAN_BITFIELD */
 };
 
+struct Vmxnet3_RxCompDescExt {
+	__le32		dword1;
+	u8		segCnt;       /* Number of aggregated packets */
+	u8		dupAckCnt;    /* Number of duplicate Acks */
+	__le16		tsDelta;      /* TCP timestamp difference */
+	__le32		dword2;
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32		gen:1;        /* generation bit */
+	u32		type:7;       /* completion type */
+	u32		fcs:1;        /* Frame CRC correct */
+	u32		frg:1;        /* IP Fragment */
+	u32		v4:1;         /* IPv4 */
+	u32		v6:1;         /* IPv6 */
+	u32		ipc:1;        /* IP Checksum Correct */
+	u32		tcp:1;        /* TCP packet */
+	u32		udp:1;        /* UDP packet */
+	u32		tuc:1;        /* TCP/UDP Checksum Correct */
+	u32		mss:16;
+#else
+	u32		mss:16;
+	u32		tuc:1;        /* TCP/UDP Checksum Correct */
+	u32		udp:1;        /* UDP packet */
+	u32		tcp:1;        /* TCP packet */
+	u32		ipc:1;        /* IP Checksum Correct */
+	u32		v6:1;         /* IPv6 */
+	u32		v4:1;         /* IPv4 */
+	u32		frg:1;        /* IP Fragment */
+	u32		fcs:1;        /* Frame CRC correct */
+	u32		type:7;       /* completion type */
+	u32		gen:1;        /* generation bit */
+#endif  /* __BIG_ENDIAN_BITFIELD */
+};
+
 /* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */
 #define VMXNET3_RCD_TUC_SHIFT	16
 #define VMXNET3_RCD_IPC_SHIFT	19
@@ -310,6 +344,7 @@
 	struct Vmxnet3_RxDesc		rxd;
 	struct Vmxnet3_TxCompDesc	tcd;
 	struct Vmxnet3_RxCompDesc	rcd;
+	struct Vmxnet3_RxCompDescExt	rcdExt;
 };
 
 #define VMXNET3_INIT_GEN       1
@@ -361,6 +396,7 @@
 /* completion descriptor types */
 #define VMXNET3_CDTYPE_TXCOMP      0    /* Tx Completion Descriptor */
 #define VMXNET3_CDTYPE_RXCOMP      3    /* Rx Completion Descriptor */
+#define VMXNET3_CDTYPE_RXCOMP_LRO  4    /* Rx Completion Descriptor for LRO */
 
 enum {
 	VMXNET3_GOS_BITS_UNK    = 0,   /* unknown */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 61c0840..da11bb5 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -861,6 +861,9 @@
 					     , skb_headlen(skb));
 		}
 
+		if (skb->len <= VMXNET3_HDR_COPY_SIZE)
+			ctx->copy_size = skb->len;
+
 		/* make sure headers are accessible directly */
 		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
 			goto err;
@@ -1160,6 +1163,52 @@
 }
 
 
+static u32
+vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
+		    union Vmxnet3_GenericDesc *gdesc)
+{
+	u32 hlen, maplen;
+	union {
+		void *ptr;
+		struct ethhdr *eth;
+		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
+		struct tcphdr *tcp;
+	} hdr;
+
+	BUG_ON(gdesc->rcd.tcp == 0);
+
+	maplen = skb_headlen(skb);
+	if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
+		return 0;
+
+	hdr.eth = eth_hdr(skb);
+	if (gdesc->rcd.v4) {
+		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP));
+		hdr.ptr += sizeof(struct ethhdr);
+		BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
+		hlen = hdr.ipv4->ihl << 2;
+		hdr.ptr += hdr.ipv4->ihl << 2;
+	} else if (gdesc->rcd.v6) {
+		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6));
+		hdr.ptr += sizeof(struct ethhdr);
+		/* Use an estimated value, since we also need to handle
+		 * TSO case.
+		 */
+		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
+			return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
+		hlen = sizeof(struct ipv6hdr);
+		hdr.ptr += sizeof(struct ipv6hdr);
+	} else {
+		/* Non-IP packet, don't estimate header length */
+		return 0;
+	}
+
+	if (hlen + sizeof(struct tcphdr) > maplen)
+		return 0;
+
+	return (hlen + (hdr.tcp->doff << 2));
+}
+
 static int
 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 		       struct vmxnet3_adapter *adapter, int quota)
@@ -1171,6 +1220,7 @@
 	bool skip_page_frags = false;
 	struct Vmxnet3_RxCompDesc *rcd;
 	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
+	u16 segCnt = 0, mss = 0;
 #ifdef __BIG_ENDIAN_BITFIELD
 	struct Vmxnet3_RxDesc rxCmdDesc;
 	struct Vmxnet3_RxCompDesc rxComp;
@@ -1259,7 +1309,19 @@
 						       PCI_DMA_FROMDEVICE);
 			rxd->addr = cpu_to_le64(rbi->dma_addr);
 			rxd->len = rbi->len;
+			if (adapter->version == 2 &&
+			    rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
+				struct Vmxnet3_RxCompDescExt *rcdlro;
+				rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
 
+				segCnt = rcdlro->segCnt;
+				WARN_ON_ONCE(segCnt <= 1);
+				mss = rcdlro->mss;
+				if (unlikely(segCnt <= 1))
+					segCnt = 0;
+			} else {
+				segCnt = 0;
+			}
 		} else {
 			BUG_ON(ctx->skb == NULL && !skip_page_frags);
 
@@ -1273,47 +1335,75 @@
 			if (skip_page_frags)
 				goto rcd_done;
 
-			new_page = alloc_page(GFP_ATOMIC);
-			if (unlikely(new_page == NULL)) {
+			if (rcd->len) {
+				new_page = alloc_page(GFP_ATOMIC);
 				/* Replacement page frag could not be allocated.
 				 * Reuse this page. Drop the pkt and free the
 				 * skb which contained this page as a frag. Skip
 				 * processing all the following non-sop frags.
 				 */
-				rq->stats.rx_buf_alloc_failure++;
-				dev_kfree_skb(ctx->skb);
-				ctx->skb = NULL;
-				skip_page_frags = true;
-				goto rcd_done;
-			}
+				if (unlikely(!new_page)) {
+					rq->stats.rx_buf_alloc_failure++;
+					dev_kfree_skb(ctx->skb);
+					ctx->skb = NULL;
+					skip_page_frags = true;
+					goto rcd_done;
+				}
 
-			if (rcd->len) {
 				dma_unmap_page(&adapter->pdev->dev,
 					       rbi->dma_addr, rbi->len,
 					       PCI_DMA_FROMDEVICE);
 
 				vmxnet3_append_frag(ctx->skb, rcd, rbi);
-			}
 
-			/* Immediate refill */
-			rbi->page = new_page;
-			rbi->dma_addr = dma_map_page(&adapter->pdev->dev,
-						     rbi->page,
-						     0, PAGE_SIZE,
-						     PCI_DMA_FROMDEVICE);
-			rxd->addr = cpu_to_le64(rbi->dma_addr);
-			rxd->len = rbi->len;
+				/* Immediate refill */
+				rbi->page = new_page;
+				rbi->dma_addr =
+					dma_map_page(&adapter->pdev->dev,
+						     rbi->page, 0, PAGE_SIZE,
+						     PCI_DMA_FROMDEVICE);
+				rxd->addr = cpu_to_le64(rbi->dma_addr);
+				rxd->len = rbi->len;
+			}
 		}
 
 
 		skb = ctx->skb;
 		if (rcd->eop) {
+			u32 mtu = adapter->netdev->mtu;
 			skb->len += skb->data_len;
 
 			vmxnet3_rx_csum(adapter, skb,
 					(union Vmxnet3_GenericDesc *)rcd);
 			skb->protocol = eth_type_trans(skb, adapter->netdev);
+			if (!rcd->tcp || !adapter->lro)
+				goto not_lro;
 
+			if (segCnt != 0 && mss != 0) {
+				skb_shinfo(skb)->gso_type = rcd->v4 ?
+					SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+				skb_shinfo(skb)->gso_size = mss;
+				skb_shinfo(skb)->gso_segs = segCnt;
+			} else if (segCnt != 0 || skb->len > mtu) {
+				u32 hlen;
+
+				hlen = vmxnet3_get_hdr_len(adapter, skb,
+					(union Vmxnet3_GenericDesc *)rcd);
+				if (hlen == 0)
+					goto not_lro;
+
+				skb_shinfo(skb)->gso_type =
+					rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+				if (segCnt != 0) {
+					skb_shinfo(skb)->gso_segs = segCnt;
+					skb_shinfo(skb)->gso_size =
+						DIV_ROUND_UP(skb->len -
+							hlen, segCnt);
+				} else {
+					skb_shinfo(skb)->gso_size = mtu - hlen;
+				}
+			}
+not_lro:
 			if (unlikely(rcd->ts))
 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
 
@@ -3038,14 +3128,19 @@
 		goto err_alloc_pci;
 
 	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
-	if (ver & 1) {
+	if (ver & 2) {
+		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 2);
+		adapter->version = 2;
+	} else if (ver & 1) {
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
+		adapter->version = 1;
 	} else {
 		dev_err(&pdev->dev,
 			"Incompatible h/w version (0x%x) for adapter\n", ver);
 		err = -EBUSY;
 		goto err_ver;
 	}
+	dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
 
 	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
 	if (ver & 1) {
@@ -3184,6 +3279,32 @@
 	free_netdev(netdev);
 }
 
+static void vmxnet3_shutdown_device(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	unsigned long flags;
+
+	/* Reset_work may be in the middle of resetting the device; wait for
+	 * it to complete.
+	 */
+	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
+		msleep(1);
+
+	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
+			     &adapter->state)) {
+		clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
+		return;
+	}
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
+	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+			       VMXNET3_CMD_QUIESCE_DEV);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+	vmxnet3_disable_all_intrs(adapter);
+
+	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
+}
+
 
 #ifdef CONFIG_PM
 
@@ -3360,6 +3481,7 @@
 	.id_table	= vmxnet3_pciid_table,
 	.probe		= vmxnet3_probe_device,
 	.remove		= vmxnet3_remove_device,
+	.shutdown	= vmxnet3_shutdown_device,
 #ifdef CONFIG_PM
 	.driver.pm	= &vmxnet3_pm_ops,
 #endif
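[ When an LRO completion carries a segment count but no MSS, the receive path
  above reconstructs an approximate gso_size from the aggregate length. A
  worked sketch of that arithmetic -- numbers are illustrative and the helper
  name is hypothetical:

    #include <linux/kernel.h>	/* DIV_ROUND_UP */

    static u32 lro_estimate_gso_size(u32 skb_len, u32 hlen, u16 seg_cnt)
    {
    	/* e.g. skb_len = 9000, hlen = 54, seg_cnt = 6:
    	 * DIV_ROUND_UP(8946, 6) = 1491 bytes per segment.
    	 */
    	return DIV_ROUND_UP(skb_len - hlen, seg_cnt);
    }
]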
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 6bb769a..e9f1075 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -328,6 +328,10 @@
 
 	u8			__iomem *hw_addr0; /* for BAR 0 */
 	u8			__iomem *hw_addr1; /* for BAR 1 */
+	u8                              version;
+
+	bool				rxcsum;
+	bool				lro;
 
 #ifdef VMXNET3_RSS
 	struct UPT1_RSSConf		*rss_conf;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 21a0fbf..34c519e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -336,7 +336,7 @@
 
 	if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
 	    nla_put_s32(skb, NDA_LINK_NETNSID,
-			peernet2id(dev_net(vxlan->dev), vxlan->net)))
+			peernet2id_alloc(dev_net(vxlan->dev), vxlan->net)))
 		goto nla_put_failure;
 
 	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
@@ -1921,6 +1921,8 @@
 		memset(&fl4, 0, sizeof(fl4));
 		fl4.flowi4_oif = rdst->remote_ifindex;
 		fl4.flowi4_tos = RT_TOS(tos);
+		fl4.flowi4_mark = skb->mark;
+		fl4.flowi4_proto = IPPROTO_UDP;
 		fl4.daddr = dst->sin.sin_addr.s_addr;
 		fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
 
@@ -1981,6 +1983,7 @@
 		fl6.flowi6_oif = rdst->remote_ifindex;
 		fl6.daddr = dst->sin6.sin6_addr;
 		fl6.saddr = vxlan->saddr.sin6.sin6_addr;
+		fl6.flowi6_mark = skb->mark;
 		fl6.flowi6_proto = IPPROTO_UDP;
 
 		if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
@@ -2128,9 +2131,10 @@
 	if (!netif_running(vxlan->dev))
 		return;
 
-	spin_lock_bh(&vxlan->hash_lock);
 	for (h = 0; h < FDB_HASH_SIZE; ++h) {
 		struct hlist_node *p, *n;
+
+		spin_lock_bh(&vxlan->hash_lock);
 		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
 			struct vxlan_fdb *f
 				= container_of(p, struct vxlan_fdb, hlist);
@@ -2149,8 +2153,8 @@
 			} else if (time_before(timeout, next_timer))
 				next_timer = timeout;
 		}
+		spin_unlock_bh(&vxlan->hash_lock);
 	}
-	spin_unlock_bh(&vxlan->hash_lock);
 
 	mod_timer(&vxlan->age_timer, next_timer);
 }
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index bcfa01a..7193b73 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -517,7 +517,7 @@
 		 */
 		set_current_state(TASK_INTERRUPTIBLE);
 		cosa_putstatus(cosa, SR_TX_INT_ENA);
-		schedule_timeout(30);
+		schedule_timeout(msecs_to_jiffies(300));
 		irq = probe_irq_off(irqs);
 		/* Disable all IRQs from the card */
 		cosa_putstatus(cosa, 0);
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 0822356..7a72407 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -551,7 +551,7 @@
 			       msg, i);
 			goto done;
 		}
-		schedule_timeout_uninterruptible(10);
+		schedule_timeout_uninterruptible(msecs_to_jiffies(100));
 		rmb();
 	} while (++i > 0);
 	netdev_err(dev, "%s timeout\n", msg);
@@ -596,7 +596,7 @@
 		    (dpriv->iqtx[cur] & cpu_to_le32(Xpr)))
 			break;
 		smp_rmb();
-		schedule_timeout_uninterruptible(10);
+		schedule_timeout_uninterruptible(msecs_to_jiffies(100));
 	} while (++i > 0);
 
 	return (i >= 0 ) ? i : -EAGAIN;
@@ -1033,7 +1033,7 @@
 	/* Flush posted writes */
 	readl(ioaddr + GSTAR);
 
-	schedule_timeout_uninterruptible(10);
+	schedule_timeout_uninterruptible(msecs_to_jiffies(100));
 
 	for (i = 0; i < 16; i++)
 		pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
@@ -1046,7 +1046,6 @@
 static int dscc4_open(struct net_device *dev)
 {
 	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
-	struct dscc4_pci_priv *ppriv;
 	int ret = -EAGAIN;
 
 	if ((dscc4_loopback_check(dpriv) < 0))
@@ -1055,8 +1054,6 @@
 	if ((ret = hdlc_open(dev)))
 		goto err;
 
-	ppriv = dpriv->pci_priv;
-
 	/*
 	 * Due to various bugs, there is no way to reliably reset a
 	 * specific port (manufacturer's dependent special PCI #RST wiring
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 2f5eda8..6676607 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -40,7 +40,6 @@
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
 #include <linux/stat.h>
-#include <linux/netfilter.h>
 #include <linux/module.h>
 #include <linux/lapb.h>
 #include <linux/init.h>
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 16604bd..a63ab2e 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -277,6 +277,7 @@
 source "drivers/net/wireless/orinoco/Kconfig"
 source "drivers/net/wireless/p54/Kconfig"
 source "drivers/net/wireless/rt2x00/Kconfig"
+source "drivers/net/wireless/mediatek/Kconfig"
 source "drivers/net/wireless/rtlwifi/Kconfig"
 source "drivers/net/wireless/ti/Kconfig"
 source "drivers/net/wireless/zd1211rw/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 0c88916..6b9e729 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -45,6 +45,8 @@
 obj-$(CONFIG_IWLEGACY)	+= iwlegacy/
 obj-$(CONFIG_RT2X00)	+= rt2x00/
 
+obj-$(CONFIG_WL_MEDIATEK)	+= mediatek/
+
 obj-$(CONFIG_P54_COMMON)	+= p54/
 
 obj-$(CONFIG_ATH_CARDS)		+= ath/
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index f07a618..15f057e 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1098,14 +1098,18 @@
 		pci_read_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, &cline);
 
 		switch (cline) {
-		case  0x8: reg |= (0x1 << 14);
-			   break;
-		case 0x16: reg |= (0x2 << 14);
-			   break;
-		case 0x32: reg |= (0x3 << 14);
-			   break;
-		  default: reg |= (0x0 << 14);
-			   break;
+		case  0x8:
+			reg |= (0x1 << 14);
+			break;
+		case 0x10:
+			reg |= (0x2 << 14);
+			break;
+		case 0x20:
+			reg |= (0x3 << 14);
+			break;
+		default:
+			reg |= (0x0 << 14);
+			break;
 		}
 	}
 
@@ -1353,12 +1357,7 @@
 
 	new_flags = 0;
 
-	if (*total_flags & FIF_PROMISC_IN_BSS) {
-		new_flags |= FIF_PROMISC_IN_BSS;
-		priv->nar |= ADM8211_NAR_PR;
-		priv->nar &= ~ADM8211_NAR_MM;
-		mc_filter[1] = mc_filter[0] = ~0;
-	} else if (*total_flags & FIF_ALLMULTI || multicast == ~(0ULL)) {
+	if (*total_flags & FIF_ALLMULTI || multicast == ~(0ULL)) {
 		new_flags |= FIF_ALLMULTI;
 		priv->nar &= ~ADM8211_NAR_PR;
 		priv->nar |= ADM8211_NAR_MM;
@@ -1374,9 +1373,9 @@
 	ADM8211_CSR_READ(NAR);
 
 	if (priv->nar & ADM8211_NAR_PR)
-		dev->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
+		ieee80211_hw_set(dev, RX_INCLUDES_FCS);
 	else
-		dev->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
+		__clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, dev->flags);
 
 	if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
 		adm8211_set_bssid(dev, bcast);
@@ -1862,8 +1861,8 @@
 	SET_IEEE80211_PERM_ADDR(dev, perm_addr);
 
 	dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr);
-	/* dev->flags = IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */
-	dev->flags = IEEE80211_HW_SIGNAL_UNSPEC;
+	/* RX_INCLUDES_FCS is only set while in promisc mode */
+	ieee80211_hw_set(dev, SIGNAL_UNSPEC);
 	dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
 
 	dev->max_signal = 100;    /* FIXME: find better value */
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 49219c5..dab2513 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -2360,8 +2360,8 @@
 	priv->hw->wiphy->max_scan_ie_len = 0;
 	priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
 	priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &at76_supported_band;
-	priv->hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
-			  IEEE80211_HW_SIGNAL_UNSPEC;
+	ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
+	ieee80211_hw_set(priv->hw, SIGNAL_UNSPEC);
 	priv->hw->max_signal = 100;
 
 	SET_IEEE80211_DEV(priv->hw, &interface->dev);
diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/at76c50x-usb.h
index 55090a3..ae03271 100644
--- a/drivers/net/wireless/at76c50x-usb.h
+++ b/drivers/net/wireless/at76c50x-usb.h
@@ -447,7 +447,7 @@
 	int mac80211_registered;
 };
 
-#define AT76_SUPPORTED_FILTERS FIF_PROMISC_IN_BSS
+#define AT76_SUPPORTED_FILTERS 0
 
 #define SCAN_POLL_INTERVAL	(HZ / 4)
 
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 5147ebe..3b343c6 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1319,8 +1319,7 @@
 
 }
 
-#define AR5523_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
-				  FIF_ALLMULTI | \
+#define AR5523_SUPPORTED_FILTERS (FIF_ALLMULTI | \
 				  FIF_FCSFAIL | \
 				  FIF_OTHER_BSS)
 
@@ -1683,9 +1682,9 @@
 			(id->driver_info & AR5523_FLAG_ABG) ? '5' : '2');
 
 	ar->vif = NULL;
-	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
-		    IEEE80211_HW_SIGNAL_DBM |
-		    IEEE80211_HW_HAS_RATE_CONTROL;
+	ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+	ieee80211_hw_set(hw, SIGNAL_DBM);
 	hw->extra_tx_headroom = sizeof(struct ar5523_tx_desc) +
 				sizeof(struct ar5523_chunk);
 	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 7e94810..65ef483 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -251,6 +251,7 @@
  * @ATH_DBG_DFS: radar detection
  * @ATH_DBG_WOW: Wake on Wireless
  * @ATH_DBG_DYNACK: dynack handling
+ * @ATH_DBG_SPECTRAL_SCAN: FFT spectral scan
  * @ATH_DBG_ANY: enable all debugging
  *
  * The debug level is used to control the amount and type of debugging output
@@ -280,6 +281,7 @@
 	ATH_DBG_WOW		= 0x00020000,
 	ATH_DBG_CHAN_CTX	= 0x00040000,
 	ATH_DBG_DYNACK		= 0x00080000,
+	ATH_DBG_SPECTRAL_SCAN	= 0x00100000,
 	ATH_DBG_ANY		= 0xffffffff
 };
 
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index f4dbb3e..9729e69 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -10,13 +10,15 @@
 		 wmi.o \
 		 wmi-tlv.o \
 		 bmi.o \
-		 hw.o
+		 hw.o \
+		 p2p.o
 
 ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
 ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
 ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
 ath10k_core-$(CONFIG_THERMAL) += thermal.o
 ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
+ath10k_core-$(CONFIG_PM) += wow.o
 
 obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
 ath10k_pci-y += pci.o \
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index c0e454b..59496a9 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -48,6 +48,7 @@
 		.name = "qca988x hw2.0",
 		.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
 		.uart_pin = 7,
+		.has_shifted_cc_wraparound = true,
 		.fw = {
 			.dir = QCA988X_HW_2_0_FW_DIR,
 			.fw = QCA988X_HW_2_0_FW_FILE,
@@ -387,7 +388,9 @@
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
 
-	if (!skip_otp && result != 0) {
+	if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+				   ar->fw_features))
+	    && result != 0) {
 		ath10k_err(ar, "otp calibration failed: %d", result);
 		return -EINVAL;
 	}
@@ -482,6 +485,71 @@
 	return 0;
 }
 
+static int ath10k_core_fetch_spec_board_file(struct ath10k *ar)
+{
+	char filename[100];
+
+	scnprintf(filename, sizeof(filename), "board-%s-%s.bin",
+		  ath10k_bus_str(ar->hif.bus), ar->spec_board_id);
+
+	ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
+	if (IS_ERR(ar->board))
+		return PTR_ERR(ar->board);
+
+	ar->board_data = ar->board->data;
+	ar->board_len = ar->board->size;
+	ar->spec_board_loaded = true;
+
+	return 0;
+}
+
+static int ath10k_core_fetch_generic_board_file(struct ath10k *ar)
+{
+	if (!ar->hw_params.fw.board) {
+		ath10k_err(ar, "failed to find board file fw entry\n");
+		return -EINVAL;
+	}
+
+	ar->board = ath10k_fetch_fw_file(ar,
+					 ar->hw_params.fw.dir,
+					 ar->hw_params.fw.board);
+	if (IS_ERR(ar->board))
+		return PTR_ERR(ar->board);
+
+	ar->board_data = ar->board->data;
+	ar->board_len = ar->board->size;
+	ar->spec_board_loaded = false;
+
+	return 0;
+}
+
+static int ath10k_core_fetch_board_file(struct ath10k *ar)
+{
+	int ret;
+
+	if (strlen(ar->spec_board_id) > 0) {
+		ret = ath10k_core_fetch_spec_board_file(ar);
+		if (ret) {
+			ath10k_info(ar, "failed to load spec board file, falling back to generic: %d\n",
+				    ret);
+			goto generic;
+		}
+
+		ath10k_dbg(ar, ATH10K_DBG_BOOT, "found specific board file for %s\n",
+			   ar->spec_board_id);
+		return 0;
+	}
+
+generic:
+	ret = ath10k_core_fetch_generic_board_file(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to fetch generic board data: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
 static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
 {
 	int ret = 0;
@@ -491,23 +559,6 @@
 		return -EINVAL;
 	}
 
-	if (ar->hw_params.fw.board == NULL) {
-		ath10k_err(ar, "board data file not defined");
-		return -EINVAL;
-	}
-
-	ar->board = ath10k_fetch_fw_file(ar,
-					 ar->hw_params.fw.dir,
-					 ar->hw_params.fw.board);
-	if (IS_ERR(ar->board)) {
-		ret = PTR_ERR(ar->board);
-		ath10k_err(ar, "could not fetch board data (%d)\n", ret);
-		goto err;
-	}
-
-	ar->board_data = ar->board->data;
-	ar->board_len = ar->board->size;
-
 	ar->firmware = ath10k_fetch_fw_file(ar,
 					    ar->hw_params.fw.dir,
 					    ar->hw_params.fw.fw);
@@ -675,6 +726,17 @@
 			ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
 				   ar->wmi.op_version);
 			break;
+		case ATH10K_FW_IE_HTT_OP_VERSION:
+			if (ie_len != sizeof(u32))
+				break;
+
+			version = (__le32 *)data;
+
+			ar->htt.op_version = le32_to_cpup(version);
+
+			ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
+				   ar->htt.op_version);
+			break;
 		default:
 			ath10k_warn(ar, "Unknown FW IE: %u\n",
 				    le32_to_cpu(hdr->id));
@@ -695,27 +757,6 @@
 		goto err;
 	}
 
-	/* now fetch the board file */
-	if (ar->hw_params.fw.board == NULL) {
-		ath10k_err(ar, "board data file not defined");
-		ret = -EINVAL;
-		goto err;
-	}
-
-	ar->board = ath10k_fetch_fw_file(ar,
-					 ar->hw_params.fw.dir,
-					 ar->hw_params.fw.board);
-	if (IS_ERR(ar->board)) {
-		ret = PTR_ERR(ar->board);
-		ath10k_err(ar, "could not fetch board data '%s/%s' (%d)\n",
-			   ar->hw_params.fw.dir, ar->hw_params.fw.board,
-			   ret);
-		goto err;
-	}
-
-	ar->board_data = ar->board->data;
-	ar->board_len = ar->board->size;
-
 	return 0;
 
 err:
@@ -730,6 +771,19 @@
 	/* calibration file is optional, don't check for any errors */
 	ath10k_fetch_cal_file(ar);
 
+	ret = ath10k_core_fetch_board_file(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to fetch board file: %d\n", ret);
+		return ret;
+	}
+
+	ar->fw_api = 5;
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
+	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE);
+	if (ret == 0)
+		goto success;
+
 	ar->fw_api = 4;
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
@@ -958,6 +1012,8 @@
 		ar->max_num_stations = TARGET_NUM_STATIONS;
 		ar->max_num_vdevs = TARGET_NUM_VDEVS;
 		ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
+		ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
+			WMI_STAT_PEER;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_10_1:
 	case ATH10K_FW_WMI_OP_VERSION_10_2:
@@ -966,12 +1022,17 @@
 		ar->max_num_stations = TARGET_10X_NUM_STATIONS;
 		ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
 		ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
+		ar->fw_stats_req_mask = WMI_STAT_PEER;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_TLV:
 		ar->max_num_peers = TARGET_TLV_NUM_PEERS;
 		ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
 		ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
+		ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS;
 		ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
+		ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
+		ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
+			WMI_STAT_PEER;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
 	case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -979,6 +1040,29 @@
 		return -EINVAL;
 	}
 
+	/* Backwards compatibility for firmwares without
+	 * ATH10K_FW_IE_HTT_OP_VERSION.
+	 */
+	if (ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
+		switch (ar->wmi.op_version) {
+		case ATH10K_FW_WMI_OP_VERSION_MAIN:
+			ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
+			break;
+		case ATH10K_FW_WMI_OP_VERSION_10_1:
+		case ATH10K_FW_WMI_OP_VERSION_10_2:
+		case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+			ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+			break;
+		case ATH10K_FW_WMI_OP_VERSION_TLV:
+			ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
+			break;
+		case ATH10K_FW_WMI_OP_VERSION_UNSET:
+		case ATH10K_FW_WMI_OP_VERSION_MAX:
+			WARN_ON(1);
+			return -EINVAL;
+		}
+	}
+
 	return 0;
 }
 
@@ -1001,6 +1085,22 @@
 	if (status)
 		goto err;
 
+	/* Some of the qca988x solutions have a global reset issue
+	 * during target initialization. Bypassing the PLL setting before
+	 * downloading the firmware and letting the SoC run on REF_CLK
+	 * fixes the problem. A corresponding firmware change is also
+	 * needed to set the clock source once the target is initialized.
+	 */
+	if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
+		     ar->fw_features)) {
+		status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
+		if (status) {
+			ath10k_err(ar, "could not write to skip_clock_init: %d\n",
+				   status);
+			goto err;
+		}
+	}
+
 	status = ath10k_download_fw(ar, mode);
 	if (status)
 		goto err;
@@ -1080,9 +1180,8 @@
 
 	if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
 		status = ath10k_wmi_wait_for_service_ready(ar);
-		if (status <= 0) {
+		if (status) {
 			ath10k_warn(ar, "wmi service ready event not received");
-			status = -ETIMEDOUT;
 			goto err_hif_stop;
 		}
 	}
@@ -1098,9 +1197,8 @@
 	}
 
 	status = ath10k_wmi_wait_for_unified_ready(ar);
-	if (status <= 0) {
+	if (status) {
 		ath10k_err(ar, "wmi unified ready event not received\n");
-		status = -ETIMEDOUT;
 		goto err_hif_stop;
 	}
 
@@ -1151,6 +1249,7 @@
 int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
 {
 	int ret;
+	unsigned long time_left;
 
 	reinit_completion(&ar->target_suspend);
 
@@ -1160,9 +1259,9 @@
 		return ret;
 	}
 
-	ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
+	time_left = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
 
-	if (ret == 0) {
+	if (!time_left) {
 		ath10k_warn(ar, "suspend timed out - target pause event never came\n");
 		return -ETIMEDOUT;
 	}
@@ -1386,6 +1485,7 @@
 	init_completion(&ar->scan.completed);
 	init_completion(&ar->scan.on_channel);
 	init_completion(&ar->target_suspend);
+	init_completion(&ar->wow.wakeup_completed);
 
 	init_completion(&ar->install_key_done);
 	init_completion(&ar->vdev_setup_done);
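[ Several hunks above stop treating the result of wait_for_completion_timeout()
  as an errno: it returns the remaining jiffies on success and 0 on timeout,
  never a negative value. The canonical shape, sketched against the completion
  used above (the helper name is hypothetical):

    static int ath10k_wait_target_suspend(struct ath10k *ar)
    {
    	unsigned long time_left;

    	time_left = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
    	if (!time_left)		/* 0 means the timeout elapsed */
    		return -ETIMEDOUT;

    	return 0;
    }
]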
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index f65310c3..78094f23c 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -35,6 +35,7 @@
 #include "../dfs_pattern_detector.h"
 #include "spectral.h"
 #include "thermal.h"
+#include "wow.h"
 
 #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -43,15 +44,16 @@
 #define ATH10K_SCAN_ID 0
 #define WMI_READY_TIMEOUT (5 * HZ)
 #define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
-#define ATH10K_NUM_CHANS 38
+#define ATH10K_CONNECTION_LOSS_HZ (3*HZ)
+#define ATH10K_NUM_CHANS 39
 
 /* Antenna noise floor */
 #define ATH10K_DEFAULT_NOISE_FLOOR -95
 
 #define ATH10K_MAX_NUM_MGMT_PENDING 128
 
-/* number of failed packets */
-#define ATH10K_KICKOUT_THRESHOLD 50
+/* number of failed packets (20 packets with 16 sw retries each) */
+#define ATH10K_KICKOUT_THRESHOLD (20 * 16)
 
 /*
  * Use insanely high numbers to make sure that the firmware implementation
@@ -82,6 +84,8 @@
 	dma_addr_t paddr;
 	u8 eid;
 	u8 vdev_id;
+	enum ath10k_hw_txrx_mode txmode;
+	bool is_protected;
 
 	struct {
 		u8 tid;
@@ -301,6 +305,7 @@
 	enum ath10k_beacon_state beacon_state;
 	void *beacon_buf;
 	dma_addr_t beacon_paddr;
+	unsigned long tx_paused; /* arbitrary values defined by target */
 
 	struct ath10k *ar;
 	struct ieee80211_vif *vif;
@@ -334,13 +339,13 @@
 		} ap;
 	} u;
 
-	u8 fixed_rate;
-	u8 fixed_nss;
-	u8 force_sgi;
 	bool use_cts_prot;
 	int num_legacy_stations;
 	int txpower;
 	struct wmi_wmm_params_all_arg wmm_params;
+	struct work_struct ap_csa_work;
+	struct delayed_work connection_loss_work;
+	struct cfg80211_bitrate_mask bitrate_mask;
 };
 
 struct ath10k_vif_iter {
@@ -440,6 +445,23 @@
 	 */
 	ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5,
 
+	/* Some firmware revisions have an incomplete WoWLAN implementation
+	 * despite WMI service bit being advertised. This feature flag is used
+	 * to distinguish whether WoWLAN is really supported or not.
+	 */
+	ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
+
+	/* Don't trust error code from otp.bin */
+	ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+
+	/* Some firmware revisions pad 4th hw address to 4 byte boundary making
+	 * it 8 bytes long in Native Wifi Rx decap.
+	 */
+	ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
+
+	/* Firmware supports bypassing PLL setting on init. */
+	ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT = 9,
+
 	/* keep last */
 	ATH10K_FW_FEATURE_COUNT,
 };
@@ -498,6 +520,11 @@
 	return "unknown";
 }
 
+enum ath10k_tx_pause_reason {
+	ATH10K_TX_PAUSE_Q_FULL,
+	ATH10K_TX_PAUSE_MAX,
+};
+
 struct ath10k {
 	struct ath_common ath_common;
 	struct ieee80211_hw *hw;
@@ -511,12 +538,15 @@
 	u32 fw_version_minor;
 	u16 fw_version_release;
 	u16 fw_version_build;
+	u32 fw_stats_req_mask;
 	u32 phy_capability;
 	u32 hw_min_tx_power;
 	u32 hw_max_tx_power;
 	u32 ht_cap_info;
 	u32 vht_cap_info;
 	u32 num_rf_chains;
+	/* protected by conf_mutex */
+	bool ani_enabled;
 
 	DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
 
@@ -541,6 +571,13 @@
 		u32 patch_load_addr;
 		int uart_pin;
 
+		/* This is true if given HW chip has a quirky Cycle Counter
+		 * wraparound which resets to 0x7fffffff instead of 0. All
+		 * other CC related counters (e.g. Rx Clear Count) are divided
+		 * by 2 so they never wraparound themselves.
+		 */
+		bool has_shifted_cc_wraparound;
+
 		struct ath10k_hw_params_fw {
 			const char *dir;
 			const char *fw;
@@ -565,6 +602,9 @@
 
 	const struct firmware *cal_file;
 
+	char spec_board_id[100];
+	bool spec_board_loaded;
+
 	int fw_api;
 	enum ath10k_cal_mode cal_mode;
 
@@ -593,6 +633,7 @@
 	struct cfg80211_chan_def chandef;
 
 	unsigned long long free_vdev_map;
+	struct ath10k_vif *monitor_arvif;
 	bool monitor;
 	int monitor_vdev_id;
 	bool monitor_started;
@@ -633,6 +674,7 @@
 	int max_num_peers;
 	int max_num_stations;
 	int max_num_vdevs;
+	int max_num_tdls_vdevs;
 
 	struct work_struct offchan_tx_work;
 	struct sk_buff_head offchan_tx_queue;
@@ -653,8 +695,18 @@
 	u32 survey_last_cycle_count;
 	struct survey_info survey[ATH10K_NUM_CHANS];
 
+	/* Channel info events are expected to come in pairs without and with
+	 * COMPLETE flag set respectively for each channel visit during scan.
+	 *
+	 * However there are deviations from this rule. This flag is used to
+	 * avoid reporting garbage data.
+	 */
+	bool ch_info_can_report_survey;
+
 	struct dfs_pattern_detector *dfs_detector;
 
+	unsigned long tx_paused; /* see ATH10K_TX_PAUSE_ */
+
 #ifdef CONFIG_ATH10K_DEBUGFS
 	struct ath10k_debug debug;
 #endif
@@ -686,6 +738,7 @@
 	} stats;
 
 	struct ath10k_thermal thermal;
+	struct ath10k_wow wow;
 
 	/* must be last */
 	u8 drv_priv[0] __aligned(sizeof(void *));
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 301081d..8fa606a 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -124,10 +124,14 @@
 
 void ath10k_print_driver_info(struct ath10k *ar)
 {
-	ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
+	ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
 		    ar->hw_params.name,
 		    ar->target_version,
 		    ar->chip_id,
+		    (strlen(ar->spec_board_id) > 0 ? ", " : ""),
+		    ar->spec_board_id,
+		    (strlen(ar->spec_board_id) > 0 && !ar->spec_board_loaded
+		     ? " fallback" : ""),
 		    ar->hw->wiphy->fw_version,
 		    ar->fw_api,
 		    ar->htt.target_version_major,
@@ -380,12 +384,12 @@
 
 static int ath10k_debug_fw_stats_request(struct ath10k *ar)
 {
-	unsigned long timeout;
+	unsigned long timeout, time_left;
 	int ret;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	timeout = jiffies + msecs_to_jiffies(1*HZ);
+	timeout = jiffies + msecs_to_jiffies(1 * HZ);
 
 	ath10k_debug_fw_stats_reset(ar);
 
@@ -395,18 +399,16 @@
 
 		reinit_completion(&ar->debug.fw_stats_complete);
 
-		ret = ath10k_wmi_request_stats(ar,
-					       WMI_STAT_PDEV |
-					       WMI_STAT_VDEV |
-					       WMI_STAT_PEER);
+		ret = ath10k_wmi_request_stats(ar, ar->fw_stats_req_mask);
 		if (ret) {
 			ath10k_warn(ar, "could not request stats (%d)\n", ret);
 			return ret;
 		}
 
-		ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
-						  1*HZ);
-		if (ret == 0)
+		time_left =
+		wait_for_completion_timeout(&ar->debug.fw_stats_complete,
+					    1 * HZ);
+		if (!time_left)
 			return -ETIMEDOUT;
 
 		spin_lock_bh(&ar->data_lock);
@@ -1708,6 +1710,61 @@
 	return 0;
 }
 
+static ssize_t ath10k_write_ani_enable(struct file *file,
+				       const char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	int ret;
+	u8 enable;
+
+	if (kstrtou8_from_user(user_buf, count, 0, &enable))
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->ani_enabled == enable) {
+		ret = count;
+		goto exit;
+	}
+
+	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->ani_enable,
+					enable);
+	if (ret) {
+		ath10k_warn(ar, "ani_enable failed from debugfs: %d\n", ret);
+		goto exit;
+	}
+	ar->ani_enabled = enable;
+
+	ret = count;
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static ssize_t ath10k_read_ani_enable(struct file *file, char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	int len = 0;
+	char buf[32];
+
+	len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+			ar->ani_enabled);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ani_enable = {
+	.read = ath10k_read_ani_enable,
+	.write = ath10k_write_ani_enable,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
 static const struct file_operations fops_cal_data = {
 	.open = ath10k_debug_cal_data_open,
 	.read = ath10k_debug_cal_data_read,
@@ -1991,6 +2048,50 @@
 	.open = simple_open
 };
 
+static ssize_t ath10k_write_quiet_period(struct file *file,
+					 const char __user *ubuf,
+					 size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	u32 period;
+
+	if (kstrtouint_from_user(ubuf, count, 0, &period))
+		return -EINVAL;
+
+	if (period < ATH10K_QUIET_PERIOD_MIN) {
+		ath10k_warn(ar, "Quiet period %u can not be lesser than 25ms\n",
+			    period);
+		return -EINVAL;
+	}
+	mutex_lock(&ar->conf_mutex);
+	ar->thermal.quiet_period = period;
+	ath10k_thermal_set_throttling(ar);
+	mutex_unlock(&ar->conf_mutex);
+
+	return count;
+}
+
+static ssize_t ath10k_read_quiet_period(struct file *file, char __user *ubuf,
+					size_t count, loff_t *ppos)
+{
+	char buf[32];
+	struct ath10k *ar = file->private_data;
+	int len = 0;
+
+	mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+			ar->thermal.quiet_period);
+	mutex_unlock(&ar->conf_mutex);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_quiet_period = {
+	.read = ath10k_read_quiet_period,
+	.write = ath10k_write_quiet_period,
+	.open = simple_open
+};
+
 int ath10k_debug_create(struct ath10k *ar)
 {
 	ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
@@ -2068,6 +2169,9 @@
 	debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
 			    ar, &fops_cal_data);
 
+	debugfs_create_file("ani_enable", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar, &fops_ani_enable);
+
 	debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
 			    ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
 
@@ -2088,6 +2192,9 @@
 	debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR,
 			    ar->debug.debugfs_phy, ar, &fops_pktlog_filter);
 
+	debugfs_create_file("quiet_period", S_IRUGO | S_IWUSR,
+			    ar->debug.debugfs_phy, ar, &fops_quiet_period);
+
 	return 0;
 }
 
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index a12b832..53bd6a1 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -36,6 +36,7 @@
 	ATH10K_DBG_REGULATORY	= 0x00000800,
 	ATH10K_DBG_TESTMODE	= 0x00001000,
 	ATH10K_DBG_WMI_PRINT	= 0x00002000,
+	ATH10K_DBG_PCI_PS	= 0x00004000,
 	ATH10K_DBG_ANY		= 0xffffffff,
 };
 
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 2fd9e18..85bfa2a 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -86,21 +86,6 @@
 	ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
 }
 
-/* assumes tx_lock is held */
-static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
-{
-	struct ath10k *ar = ep->htc->ar;
-
-	if (!ep->tx_credit_flow_enabled)
-		return false;
-	if (ep->tx_credits >= ep->tx_credits_per_max_message)
-		return false;
-
-	ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
-		   ep->eid);
-	return true;
-}
-
 static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
 				      struct sk_buff *skb)
 {
@@ -111,13 +96,10 @@
 	hdr->eid = ep->eid;
 	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
 	hdr->flags = 0;
+	hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
 
 	spin_lock_bh(&ep->htc->tx_lock);
 	hdr->seq_no = ep->seq_no++;
-
-	if (ath10k_htc_ep_need_credit_update(ep))
-		hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
-
 	spin_unlock_bh(&ep->htc->tx_lock);
 }
 
@@ -414,7 +396,8 @@
 		struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
 
 		switch (__le16_to_cpu(msg->hdr.message_id)) {
-		default:
+		case ATH10K_HTC_MSG_READY_ID:
+		case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
 			/* handle HTC control message */
 			if (completion_done(&htc->ctl_resp)) {
 				/*
@@ -438,6 +421,10 @@
 			break;
 		case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
 			htc->htc_ops.target_send_suspend_complete(ar);
+			break;
+		default:
+			ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
+			break;
 		}
 		goto out;
 	}
@@ -548,6 +535,7 @@
 {
 	struct ath10k *ar = htc->ar;
 	int i, status = 0;
+	unsigned long time_left;
 	struct ath10k_htc_svc_conn_req conn_req;
 	struct ath10k_htc_svc_conn_resp conn_resp;
 	struct ath10k_htc_msg *msg;
@@ -555,9 +543,9 @@
 	u16 credit_count;
 	u16 credit_size;
 
-	status = wait_for_completion_timeout(&htc->ctl_resp,
-					     ATH10K_HTC_WAIT_TIMEOUT_HZ);
-	if (status == 0) {
+	time_left = wait_for_completion_timeout(&htc->ctl_resp,
+						ATH10K_HTC_WAIT_TIMEOUT_HZ);
+	if (!time_left) {
 		/* Workaround: In some cases the PCI HIF doesn't
 		 * receive interrupt for the control response message
 		 * even if the buffer was completed. It is suspected
@@ -569,10 +557,11 @@
 		for (i = 0; i < CE_COUNT; i++)
 			ath10k_hif_send_complete_check(htc->ar, i, 1);
 
-		status = wait_for_completion_timeout(&htc->ctl_resp,
-						     ATH10K_HTC_WAIT_TIMEOUT_HZ);
+		time_left =
+		wait_for_completion_timeout(&htc->ctl_resp,
+					    ATH10K_HTC_WAIT_TIMEOUT_HZ);
 
-		if (status == 0)
+		if (!time_left)
 			status = -ETIMEDOUT;
 	}
 
@@ -646,6 +635,7 @@
 	struct sk_buff *skb;
 	unsigned int max_msg_size = 0;
 	int length, status;
+	unsigned long time_left;
 	bool disable_credit_flow_ctrl = false;
 	u16 message_id, service_id, flags = 0;
 	u8 tx_alloc = 0;
@@ -701,10 +691,10 @@
 	}
 
 	/* wait for response */
-	status = wait_for_completion_timeout(&htc->ctl_resp,
-					     ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
-	if (status == 0) {
-		ath10k_err(ar, "Service connect timeout: %d\n", status);
+	time_left = wait_for_completion_timeout(&htc->ctl_resp,
+						ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
+	if (!time_left) {
+		ath10k_err(ar, "Service connect timeout\n");
 		return -ETIMEDOUT;
 	}
 
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 4f59ab9..6da6ef2 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -22,6 +22,86 @@
 #include "core.h"
 #include "debug.h"
 
+static const enum htt_t2h_msg_type htt_main_t2h_msg_types[] = {
+	[HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+	[HTT_MAIN_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+	[HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+	[HTT_MAIN_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+	[HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+	[HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+	[HTT_MAIN_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+	[HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+	[HTT_MAIN_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+	[HTT_MAIN_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+	[HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+	[HTT_MAIN_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+	[HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND] =
+		HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+	[HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+		HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+	[HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+		HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+	[HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+	[HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+		HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+	[HTT_MAIN_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+};
+
+static const enum htt_t2h_msg_type htt_10x_t2h_msg_types[] = {
+	[HTT_10X_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+	[HTT_10X_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+	[HTT_10X_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+	[HTT_10X_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+	[HTT_10X_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+	[HTT_10X_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+	[HTT_10X_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+	[HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+	[HTT_10X_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+	[HTT_10X_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+	[HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+	[HTT_10X_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+	[HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+	[HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+	[HTT_10X_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+	[HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+	[HTT_10X_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
+	[HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD] = HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+	[HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+		HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+};
+
+static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = {
+	[HTT_TLV_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+	[HTT_TLV_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+	[HTT_TLV_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+	[HTT_TLV_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+	[HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+	[HTT_TLV_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+	[HTT_TLV_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+	[HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+	[HTT_TLV_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+	[HTT_TLV_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+	[HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+	[HTT_TLV_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+	[HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+	[HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+	[HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+		HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+	[HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+		HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+	[HTT_TLV_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+	[HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+		HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+	[HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND] =
+		HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
+	[HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE] =
+		HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
+	[HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+	[HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR] =
+		HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
+	[HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+};
+
 int ath10k_htt_connect(struct ath10k_htt *htt)
 {
 	struct ath10k_htc_svc_conn_req conn_req;
@@ -66,6 +146,24 @@
 		8 + /* llc snap */
 		2; /* ip4 dscp or ip6 priority */
 
+	switch (ar->htt.op_version) {
+	case ATH10K_FW_HTT_OP_VERSION_10_1:
+		ar->htt.t2h_msg_types = htt_10x_t2h_msg_types;
+		ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS;
+		break;
+	case ATH10K_FW_HTT_OP_VERSION_TLV:
+		ar->htt.t2h_msg_types = htt_tlv_t2h_msg_types;
+		ar->htt.t2h_msg_types_max = HTT_TLV_T2H_NUM_MSGS;
+		break;
+	case ATH10K_FW_HTT_OP_VERSION_MAIN:
+		ar->htt.t2h_msg_types = htt_main_t2h_msg_types;
+		ar->htt.t2h_msg_types_max = HTT_MAIN_T2H_NUM_MSGS;
+		break;
+	case ATH10K_FW_HTT_OP_VERSION_MAX:
+	case ATH10K_FW_HTT_OP_VERSION_UNSET:
+		WARN_ON(1);
+		return -EINVAL;
+	}
 	return 0;
 }
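
The per-firmware tables above are the heart of this change: each firmware
branch numbers its T2H messages differently, so the raw id is remapped to one
abstract enum before dispatch. A stand-alone sketch of the
designated-initializer translation-table pattern, using illustrative ids
rather than the real HTT numbering:

    #include <stdio.h>

    /* abstract, firmware-agnostic ids (illustrative, not the real HTT set) */
    enum abstract_msg { MSG_VERSION, MSG_RX_IND, MSG_TX_COMPL, NUM_MSGS };

    /* hypothetical on-wire ids for one firmware branch */
    enum fw_msg { FW_MSG_VERSION = 0x0, FW_MSG_TX_COMPL = 0x1,
                  FW_MSG_RX_IND = 0x2, FW_NUM_MSGS };

    static const enum abstract_msg fw_msg_map[] = {
        [FW_MSG_VERSION]  = MSG_VERSION,
        [FW_MSG_TX_COMPL] = MSG_TX_COMPL,
        [FW_MSG_RX_IND]   = MSG_RX_IND,
    };

    static const char *handle_msg(unsigned int raw)
    {
        if (raw >= FW_NUM_MSGS) /* mirrors the t2h_msg_types_max guard */
            return "unsupported";

        switch (fw_msg_map[raw]) {
        case MSG_VERSION:  return "version conf";
        case MSG_RX_IND:   return "rx indication";
        case MSG_TX_COMPL: return "tx completion";
        default:           return "unhandled";
        }
    }

    int main(void)
    {
        printf("0x1 -> %s\n", handle_msg(0x1)); /* tx completion */
        printf("0x7 -> %s\n", handle_msg(0x7)); /* unsupported */
        return 0;
    }

The bounds check before indexing is what lets the driver drop ids from newer
firmware instead of mis-decoding them.
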
 
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 874bf44..7e8a0d8 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -25,7 +25,8 @@
 #include <net/mac80211.h>
 
 #include "htc.h"
+#include "hw.h"
 #include "rx_desc.h"
 
 enum htt_dbg_stats_type {
 	HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
@@ -271,35 +273,108 @@
 
 /*=== target -> host messages ===============================================*/
 
-enum htt_t2h_msg_type {
-	HTT_T2H_MSG_TYPE_VERSION_CONF		= 0x0,
-	HTT_T2H_MSG_TYPE_RX_IND			= 0x1,
-	HTT_T2H_MSG_TYPE_RX_FLUSH		= 0x2,
-	HTT_T2H_MSG_TYPE_PEER_MAP		= 0x3,
-	HTT_T2H_MSG_TYPE_PEER_UNMAP		= 0x4,
-	HTT_T2H_MSG_TYPE_RX_ADDBA		= 0x5,
-	HTT_T2H_MSG_TYPE_RX_DELBA		= 0x6,
-	HTT_T2H_MSG_TYPE_TX_COMPL_IND		= 0x7,
-	HTT_T2H_MSG_TYPE_PKTLOG			= 0x8,
-	HTT_T2H_MSG_TYPE_STATS_CONF		= 0x9,
-	HTT_T2H_MSG_TYPE_RX_FRAG_IND		= 0xa,
-	HTT_T2H_MSG_TYPE_SEC_IND		= 0xb,
-	HTT_T2H_MSG_TYPE_RC_UPDATE_IND		= 0xc,
-	HTT_T2H_MSG_TYPE_TX_INSPECT_IND		= 0xd,
-	HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION	= 0xe,
-	HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND	= 0xf,
-	HTT_T2H_MSG_TYPE_RX_PN_IND		= 0x10,
-	HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
-	HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND	= 0x12,
+enum htt_main_t2h_msg_type {
+	HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF             = 0x0,
+	HTT_MAIN_T2H_MSG_TYPE_RX_IND                   = 0x1,
+	HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH                 = 0x2,
+	HTT_MAIN_T2H_MSG_TYPE_PEER_MAP                 = 0x3,
+	HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP               = 0x4,
+	HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA                 = 0x5,
+	HTT_MAIN_T2H_MSG_TYPE_RX_DELBA                 = 0x6,
+	HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND             = 0x7,
+	HTT_MAIN_T2H_MSG_TYPE_PKTLOG                   = 0x8,
+	HTT_MAIN_T2H_MSG_TYPE_STATS_CONF               = 0x9,
+	HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND              = 0xa,
+	HTT_MAIN_T2H_MSG_TYPE_SEC_IND                  = 0xb,
+	HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND           = 0xd,
+	HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND        = 0xe,
+	HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND     = 0xf,
+	HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND                = 0x10,
+	HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND   = 0x11,
+	HTT_MAIN_T2H_MSG_TYPE_TEST,
+	/* keep this last */
+	HTT_MAIN_T2H_NUM_MSGS
+};
+
+enum htt_10x_t2h_msg_type {
+	HTT_10X_T2H_MSG_TYPE_VERSION_CONF              = 0x0,
+	HTT_10X_T2H_MSG_TYPE_RX_IND                    = 0x1,
+	HTT_10X_T2H_MSG_TYPE_RX_FLUSH                  = 0x2,
+	HTT_10X_T2H_MSG_TYPE_PEER_MAP                  = 0x3,
+	HTT_10X_T2H_MSG_TYPE_PEER_UNMAP                = 0x4,
+	HTT_10X_T2H_MSG_TYPE_RX_ADDBA                  = 0x5,
+	HTT_10X_T2H_MSG_TYPE_RX_DELBA                  = 0x6,
+	HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND              = 0x7,
+	HTT_10X_T2H_MSG_TYPE_PKTLOG                    = 0x8,
+	HTT_10X_T2H_MSG_TYPE_STATS_CONF                = 0x9,
+	HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND               = 0xa,
+	HTT_10X_T2H_MSG_TYPE_SEC_IND                   = 0xb,
+	HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND             = 0xc,
+	HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND            = 0xd,
+	HTT_10X_T2H_MSG_TYPE_TEST                      = 0xe,
+	HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE               = 0xf,
+	HTT_10X_T2H_MSG_TYPE_AGGR_CONF                 = 0x11,
+	HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD            = 0x12,
+	HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND         = 0x13,
+	/* keep this last */
+	HTT_10X_T2H_NUM_MSGS
+};
+
+enum htt_tlv_t2h_msg_type {
+	HTT_TLV_T2H_MSG_TYPE_VERSION_CONF              = 0x0,
+	HTT_TLV_T2H_MSG_TYPE_RX_IND                    = 0x1,
+	HTT_TLV_T2H_MSG_TYPE_RX_FLUSH                  = 0x2,
+	HTT_TLV_T2H_MSG_TYPE_PEER_MAP                  = 0x3,
+	HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP                = 0x4,
+	HTT_TLV_T2H_MSG_TYPE_RX_ADDBA                  = 0x5,
+	HTT_TLV_T2H_MSG_TYPE_RX_DELBA                  = 0x6,
+	HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND              = 0x7,
+	HTT_TLV_T2H_MSG_TYPE_PKTLOG                    = 0x8,
+	HTT_TLV_T2H_MSG_TYPE_STATS_CONF                = 0x9,
+	HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND               = 0xa,
+	HTT_TLV_T2H_MSG_TYPE_SEC_IND                   = 0xb,
+	HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND             = 0xc, /* deprecated */
+	HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND            = 0xd,
+	HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND         = 0xe,
+	HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND      = 0xf,
+	HTT_TLV_T2H_MSG_TYPE_RX_PN_IND                 = 0x10,
+	HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND    = 0x11,
+	HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND       = 0x12,
 	/* 0x13 reserved */
-	HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE	= 0x14,
+	HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE       = 0x14,
+	HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE               = 0x15,
+	HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR           = 0x16,
+	HTT_TLV_T2H_MSG_TYPE_TEST,
+	/* keep this last */
+	HTT_TLV_T2H_NUM_MSGS
+};
 
-	/* FIXME: Do not depend on this event id. Numbering of this event id is
-	 * broken across different firmware revisions and HTT version fails to
-	 * indicate this.
-	 */
+enum htt_t2h_msg_type {
+	HTT_T2H_MSG_TYPE_VERSION_CONF,
+	HTT_T2H_MSG_TYPE_RX_IND,
+	HTT_T2H_MSG_TYPE_RX_FLUSH,
+	HTT_T2H_MSG_TYPE_PEER_MAP,
+	HTT_T2H_MSG_TYPE_PEER_UNMAP,
+	HTT_T2H_MSG_TYPE_RX_ADDBA,
+	HTT_T2H_MSG_TYPE_RX_DELBA,
+	HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+	HTT_T2H_MSG_TYPE_PKTLOG,
+	HTT_T2H_MSG_TYPE_STATS_CONF,
+	HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+	HTT_T2H_MSG_TYPE_SEC_IND,
+	HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+	HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+	HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+	HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+	HTT_T2H_MSG_TYPE_RX_PN_IND,
+	HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+	HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
+	HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
+	HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+	HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
+	HTT_T2H_MSG_TYPE_AGGR_CONF,
+	HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
 	HTT_T2H_MSG_TYPE_TEST,
-
 	/* keep this last */
 	HTT_T2H_NUM_MSGS
 };
@@ -1222,6 +1297,7 @@
 	u32 msdu_id;
 	bool discard;
 	bool no_ack;
+	bool success;
 };
 
 struct htt_peer_map_event {
@@ -1248,6 +1324,10 @@
 	u8 target_version_major;
 	u8 target_version_minor;
 	struct completion target_version_received;
+	enum ath10k_fw_htt_op_version op_version;
+
+	const enum htt_t2h_msg_type *t2h_msg_types;
+	u32 t2h_msg_types_max;
 
 	struct {
 		/*
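
Storing the table pointer and its size in struct ath10k_htt (t2h_msg_types /
t2h_msg_types_max above) keeps the rx path table-driven while setup binds the
right table once, as in the ath10k_htt_connect() switch earlier. A small
sketch of that binding step, with hypothetical version names and maps:

    #include <stdio.h>

    enum op_version { OP_UNSET, OP_MAIN, OP_10X, OP_MAX };

    struct htt {
        const int *t2h_msg_types;       /* fw id -> abstract id */
        unsigned int t2h_msg_types_max;
    };

    static int htt_setup(struct htt *htt, enum op_version v)
    {
        static const int main_map[] = { 0, 1, 2 };
        static const int tenx_map[] = { 0, 2, 1, 3 };

        switch (v) {
        case OP_MAIN:
            htt->t2h_msg_types = main_map;
            htt->t2h_msg_types_max = sizeof(main_map) / sizeof(main_map[0]);
            return 0;
        case OP_10X:
            htt->t2h_msg_types = tenx_map;
            htt->t2h_msg_types_max = sizeof(tenx_map) / sizeof(tenx_map[0]);
            return 0;
        case OP_UNSET:
        case OP_MAX:
            break;
        }
        return -1; /* like the WARN_ON + -EINVAL: version must be known */
    }

    int main(void)
    {
        struct htt htt;

        if (htt_setup(&htt, OP_10X) == 0)
            printf("bound %u message ids\n", htt.t2h_msg_types_max);
        return 0;
    }
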
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 01a2b38..89eb16b 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -637,58 +637,21 @@
 	return 0;
 }
 
-struct rfc1042_hdr {
-	u8 llc_dsap;
-	u8 llc_ssap;
-	u8 llc_ctrl;
-	u8 snap_oui[3];
-	__be16 snap_type;
-} __packed;
-
 struct amsdu_subframe_hdr {
 	u8 dst[ETH_ALEN];
 	u8 src[ETH_ALEN];
 	__be16 len;
 } __packed;
 
-static const u8 rx_legacy_rate_idx[] = {
-	3,	/* 0x00  - 11Mbps  */
-	2,	/* 0x01  - 5.5Mbps */
-	1,	/* 0x02  - 2Mbps   */
-	0,	/* 0x03  - 1Mbps   */
-	3,	/* 0x04  - 11Mbps  */
-	2,	/* 0x05  - 5.5Mbps */
-	1,	/* 0x06  - 2Mbps   */
-	0,	/* 0x07  - 1Mbps   */
-	10,	/* 0x08  - 48Mbps  */
-	8,	/* 0x09  - 24Mbps  */
-	6,	/* 0x0A  - 12Mbps  */
-	4,	/* 0x0B  - 6Mbps   */
-	11,	/* 0x0C  - 54Mbps  */
-	9,	/* 0x0D  - 36Mbps  */
-	7,	/* 0x0E  - 18Mbps  */
-	5,	/* 0x0F  - 9Mbps   */
-};
-
 static void ath10k_htt_rx_h_rates(struct ath10k *ar,
 				  struct ieee80211_rx_status *status,
 				  struct htt_rx_desc *rxd)
 {
-	enum ieee80211_band band;
-	u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
+	struct ieee80211_supported_band *sband;
+	u8 cck, rate, bw, sgi, mcs, nss;
 	u8 preamble = 0;
 	u32 info1, info2, info3;
 
-	/* Band value can't be set as undefined but freq can be 0 - use that to
-	 * determine whether band is provided.
-	 *
-	 * FIXME: Perhaps this can go away if CCK rate reporting is a little
-	 * reworked?
-	 */
-	if (!status->freq)
-		return;
-
-	band = status->band;
 	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
 	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
 	info3 = __le32_to_cpu(rxd->ppdu_start.info3);
@@ -697,31 +660,18 @@
 
 	switch (preamble) {
 	case HTT_RX_LEGACY:
+		/* To get legacy rate index band is required. Since band can't
+		 * be undefined check if freq is non-zero.
+		 */
+		if (!status->freq)
+			return;
+
 		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
 		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
-		rate_idx = 0;
+		rate &= ~RX_PPDU_START_RATE_FLAG;
 
-		if (rate < 0x08 || rate > 0x0F)
-			break;
-
-		switch (band) {
-		case IEEE80211_BAND_2GHZ:
-			if (cck)
-				rate &= ~BIT(3);
-			rate_idx = rx_legacy_rate_idx[rate];
-			break;
-		case IEEE80211_BAND_5GHZ:
-			rate_idx = rx_legacy_rate_idx[rate];
-			/* We are using same rate table registering
-			   HW - ath10k_rates[]. In case of 5GHz skip
-			   CCK rates, so -4 here */
-			rate_idx -= 4;
-			break;
-		default:
-			break;
-		}
-
-		status->rate_idx = rate_idx;
+		sband = &ar->mac.sbands[status->band];
+		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate);
 		break;
 	case HTT_RX_HT:
 	case HTT_RX_HT_WITH_TXBF:
@@ -773,8 +723,87 @@
 	}
 }
 
+static struct ieee80211_channel *
+ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
+{
+	struct ath10k_peer *peer;
+	struct ath10k_vif *arvif;
+	struct cfg80211_chan_def def;
+	u16 peer_id;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	if (!rxd)
+		return NULL;
+
+	if (rxd->attention.flags &
+	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
+		return NULL;
+
+	if (!(rxd->msdu_end.info0 &
+	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
+		return NULL;
+
+	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+		     RX_MPDU_START_INFO0_PEER_IDX);
+
+	peer = ath10k_peer_find_by_id(ar, peer_id);
+	if (!peer)
+		return NULL;
+
+	arvif = ath10k_get_arvif(ar, peer->vdev_id);
+	if (WARN_ON_ONCE(!arvif))
+		return NULL;
+
+	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+		return NULL;
+
+	return def.chan;
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
+{
+	struct ath10k_vif *arvif;
+	struct cfg80211_chan_def def;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		if (arvif->vdev_id == vdev_id &&
+		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
+			return def.chan;
+	}
+
+	return NULL;
+}
+
+static void
+ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
+			      struct ieee80211_chanctx_conf *conf,
+			      void *data)
+{
+	struct cfg80211_chan_def *def = data;
+
+	*def = conf->def;
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_any_channel(struct ath10k *ar)
+{
+	struct cfg80211_chan_def def = {};
+
+	ieee80211_iter_chan_contexts_atomic(ar->hw,
+					    ath10k_htt_rx_h_any_chan_iter,
+					    &def);
+
+	return def.chan;
+}
+
 static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
-				    struct ieee80211_rx_status *status)
+				    struct ieee80211_rx_status *status,
+				    struct htt_rx_desc *rxd,
+				    u32 vdev_id)
 {
 	struct ieee80211_channel *ch;
 
@@ -782,6 +811,12 @@
 	ch = ar->scan_channel;
 	if (!ch)
 		ch = ar->rx_channel;
+	if (!ch)
+		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
+	if (!ch)
+		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
+	if (!ch)
+		ch = ath10k_htt_rx_h_any_channel(ar);
 	spin_unlock_bh(&ar->data_lock);
 
 	if (!ch)
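
The additions above turn channel resolution into a fallback chain: scan
channel, then rx channel, then per-peer, per-vdev, and finally any active
channel context, with the first non-NULL source winning. Reduced to its
skeleton:

    #include <stdio.h>

    struct chan { int freq; };

    /* first non-NULL source wins: scan, current rx, per-peer, per-vdev, any */
    static const struct chan *pick_channel(const struct chan *scan,
                                           const struct chan *rx,
                                           const struct chan *peer,
                                           const struct chan *vdev,
                                           const struct chan *any)
    {
        const struct chan *ch = scan;

        if (!ch)
            ch = rx;
        if (!ch)
            ch = peer;
        if (!ch)
            ch = vdev;
        if (!ch)
            ch = any;
        return ch;
    }

    int main(void)
    {
        struct chan any = { 2412 };
        const struct chan *ch = pick_channel(NULL, NULL, NULL, NULL, &any);

        printf("%d MHz\n", ch ? ch->freq : 0);
        return 0;
    }
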
@@ -819,7 +854,8 @@
 
 static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
 				 struct sk_buff_head *amsdu,
-				 struct ieee80211_rx_status *status)
+				 struct ieee80211_rx_status *status,
+				 u32 vdev_id)
 {
 	struct sk_buff *first;
 	struct htt_rx_desc *rxd;
@@ -851,7 +887,7 @@
 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
 
 		ath10k_htt_rx_h_signal(ar, status, rxd);
-		ath10k_htt_rx_h_channel(ar, status);
+		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
 		ath10k_htt_rx_h_rates(ar, status, rxd);
 	}
 
@@ -929,10 +965,16 @@
 	ieee80211_rx(ar->hw, skb);
 }
 
-static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
+static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
+				      struct ieee80211_hdr *hdr)
 {
-	/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
-	return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+	int len = ieee80211_hdrlen(hdr->frame_control);
+
+	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
+		      ar->fw_features))
+		len = round_up(len, 4);
+
+	return len;
 }
 
 static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
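
The hdrlen change above is small but easy to get backwards: older firmware
pads the decapped 802.11 header to a 4-byte boundary (which is what fixed
4-address rx originally), while firmware advertising
ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING does not pad. A worked sketch
(round_up here is a generic version of the kernel macro, which requires a
power-of-two alignment):

    #include <stdio.h>

    #define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

    static int nwifi_hdrlen(int hdrlen, int fw_skips_padding)
    {
        return fw_skips_padding ? hdrlen : round_up(hdrlen, 4);
    }

    int main(void)
    {
        /* a 26-byte QoS data header: padding firmware yields 28, else 26 */
        printf("%d %d\n", nwifi_hdrlen(26, 0), nwifi_hdrlen(26, 1));
        return 0;
    }
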
@@ -1031,7 +1073,7 @@
 
 	/* pull decapped header and copy SA & DA */
 	hdr = (struct ieee80211_hdr *)msdu->data;
-	hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
+	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
 	ether_addr_copy(da, ieee80211_get_DA(hdr));
 	ether_addr_copy(sa, ieee80211_get_SA(hdr));
 	skb_pull(msdu, hdr_len);
@@ -1522,7 +1564,7 @@
 			break;
 		}
 
-		ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
+		ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
 		ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
 		ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
 		ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
@@ -1569,7 +1611,7 @@
 		return;
 	}
 
-	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
+	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
 	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
 	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
 	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
@@ -1598,6 +1640,7 @@
 		tx_done.no_ack = true;
 		break;
 	case HTT_DATA_TX_STATUS_OK:
+		tx_done.success = true;
 		break;
 	case HTT_DATA_TX_STATUS_DISCARD:
 	case HTT_DATA_TX_STATUS_POSTPONE:
@@ -1796,7 +1839,7 @@
 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
 
 		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
-		ath10k_htt_rx_h_channel(ar, status);
+		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
 		ath10k_process_rx(ar, status, msdu);
 	}
 }
@@ -1869,7 +1912,7 @@
 			 * better to report something than nothing though. This
 			 * should still give an idea about rx rate to the user.
 			 */
-			ath10k_htt_rx_h_ppdu(ar, &amsdu, status);
+			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
 			ath10k_htt_rx_h_filter(ar, &amsdu, status);
 			ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
 			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
@@ -1892,6 +1935,7 @@
 {
 	struct ath10k_htt *htt = &ar->htt;
 	struct htt_resp *resp = (struct htt_resp *)skb->data;
+	enum htt_t2h_msg_type type;
 
 	/* confirm alignment */
 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
@@ -1899,7 +1943,16 @@
 
 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
 		   resp->hdr.msg_type);
-	switch (resp->hdr.msg_type) {
+
+	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
+		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X max: 0x%0X\n",
+			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
+
+	switch (type) {
 	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
 		htt->target_version_major = resp->ver_resp.major;
 		htt->target_version_minor = resp->ver_resp.minor;
@@ -1937,6 +1990,7 @@
 
 		switch (status) {
 		case HTT_MGMT_TX_STATUS_OK:
+			tx_done.success = true;
 			break;
 		case HTT_MGMT_TX_STATUS_RETRY:
 			tx_done.no_ack = true;
@@ -1976,7 +2030,6 @@
 		break;
 	}
 	case HTT_T2H_MSG_TYPE_TEST:
-		/* FIX THIS */
 		break;
 	case HTT_T2H_MSG_TYPE_STATS_CONF:
 		trace_ath10k_htt_stats(ar, skb->data, skb->len);
@@ -2018,11 +2071,8 @@
 		return;
 	}
 	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
-		/* FIXME: This WMI-TLV event is overlapping with 10.2
-		 * CHAN_CHANGE - both being 0xF. Neither is being used in
-		 * practice so no immediate action is necessary. Nevertheless
-		 * HTT may need an abstraction layer like WMI has one day.
-		 */
+		break;
+	case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
 		break;
 	default:
 		ath10k_warn(ar, "htt event (%d) not handled\n",
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index cbd2bc9..a60ef7d 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -26,7 +26,7 @@
 {
 	htt->num_pending_tx--;
 	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
-		ieee80211_wake_queues(htt->ar->hw);
+		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
 }
 
 static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
@@ -49,7 +49,7 @@
 
 	htt->num_pending_tx++;
 	if (htt->num_pending_tx == htt->max_num_pending_tx)
-		ieee80211_stop_queues(htt->ar->hw);
+		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
 
 exit:
 	spin_unlock_bh(&htt->tx_lock);
@@ -420,9 +420,8 @@
 	int res;
 	u8 flags0 = 0;
 	u16 msdu_id, flags1 = 0;
-	dma_addr_t paddr;
-	u32 frags_paddr;
-	bool use_frags;
+	dma_addr_t paddr = 0;
+	u32 frags_paddr = 0;
 
 	res = ath10k_htt_tx_inc_pending(htt);
 	if (res)
@@ -440,12 +439,6 @@
 	prefetch_len = min(htt->prefetch_len, msdu->len);
 	prefetch_len = roundup(prefetch_len, 4);
 
-	/* Since HTT 3.0 there is no separate mgmt tx command. However in case
-	 * of mgmt tx using TX_FRM there is not tx fragment list. Instead of tx
-	 * fragment list host driver specifies directly frame pointer. */
-	use_frags = htt->target_version_major < 3 ||
-		    !ieee80211_is_mgmt(hdr->frame_control);
-
 	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
 					   &paddr);
 	if (!skb_cb->htt.txbuf) {
@@ -466,7 +459,12 @@
 	if (res)
 		goto err_free_txbuf;
 
-	if (likely(use_frags)) {
+	switch (skb_cb->txmode) {
+	case ATH10K_HW_TXRX_RAW:
+	case ATH10K_HW_TXRX_NATIVE_WIFI:
+		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+		/* fall through */
+	case ATH10K_HW_TXRX_ETHERNET:
 		frags = skb_cb->htt.txbuf->frags;
 
 		frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
@@ -474,15 +472,17 @@
 		frags[1].paddr = 0;
 		frags[1].len = 0;
 
-		flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
-			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+		flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
 
 		frags_paddr = skb_cb->htt.txbuf_paddr;
-	} else {
+		break;
+	case ATH10K_HW_TXRX_MGMT:
 		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
 			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
 
 		frags_paddr = skb_cb->paddr;
+		break;
 	}
 
 	/* Normally all commands go through HTC which manages tx credits for
@@ -508,11 +508,9 @@
 			prefetch_len);
 	skb_cb->htt.txbuf->htc_hdr.flags = 0;
 
-	if (!ieee80211_has_protected(hdr->frame_control))
+	if (!skb_cb->is_protected)
 		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
 
-	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
-
 	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
 	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
 	if (msdu->ip_summed == CHECKSUM_PARTIAL) {
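
The txmode switch above replaces the old use_frags heuristic; the deliberate
fallthrough lets RAW and NATIVE_WIFI set MAC_HDR_PRESENT and then share the
fragment-list encoding with ETHERNET, while MGMT keeps the direct frame
pointer. The SM()/MS() helpers pack and unpack named bitfields; a sketch of
the convention with illustrative field definitions (not the real HTT
descriptor layout):

    #include <stdio.h>

    #define FLAGS0_PKT_TYPE_MASK   0x07 /* mask already at field position */
    #define FLAGS0_PKT_TYPE_LSB    0
    #define FLAGS0_MAC_HDR_PRESENT 0x08

    #define SM(v, f) (((v) << f##_LSB) & f##_MASK)
    #define MS(x, f) (((x) & f##_MASK) >> f##_LSB)

    /* ordering mirrors the driver's tx modes; values are illustrative */
    enum txmode { TXRX_RAW, TXRX_NATIVE_WIFI, TXRX_ETHERNET, TXRX_MGMT };

    int main(void)
    {
        unsigned int flags0 = 0;
        enum txmode mode = TXRX_NATIVE_WIFI;

        switch (mode) {
        case TXRX_RAW:
        case TXRX_NATIVE_WIFI:
            flags0 |= FLAGS0_MAC_HDR_PRESENT;
            /* fall through: both share the fragment-list encoding */
        case TXRX_ETHERNET:
            flags0 |= SM(mode, FLAGS0_PKT_TYPE);
            break;
        case TXRX_MGMT:
            flags0 |= SM(TXRX_MGMT, FLAGS0_PKT_TYPE) |
                      FLAGS0_MAC_HDR_PRESENT;
            break;
        }

        printf("flags0 0x%02x pkt_type %u\n",
               flags0, MS(flags0, FLAGS0_PKT_TYPE)); /* 0x09, 1 */
        return 0;
    }
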
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 839a879..5997f00 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -15,6 +15,7 @@
  */
 
 #include <linux/types.h>
+#include "core.h"
 #include "hw.h"
 
 const struct ath10k_hw_regs qca988x_regs = {
@@ -56,3 +57,23 @@
 	.soc_chip_id_address			= 0x000f0,
 	.scratch_3_address			= 0x0028,
 };
+
+void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
+				u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev)
+{
+	u32 cc_fix = 0;
+
+	survey->filled |= SURVEY_INFO_TIME |
+			  SURVEY_INFO_TIME_BUSY;
+
+	if (ar->hw_params.has_shifted_cc_wraparound && cc < cc_prev) {
+		cc_fix = 0x7fffffff;
+		survey->filled &= ~SURVEY_INFO_TIME_BUSY;
+	}
+
+	cc -= cc_prev - cc_fix;
+	rcc -= rcc_prev;
+
+	survey->time = CCNT_TO_MSEC(cc);
+	survey->time_busy = CCNT_TO_MSEC(rcc);
+}
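
ath10k_hw_fill_survey_time() above is mostly fixed-point bookkeeping: the
counters tick at 88 MHz, so milliseconds are cycles / 88000, and hardware
with a shifted (31-bit) cycle counter gets its wraparound compensated with
0x7fffffff while the busy time is marked unreliable. The arithmetic,
stand-alone:

    #include <stdio.h>

    #define CCNT_TO_MSEC(x) ((x) / 88000) /* counters run at 88 MHz */

    static unsigned int survey_time_ms(unsigned int cc, unsigned int cc_prev,
                                       int has_shifted_cc_wraparound)
    {
        unsigned int cc_fix = 0;

        if (has_shifted_cc_wraparound && cc < cc_prev)
            cc_fix = 0x7fffffff; /* 31-bit counter wrapped since last read */

        cc -= cc_prev - cc_fix;  /* unsigned arithmetic absorbs the wrap */
        return CCNT_TO_MSEC(cc);
    }

    int main(void)
    {
        /* 88,000,000 cycles at 88 MHz is exactly one second */
        printf("%u ms\n", survey_time_ms(88001000, 1000, 0));  /* 1000 */
        /* wrapped 31-bit counter: only 115 cycles really elapsed */
        printf("%u ms\n", survey_time_ms(100, 0x7ffffff0, 1)); /* 0 */
        return 0;
    }
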
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 460771f..85cca29 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -78,6 +78,9 @@
 /* added support for ATH10K_FW_IE_WMI_OP_VERSION */
 #define ATH10K_FW_API4_FILE		"firmware-4.bin"
 
+/* HTT id conflict fix for management frames over HTT */
+#define ATH10K_FW_API5_FILE		"firmware-5.bin"
+
 #define ATH10K_FW_UTF_FILE		"utf.bin"
 
 /* includes also the null byte */
@@ -104,6 +107,11 @@
 	 * FW API 4 and above.
 	 */
 	ATH10K_FW_IE_WMI_OP_VERSION = 5,
+
+	/* HTT "operations" interface version, 32 bit value. Supported from
+	 * FW API 5 and above.
+	 */
+	ATH10K_FW_IE_HTT_OP_VERSION = 6,
 };
 
 enum ath10k_fw_wmi_op_version {
@@ -119,6 +127,20 @@
 	ATH10K_FW_WMI_OP_VERSION_MAX,
 };
 
+enum ath10k_fw_htt_op_version {
+	ATH10K_FW_HTT_OP_VERSION_UNSET = 0,
+
+	ATH10K_FW_HTT_OP_VERSION_MAIN = 1,
+
+	/* also used in 10.2 and 10.2.4 branches */
+	ATH10K_FW_HTT_OP_VERSION_10_1 = 2,
+
+	ATH10K_FW_HTT_OP_VERSION_TLV = 3,
+
+	/* keep last */
+	ATH10K_FW_HTT_OP_VERSION_MAX,
+};
+
 enum ath10k_hw_rev {
 	ATH10K_HW_QCA988X,
 	ATH10K_HW_QCA6174,
@@ -147,6 +169,9 @@
 extern const struct ath10k_hw_regs qca988x_regs;
 extern const struct ath10k_hw_regs qca6174_regs;
 
+void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
+				u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
+
 #define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
 #define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
 
@@ -180,6 +205,27 @@
 	u8 payload[0];
 } __packed;
 
+enum ath10k_hw_rate_ofdm {
+	ATH10K_HW_RATE_OFDM_48M = 0,
+	ATH10K_HW_RATE_OFDM_24M,
+	ATH10K_HW_RATE_OFDM_12M,
+	ATH10K_HW_RATE_OFDM_6M,
+	ATH10K_HW_RATE_OFDM_54M,
+	ATH10K_HW_RATE_OFDM_36M,
+	ATH10K_HW_RATE_OFDM_18M,
+	ATH10K_HW_RATE_OFDM_9M,
+};
+
+enum ath10k_hw_rate_cck {
+	ATH10K_HW_RATE_CCK_LP_11M = 0,
+	ATH10K_HW_RATE_CCK_LP_5_5M,
+	ATH10K_HW_RATE_CCK_LP_2M,
+	ATH10K_HW_RATE_CCK_LP_1M,
+	ATH10K_HW_RATE_CCK_SP_11M,
+	ATH10K_HW_RATE_CCK_SP_5_5M,
+	ATH10K_HW_RATE_CCK_SP_2M,
+};
+
 /* Target specific defines for MAIN firmware */
 #define TARGET_NUM_VDEVS			8
 #define TARGET_NUM_PEER_AST			2
@@ -223,7 +269,7 @@
 #define TARGET_10X_NUM_WDS_ENTRIES		32
 #define TARGET_10X_DMA_BURST_SIZE		0
 #define TARGET_10X_MAC_AGGR_DELIM		0
-#define TARGET_10X_AST_SKID_LIMIT		16
+#define TARGET_10X_AST_SKID_LIMIT		128
 #define TARGET_10X_NUM_STATIONS			128
 #define TARGET_10X_NUM_PEERS			((TARGET_10X_NUM_STATIONS) + \
 						 (TARGET_10X_NUM_VDEVS))
@@ -256,13 +302,13 @@
 #define TARGET_10_2_DMA_BURST_SIZE		1
 
 /* Target specific defines for WMI-TLV firmware */
-#define TARGET_TLV_NUM_VDEVS			3
+#define TARGET_TLV_NUM_VDEVS			4
 #define TARGET_TLV_NUM_STATIONS			32
-#define TARGET_TLV_NUM_PEERS			((TARGET_TLV_NUM_STATIONS) + \
-						 (TARGET_TLV_NUM_VDEVS) + \
-						 2)
+#define TARGET_TLV_NUM_PEERS			35
+#define TARGET_TLV_NUM_TDLS_VDEVS		1
 #define TARGET_TLV_NUM_TIDS			((TARGET_TLV_NUM_PEERS) * 2)
 #define TARGET_TLV_NUM_MSDU_DESC		(1024 + 32)
+#define TARGET_TLV_NUM_WOW_PATTERNS		22
 
 /* Number of Copy Engines supported */
 #define CE_COUNT 8
@@ -406,6 +452,9 @@
 #define SCRATCH_3_ADDRESS			ar->regs->scratch_3_address
 #define CPU_INTR_ADDRESS			0x0010
 
+/* Cycle counters are running at 88MHz */
+#define CCNT_TO_MSEC(x) ((x) / 88000)
+
 /* Firmware indications to the Host via SCRATCH_3 register. */
 #define FW_INDICATOR_ADDRESS	(SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
 #define FW_IND_EVENT_PENDING			1
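
The OFDM/CCK enums added above encode the hardware's own rate ids, with
separate long/short-preamble ids for CCK; mac.c below registers them as
hw_value/hw_value_short and recovers a rate index by scanning the band's
bitrate table (see ath10k_mac_hw_rate_to_idx() in the next file). A compact
sketch of that reverse lookup using the CCK ids from the enum:

    #include <stdio.h>

    struct rate {
        int bitrate;                  /* units of 100 kbit/s */
        int short_preamble;           /* has a short-preamble id */
        unsigned char hw_value;       /* long-preamble id */
        unsigned char hw_value_short; /* short-preamble id */
    };

    static const struct rate rates[] = {
        { 10,  0, 3, 0 }, /* 1M CCK: long preamble only */
        { 20,  1, 2, 6 },
        { 55,  1, 1, 5 },
        { 110, 1, 0, 4 },
    };

    static int hw_rate_to_idx(unsigned char hw)
    {
        unsigned int i;

        for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
            if (rates[i].hw_value == hw)
                return i;
            if (rates[i].short_preamble && rates[i].hw_value_short == hw)
                return i;
        }
        return 0; /* unknown id: fall back to the first rate */
    }

    int main(void)
    {
        printf("hw 0x00 -> idx %d\n", hw_rate_to_idx(0)); /* 11M, long  */
        printf("hw 0x04 -> idx %d\n", hw_rate_to_idx(4)); /* 11M, short */
        return 0;
    }
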
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 973485b..218b6af 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -28,7 +28,131 @@
 #include "txrx.h"
 #include "testmode.h"
 #include "wmi.h"
+#include "wmi-tlv.h"
 #include "wmi-ops.h"
+#include "wow.h"
+
+/*********/
+/* Rates */
+/*********/
+
+static struct ieee80211_rate ath10k_rates[] = {
+	{ .bitrate = 10,
+	  .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
+	{ .bitrate = 20,
+	  .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
+	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 55,
+	  .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
+	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 110,
+	  .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
+	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
+	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
+	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
+	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
+	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
+	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
+	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
+	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
+};
+
+#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
+
+#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
+#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
+			     ATH10K_MAC_FIRST_OFDM_RATE_IDX)
+#define ath10k_g_rates (ath10k_rates + 0)
+#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
+
+static bool ath10k_mac_bitrate_is_cck(int bitrate)
+{
+	switch (bitrate) {
+	case 10:
+	case 20:
+	case 55:
+	case 110:
+		return true;
+	}
+
+	return false;
+}
+
+static u8 ath10k_mac_bitrate_to_rate(int bitrate)
+{
+	return DIV_ROUND_UP(bitrate, 5) |
+	       (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
+}
+
+u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+			     u8 hw_rate)
+{
+	const struct ieee80211_rate *rate;
+	int i;
+
+	for (i = 0; i < sband->n_bitrates; i++) {
+		rate = &sband->bitrates[i];
+
+		if (rate->hw_value == hw_rate)
+			return i;
+		else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
+			 rate->hw_value_short == hw_rate)
+			return i;
+	}
+
+	return 0;
+}
+
+u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+			     u32 bitrate)
+{
+	int i;
+
+	for (i = 0; i < sband->n_bitrates; i++)
+		if (sband->bitrates[i].bitrate == bitrate)
+			return i;
+
+	return 0;
+}
+
+static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
+{
+	switch ((mcs_map >> (2 * nss)) & 0x3) {
+	case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
+	case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
+	case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
+	}
+	return 0;
+}
+
+static u32
+ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+	int nss;
+
+	for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
+		if (ht_mcs_mask[nss])
+			return nss + 1;
+
+	return 1;
+}
+
+static u32
+ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+	int nss;
+
+	for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
+		if (vht_mcs_mask[nss])
+			return nss + 1;
+
+	return 1;
+}
 
 /**********/
 /* Crypto */
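
ath10k_mac_get_max_vht_mcs_map() above decodes the standard VHT MCS map: two
bits per spatial stream selecting MCS 0-7 / 0-8 / 0-9 / not supported,
expanded into a bitmask of usable MCS indices. Worked stand-alone (the
two-bit values match their ieee80211.h counterparts):

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    enum { VHT_MCS_0_7 = 0, VHT_MCS_0_8 = 1, VHT_MCS_0_9 = 2,
           VHT_MCS_NONE = 3 };

    /* expand the 2-bit field for one spatial stream into an MCS bitmask */
    static int max_vht_mcs_map(unsigned short mcs_map, int nss)
    {
        switch ((mcs_map >> (2 * nss)) & 0x3) {
        case VHT_MCS_0_7: return BIT(8) - 1;  /* MCS 0..7 -> 0x0ff */
        case VHT_MCS_0_8: return BIT(9) - 1;  /* MCS 0..8 -> 0x1ff */
        case VHT_MCS_0_9: return BIT(10) - 1; /* MCS 0..9 -> 0x3ff */
        }
        return 0;                             /* not supported */
    }

    int main(void)
    {
        /* nss1: MCS 0-9 (2), nss2: MCS 0-7 (0), all others: none (3) */
        unsigned short map = 0xfff2;

        printf("nss1 0x%03x\n", max_vht_mcs_map(map, 0)); /* 0x3ff */
        printf("nss2 0x%03x\n", max_vht_mcs_map(map, 1)); /* 0x0ff */
        return 0;
    }
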
@@ -37,7 +161,7 @@
 static int ath10k_send_key(struct ath10k_vif *arvif,
 			   struct ieee80211_key_conf *key,
 			   enum set_key_cmd cmd,
-			   const u8 *macaddr, bool def_idx)
+			   const u8 *macaddr, u32 flags)
 {
 	struct ath10k *ar = arvif->ar;
 	struct wmi_vdev_install_key_arg arg = {
@@ -45,16 +169,12 @@
 		.key_idx = key->keyidx,
 		.key_len = key->keylen,
 		.key_data = key->key,
+		.key_flags = flags,
 		.macaddr = macaddr,
 	};
 
 	lockdep_assert_held(&arvif->ar->conf_mutex);
 
-	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
-		arg.key_flags = WMI_KEY_PAIRWISE;
-	else
-		arg.key_flags = WMI_KEY_GROUP;
-
 	switch (key->cipher) {
 	case WLAN_CIPHER_SUITE_CCMP:
 		arg.key_cipher = WMI_CIPHER_AES_CCM;
@@ -68,17 +188,10 @@
 	case WLAN_CIPHER_SUITE_WEP40:
 	case WLAN_CIPHER_SUITE_WEP104:
 		arg.key_cipher = WMI_CIPHER_WEP;
-		/* AP/IBSS mode requires self-key to be groupwise
-		 * Otherwise pairwise key must be set */
-		if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN))
-			arg.key_flags = WMI_KEY_PAIRWISE;
-
-		if (def_idx)
-			arg.key_flags |= WMI_KEY_TX_USAGE;
 		break;
 	case WLAN_CIPHER_SUITE_AES_CMAC:
-		/* this one needs to be done in software */
-		return 1;
+		WARN_ON(1);
+		return -EINVAL;
 	default:
 		ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
 		return -EOPNOTSUPP;
@@ -95,21 +208,22 @@
 static int ath10k_install_key(struct ath10k_vif *arvif,
 			      struct ieee80211_key_conf *key,
 			      enum set_key_cmd cmd,
-			      const u8 *macaddr, bool def_idx)
+			      const u8 *macaddr, u32 flags)
 {
 	struct ath10k *ar = arvif->ar;
 	int ret;
+	unsigned long time_left;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
 	reinit_completion(&ar->install_key_done);
 
-	ret = ath10k_send_key(arvif, key, cmd, macaddr, def_idx);
+	ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
 	if (ret)
 		return ret;
 
-	ret = wait_for_completion_timeout(&ar->install_key_done, 3*HZ);
-	if (ret == 0)
+	time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
+	if (time_left == 0)
 		return -ETIMEDOUT;
 
 	return 0;
@@ -122,7 +236,7 @@
 	struct ath10k_peer *peer;
 	int ret;
 	int i;
-	bool def_idx;
+	u32 flags;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
@@ -136,14 +250,20 @@
 	for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
 		if (arvif->wep_keys[i] == NULL)
 			continue;
-		/* set TX_USAGE flag for default key id */
-		if (arvif->def_wep_key_idx == i)
-			def_idx = true;
-		else
-			def_idx = false;
+
+		flags = 0;
+		flags |= WMI_KEY_PAIRWISE;
 
 		ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
-					 addr, def_idx);
+					 addr, flags);
+		if (ret)
+			return ret;
+
+		flags = 0;
+		flags |= WMI_KEY_GROUP;
+
+		ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
+					 addr, flags);
 		if (ret)
 			return ret;
 
@@ -152,6 +272,27 @@
 		spin_unlock_bh(&ar->data_lock);
 	}
 
+	/* In some cases (notably with static WEP IBSS with multiple keys)
+	 * multicast Tx becomes broken. Both pairwise and groupwise keys are
+	 * installed already. Using WMI_KEY_TX_USAGE in different combinations
+	 * didn't seem to help. Using the def_keyid vdev parameter seems to be
+	 * effective so use that.
+	 *
+	 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
+	 */
+	if (arvif->def_wep_key_idx == -1)
+		return 0;
+
+	ret = ath10k_wmi_vdev_set_param(arvif->ar,
+					arvif->vdev_id,
+					arvif->ar->wmi.vdev_param->def_keyid,
+					arvif->def_wep_key_idx);
+	if (ret) {
+		ath10k_warn(ar, "failed to re-set def wep key idx on vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -163,6 +304,7 @@
 	int first_errno = 0;
 	int ret;
 	int i;
+	u32 flags = 0;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
@@ -179,7 +321,7 @@
 
 		/* key flags are not required to delete the key */
 		ret = ath10k_install_key(arvif, peer->keys[i],
-					 DISABLE_KEY, addr, false);
+					 DISABLE_KEY, addr, flags);
 		if (ret && first_errno == 0)
 			first_errno = ret;
 
@@ -229,6 +371,7 @@
 	int first_errno = 0;
 	int ret;
 	int i;
+	u32 flags = 0;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
@@ -254,7 +397,7 @@
 		if (i == ARRAY_SIZE(peer->keys))
 			break;
 		/* key flags are not required to delete the key */
-		ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, false);
+		ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
 		if (ret && first_errno == 0)
 			first_errno = ret;
 
@@ -266,6 +409,39 @@
 	return first_errno;
 }
 
+static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
+					 struct ieee80211_key_conf *key)
+{
+	struct ath10k *ar = arvif->ar;
+	struct ath10k_peer *peer;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	list_for_each_entry(peer, &ar->peers, list) {
+		if (!memcmp(peer->addr, arvif->vif->addr, ETH_ALEN))
+			continue;
+
+		if (!memcmp(peer->addr, arvif->bssid, ETH_ALEN))
+			continue;
+
+		if (peer->keys[key->keyidx] == key)
+			continue;
+
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n",
+			   arvif->vdev_id, key->keyidx);
+
+		ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
+		if (ret) {
+			ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
+				    arvif->vdev_id, peer->addr, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 /*********************/
 /* General utilities */
 /*********************/
@@ -364,7 +540,56 @@
 	}
 }
 
-static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
+int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
+			struct cfg80211_chan_def *def)
+{
+	struct ieee80211_chanctx_conf *conf;
+
+	rcu_read_lock();
+	conf = rcu_dereference(vif->chanctx_conf);
+	if (!conf) {
+		rcu_read_unlock();
+		return -ENOENT;
+	}
+
+	*def = conf->def;
+	rcu_read_unlock();
+
+	return 0;
+}
+
+static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
+					 struct ieee80211_chanctx_conf *conf,
+					 void *data)
+{
+	int *num = data;
+
+	(*num)++;
+}
+
+static int ath10k_mac_num_chanctxs(struct ath10k *ar)
+{
+	int num = 0;
+
+	ieee80211_iter_chan_contexts_atomic(ar->hw,
+					    ath10k_mac_num_chanctxs_iter,
+					    &num);
+
+	return num;
+}
+
+static void
+ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
+				struct ieee80211_chanctx_conf *conf,
+				void *data)
+{
+	struct cfg80211_chan_def **def = data;
+
+	*def = &conf->def;
+}
+
+static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
+			      enum wmi_peer_type peer_type)
 {
 	int ret;
 
@@ -373,7 +598,7 @@
 	if (ar->num_peers >= ar->max_num_peers)
 		return -ENOBUFS;
 
-	ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
+	ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
 	if (ret) {
 		ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
 			    addr, vdev_id, ret);
@@ -517,6 +742,38 @@
 	ar->num_stations = 0;
 }
 
+static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
+				       struct ieee80211_sta *sta,
+				       enum wmi_tdls_peer_state state)
+{
+	int ret;
+	struct wmi_tdls_peer_update_cmd_arg arg = {};
+	struct wmi_tdls_peer_capab_arg cap = {};
+	struct wmi_channel_arg chan_arg = {};
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	arg.vdev_id = vdev_id;
+	arg.peer_state = state;
+	ether_addr_copy(arg.addr, sta->addr);
+
+	cap.peer_max_sp = sta->max_sp;
+	cap.peer_uapsd_queues = sta->uapsd_queues;
+
+	if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
+	    !sta->tdls_initiator)
+		cap.is_peer_responder = 1;
+
+	ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
+			    arg.addr, vdev_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
 /************************/
 /* Interface management */
 /************************/
@@ -561,16 +818,16 @@
 
 static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
 {
-	int ret;
+	unsigned long time_left;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
 	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
 		return -ESHUTDOWN;
 
-	ret = wait_for_completion_timeout(&ar->vdev_setup_done,
-					  ATH10K_VDEV_SETUP_TIMEOUT_HZ);
-	if (ret == 0)
+	time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
+						ATH10K_VDEV_SETUP_TIMEOUT_HZ);
+	if (time_left == 0)
 		return -ETIMEDOUT;
 
 	return 0;
@@ -578,13 +835,21 @@
 
 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
 {
-	struct cfg80211_chan_def *chandef = &ar->chandef;
+	struct cfg80211_chan_def *chandef = NULL;
 	struct ieee80211_channel *channel = chandef->chan;
 	struct wmi_vdev_start_request_arg arg = {};
 	int ret = 0;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
+	ieee80211_iter_chan_contexts_atomic(ar->hw,
+					    ath10k_mac_get_any_chandef_iter,
+					    &chandef);
+	if (WARN_ON_ONCE(!chandef))
+		return -ENOENT;
+
+	channel = chandef->chan;
+
 	arg.vdev_id = vdev_id;
 	arg.channel.freq = channel->center_freq;
 	arg.channel.band_center_freq1 = chandef->center_freq1;
@@ -766,27 +1031,78 @@
 	return 0;
 }
 
+static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
+{
+	int num_ctx;
+
+	/* At least one chanctx is required to derive a channel to start
+	 * monitor vdev on.
+	 */
+	num_ctx = ath10k_mac_num_chanctxs(ar);
+	if (num_ctx == 0)
+		return false;
+
+	/* If there's already an existing special monitor interface then don't
+	 * bother creating another monitor vdev.
+	 */
+	if (ar->monitor_arvif)
+		return false;
+
+	return ar->monitor ||
+	       test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+}
+
+static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
+{
+	int num_ctx;
+
+	num_ctx = ath10k_mac_num_chanctxs(ar);
+
+	/* FIXME: Current interface combinations and cfg80211/mac80211 code
+	 * shouldn't allow this but make sure to prevent handling the following
+	 * case anyway since multi-channel DFS hasn't been tested at all.
+	 */
+	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
+		return false;
+
+	return true;
+}
+
 static int ath10k_monitor_recalc(struct ath10k *ar)
 {
-	bool should_start;
+	bool needed;
+	bool allowed;
+	int ret;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	should_start = ar->monitor ||
-		       ar->filter_flags & FIF_PROMISC_IN_BSS ||
-		       test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+	needed = ath10k_mac_monitor_vdev_is_needed(ar);
+	allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
 
 	ath10k_dbg(ar, ATH10K_DBG_MAC,
-		   "mac monitor recalc started? %d should? %d\n",
-		   ar->monitor_started, should_start);
+		   "mac monitor recalc started? %d needed? %d allowed? %d\n",
+		   ar->monitor_started, needed, allowed);
 
-	if (should_start == ar->monitor_started)
+	if (WARN_ON(needed && !allowed)) {
+		if (ar->monitor_started) {
+			ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
+
+			ret = ath10k_monitor_stop(ar);
+			if (ret)
+				ath10k_warn(ar, "failed to stop disallowed monitor: %d\n", ret);
+			/* not serious */
+		}
+
+		return -EPERM;
+	}
+
+	if (needed == ar->monitor_started)
 		return 0;
 
-	if (should_start)
+	if (needed)
 		return ath10k_monitor_start(ar);
-
-	return ath10k_monitor_stop(ar);
+	else
+		return ath10k_monitor_stop(ar);
 }
 
 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
@@ -798,12 +1114,14 @@
 
 	vdev_param = ar->wmi.vdev_param->enable_rtscts;
 
-	if (arvif->use_cts_prot || arvif->num_legacy_stations > 0)
-		rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
+	rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
 
 	if (arvif->num_legacy_stations > 0)
 		rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
 			      WMI_RTSCTS_PROFILE);
+	else
+		rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
+			      WMI_RTSCTS_PROFILE);
 
 	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
 					 rts_cts);
@@ -846,6 +1164,27 @@
 	return 0;
 }
 
+static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
+				      struct ieee80211_chanctx_conf *conf,
+				      void *data)
+{
+	bool *ret = data;
+
+	if (!*ret && conf->radar_enabled)
+		*ret = true;
+}
+
+static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
+{
+	bool has_radar = false;
+
+	ieee80211_iter_chan_contexts_atomic(ar->hw,
+					    ath10k_mac_has_radar_iter,
+					    &has_radar);
+
+	return has_radar;
+}
+
 static void ath10k_recalc_radar_detection(struct ath10k *ar)
 {
 	int ret;
@@ -854,7 +1193,7 @@
 
 	ath10k_stop_cac(ar);
 
-	if (!ar->radar_enabled)
+	if (!ath10k_mac_has_radar_enabled(ar))
 		return;
 
 	if (ar->num_started_vdevs > 0)
@@ -872,10 +1211,44 @@
 	}
 }
 
-static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart)
+static int ath10k_vdev_stop(struct ath10k_vif *arvif)
 {
 	struct ath10k *ar = arvif->ar;
-	struct cfg80211_chan_def *chandef = &ar->chandef;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	reinit_completion(&ar->vdev_setup_done);
+
+	ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
+	if (ret) {
+		ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	ret = ath10k_vdev_setup_sync(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	WARN_ON(ar->num_started_vdevs == 0);
+
+	if (ar->num_started_vdevs != 0) {
+		ar->num_started_vdevs--;
+		ath10k_recalc_radar_detection(ar);
+	}
+
+	return ret;
+}
+
+static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
+				     const struct cfg80211_chan_def *chandef,
+				     bool restart)
+{
+	struct ath10k *ar = arvif->ar;
 	struct wmi_vdev_start_request_arg arg = {};
 	int ret = 0;
 
@@ -939,47 +1312,16 @@
 	return ret;
 }
 
-static int ath10k_vdev_start(struct ath10k_vif *arvif)
+static int ath10k_vdev_start(struct ath10k_vif *arvif,
+			     const struct cfg80211_chan_def *def)
 {
-	return ath10k_vdev_start_restart(arvif, false);
+	return ath10k_vdev_start_restart(arvif, def, false);
 }
 
-static int ath10k_vdev_restart(struct ath10k_vif *arvif)
+static int ath10k_vdev_restart(struct ath10k_vif *arvif,
+			       const struct cfg80211_chan_def *def)
 {
-	return ath10k_vdev_start_restart(arvif, true);
-}
-
-static int ath10k_vdev_stop(struct ath10k_vif *arvif)
-{
-	struct ath10k *ar = arvif->ar;
-	int ret;
-
-	lockdep_assert_held(&ar->conf_mutex);
-
-	reinit_completion(&ar->vdev_setup_done);
-
-	ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
-	if (ret) {
-		ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
-			    arvif->vdev_id, ret);
-		return ret;
-	}
-
-	ret = ath10k_vdev_setup_sync(ar);
-	if (ret) {
-		ath10k_warn(ar, "failed to synchronize setup for vdev %i stop: %d\n",
-			    arvif->vdev_id, ret);
-		return ret;
-	}
-
-	WARN_ON(ar->num_started_vdevs == 0);
-
-	if (ar->num_started_vdevs != 0) {
-		ar->num_started_vdevs--;
-		ath10k_recalc_radar_detection(ar);
-	}
-
-	return ret;
+	return ath10k_vdev_start_restart(arvif, def, true);
 }
 
 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
@@ -1056,6 +1398,10 @@
 	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
 		return 0;
 
+	if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+	    arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+		return 0;
+
 	bcn = ieee80211_beacon_get_template(hw, vif, &offs);
 	if (!bcn) {
 		ath10k_warn(ar, "failed to get beacon template from mac80211\n");
@@ -1101,6 +1447,9 @@
 	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
 		return 0;
 
+	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+		return 0;
+
 	prb = ieee80211_proberesp_get(hw, vif);
 	if (!prb) {
 		ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
@@ -1119,6 +1468,80 @@
 	return 0;
 }
 
+static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+	struct cfg80211_chan_def def;
+	int ret;
+
+	/* When originally vdev is started during assign_vif_chanctx() some
+	 * information is missing, notably SSID. Firmware revisions with beacon
+	 * offloading require the SSID to be provided during vdev (re)start to
+	 * handle hidden SSID properly.
+	 *
+	 * Vdev restart must be done after vdev has been both started and
+	 * upped. Otherwise some firmware revisions (at least 10.2) fail to
+	 * deliver vdev restart response event causing timeouts during vdev
+	 * syncing in ath10k.
+	 *
+	 * Note: The vdev down/up and template reinstallation could be skipped
+	 * since only wmi-tlv firmware are known to have beacon offload and
+	 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
+	 * response delivery. It's probably more robust to keep it as is.
+	 */
+	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
+		return 0;
+
+	if (WARN_ON(!arvif->is_started))
+		return -EINVAL;
+
+	if (WARN_ON(!arvif->is_up))
+		return -EINVAL;
+
+	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+		return -EINVAL;
+
+	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+	if (ret) {
+		ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	/* Vdev down reset beacon & presp templates. Reinstall them. Otherwise
+	 * firmware will crash upon vdev up.
+	 */
+
+	ret = ath10k_mac_setup_bcn_tmpl(arvif);
+	if (ret) {
+		ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_mac_setup_prb_tmpl(arvif);
+	if (ret) {
+		ath10k_warn(ar, "failed to update presp template: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_vdev_restart(arvif, &def);
+	if (ret) {
+		ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+				 arvif->bssid);
+	if (ret) {
+		ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
 static void ath10k_control_beaconing(struct ath10k_vif *arvif,
 				     struct ieee80211_bss_conf *info)
 {
@@ -1128,9 +1551,11 @@
 	lockdep_assert_held(&arvif->ar->conf_mutex);
 
 	if (!info->enable_beacon) {
-		ath10k_vdev_stop(arvif);
+		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+		if (ret)
+			ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
+				    arvif->vdev_id, ret);
 
-		arvif->is_started = false;
 		arvif->is_up = false;
 
 		spin_lock_bh(&arvif->ar->data_lock);
@@ -1142,10 +1567,6 @@
 
 	arvif->tx_seq_no = 0x1000;
 
-	ret = ath10k_vdev_start(arvif);
-	if (ret)
-		return;
-
 	arvif->aid = 0;
 	ether_addr_copy(arvif->bssid, info->bssid);
 
@@ -1154,13 +1575,18 @@
 	if (ret) {
 		ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
 			    arvif->vdev_id, ret);
-		ath10k_vdev_stop(arvif);
 		return;
 	}
 
-	arvif->is_started = true;
 	arvif->is_up = true;
 
+	ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
+	if (ret) {
+		ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
+			    arvif->vdev_id, ret);
+		return;
+	}
+
 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
 }
 
@@ -1175,11 +1601,6 @@
 	lockdep_assert_held(&arvif->ar->conf_mutex);
 
 	if (!info->ibss_joined) {
-		ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
-		if (ret)
-			ath10k_warn(ar, "failed to delete IBSS self peer %pM for vdev %d: %d\n",
-				    self_peer, arvif->vdev_id, ret);
-
 		if (is_zero_ether_addr(arvif->bssid))
 			return;
 
@@ -1188,13 +1609,6 @@
 		return;
 	}
 
-	ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
-	if (ret) {
-		ath10k_warn(ar, "failed to create IBSS self peer %pM for vdev %d: %d\n",
-			    self_peer, arvif->vdev_id, ret);
-		return;
-	}
-
 	vdev_param = arvif->ar->wmi.vdev_param->atim_window;
 	ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
 					ATH10K_DEFAULT_ATIM);
@@ -1294,7 +1708,14 @@
 		enable_ps = false;
 	}
 
-	if (enable_ps) {
+	if (!arvif->is_started) {
+		/* mac80211 can update vif powersave state while disconnected.
+		 * Firmware doesn't behave nicely and consumes more power than
+		 * necessary if PS is disabled on a non-started vdev. Hence
+		 * force-enable PS for non-running vdevs.
+		 */
+		psmode = WMI_STA_PS_MODE_ENABLED;
+	} else if (enable_ps) {
 		psmode = WMI_STA_PS_MODE_ENABLED;
 		param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
 
@@ -1361,6 +1782,123 @@
 	return 0;
 }
 
+static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+	struct ieee80211_vif *vif = arvif->vif;
+	int ret;
+
+	lockdep_assert_held(&arvif->ar->conf_mutex);
+
+	if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
+		return;
+
+	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+		return;
+
+	if (!vif->csa_active)
+		return;
+
+	if (!arvif->is_up)
+		return;
+
+	if (!ieee80211_csa_is_complete(vif)) {
+		ieee80211_csa_update_counter(vif);
+
+		ret = ath10k_mac_setup_bcn_tmpl(arvif);
+		if (ret)
+			ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
+				    ret);
+
+		ret = ath10k_mac_setup_prb_tmpl(arvif);
+		if (ret)
+			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
+				    ret);
+	} else {
+		ieee80211_csa_finish(vif);
+	}
+}
+
+static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
+{
+	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+						ap_csa_work);
+	struct ath10k *ar = arvif->ar;
+
+	mutex_lock(&ar->conf_mutex);
+	ath10k_mac_vif_ap_csa_count_down(arvif);
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
+					  struct ieee80211_vif *vif)
+{
+	struct sk_buff *skb = data;
+	struct ieee80211_mgmt *mgmt = (void *)skb->data;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+	if (vif->type != NL80211_IFTYPE_STATION)
+		return;
+
+	if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
+		return;
+
+	cancel_delayed_work(&arvif->connection_loss_work);
+}
+
+void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
+{
+	ieee80211_iterate_active_interfaces_atomic(ar->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   ath10k_mac_handle_beacon_iter,
+						   skb);
+}
+
+static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
+					       struct ieee80211_vif *vif)
+{
+	u32 *vdev_id = data;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ath10k *ar = arvif->ar;
+	struct ieee80211_hw *hw = ar->hw;
+
+	if (arvif->vdev_id != *vdev_id)
+		return;
+
+	if (!arvif->is_up)
+		return;
+
+	ieee80211_beacon_loss(vif);
+
+	/* Firmware doesn't report beacon loss events repeatedly. If AP probe
+	 * (done by mac80211) succeeds but beacons do not resume then it
+	 * doesn't make sense to continue operation. Queue connection loss work
+	 * which can be cancelled when beacon is received.
+	 */
+	ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
+				     ATH10K_CONNECTION_LOSS_HZ);
+}
+
+void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
+{
+	ieee80211_iterate_active_interfaces_atomic(ar->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   ath10k_mac_handle_beacon_miss_iter,
+						   &vdev_id);
+}
+
+static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
+{
+	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+						connection_loss_work.work);
+	struct ieee80211_vif *vif = arvif->vif;
+
+	if (!arvif->is_up)
+		return;
+
+	ieee80211_connection_loss(vif);
+}
+
 /**********************/
 /* Station management */
 /**********************/
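
The beacon-miss plumbing above follows a disarm-on-beacon watchdog: firmware
reports a miss once, the driver reports beacon loss and queues delayed
connection-loss work, and any subsequent beacon from the BSS cancels it. A
tiny model of that logic with a plain deadline instead of a mac80211 delayed
work item:

    #include <stdio.h>

    static long loss_deadline = -1; /* -1: disarmed */

    static void handle_beacon_miss(long now, long timeout)
    {
        /* report loss once, then arm the escalation timer */
        printf("beacon loss reported\n");
        loss_deadline = now + timeout;
    }

    static void handle_beacon(void)
    {
        loss_deadline = -1; /* beacons resumed: cancel escalation */
    }

    static void tick(long now)
    {
        if (loss_deadline >= 0 && now >= loss_deadline) {
            printf("connection lost\n");
            loss_deadline = -1;
        }
    }

    int main(void)
    {
        handle_beacon_miss(0, 5);
        tick(3);         /* nothing yet */
        handle_beacon(); /* AP came back: escalation cancelled */
        tick(10);        /* stays quiet */

        handle_beacon_miss(20, 5);
        tick(26);        /* prints "connection lost" */
        return 0;
    }
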
@@ -1388,12 +1926,18 @@
 				      struct wmi_peer_assoc_complete_arg *arg)
 {
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	u32 aid;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
+	if (vif->type == NL80211_IFTYPE_STATION)
+		aid = vif->bss_conf.aid;
+	else
+		aid = sta->aid;
+
 	ether_addr_copy(arg->addr, sta->addr);
 	arg->vdev_id = arvif->vdev_id;
-	arg->peer_aid = sta->aid;
+	arg->peer_aid = aid;
 	arg->peer_flags |= WMI_PEER_AUTH;
 	arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
 	arg->peer_num_spatial_streams = 1;
@@ -1405,15 +1949,18 @@
 				       struct wmi_peer_assoc_complete_arg *arg)
 {
 	struct ieee80211_bss_conf *info = &vif->bss_conf;
+	struct cfg80211_chan_def def;
 	struct cfg80211_bss *bss;
 	const u8 *rsnie = NULL;
 	const u8 *wpaie = NULL;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan,
-			       info->bssid, NULL, 0, IEEE80211_BSS_TYPE_ANY,
-			       IEEE80211_PRIVACY_ANY);
+	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+		return;
+
+	bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+			       IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
 	if (bss) {
 		const struct cfg80211_bss_ies *ies;
 
@@ -1443,19 +1990,29 @@
 }
 
 static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
+				      struct ieee80211_vif *vif,
 				      struct ieee80211_sta *sta,
 				      struct wmi_peer_assoc_complete_arg *arg)
 {
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
 	struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+	struct cfg80211_chan_def def;
 	const struct ieee80211_supported_band *sband;
 	const struct ieee80211_rate *rates;
+	enum ieee80211_band band;
 	u32 ratemask;
+	u8 rate;
 	int i;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
-	ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band];
+	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+		return;
+
+	band = def.chan->band;
+	sband = ar->hw->wiphy->bands[band];
+	ratemask = sta->supp_rates[band];
+	ratemask &= arvif->bitrate_mask.control[band].legacy;
 	rates = sband->bitrates;
 
 	rateset->num_rates = 0;
@@ -1464,24 +2021,66 @@
 		if (!(ratemask & 1))
 			continue;
 
-		rateset->rates[rateset->num_rates] = rates->hw_value;
+		rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
+		rateset->rates[rateset->num_rates] = rate;
 		rateset->num_rates++;
 	}
 }
 
+static bool
+ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+	int nss;
+
+	for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
+		if (ht_mcs_mask[nss])
+			return false;
+
+	return true;
+}
+
+static bool
+ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+	int nss;
+
+	for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
+		if (vht_mcs_mask[nss])
+			return false;
+
+	return true;
+}
+
 static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
+				   struct ieee80211_vif *vif,
 				   struct ieee80211_sta *sta,
 				   struct wmi_peer_assoc_complete_arg *arg)
 {
 	const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-	int i, n;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct cfg80211_chan_def def;
+	enum ieee80211_band band;
+	const u8 *ht_mcs_mask;
+	const u16 *vht_mcs_mask;
+	int i, n, max_nss;
 	u32 stbc;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
+	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+		return;
+
 	if (!ht_cap->ht_supported)
 		return;
 
+	band = def.chan->band;
+	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+	if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
+	    ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
+		return;
+
 	arg->peer_flags |= WMI_PEER_HT;
 	arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
 				    ht_cap->ampdu_factor)) - 1;
@@ -1500,11 +2099,13 @@
 		arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
 	}
 
-	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
-		arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+	if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
+		if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+			arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
 
-	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
-		arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+		if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+			arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+	}
 
 	if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
 		arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
@@ -1524,9 +2125,12 @@
 	else if (ht_cap->mcs.rx_mask[1])
 		arg->peer_rate_caps |= WMI_RC_DS_FLAG;
 
-	for (i = 0, n = 0; i < IEEE80211_HT_MCS_MASK_LEN*8; i++)
-		if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8))
+	for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
+		if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
+		    (ht_mcs_mask[i / 8] & BIT(i % 8))) {
+			max_nss = (i / 8) + 1;
 			arg->peer_ht_rates.rates[n++] = i;
+		}
 
 	/*
 	 * This is a workaround for HT-enabled STAs which break the spec
@@ -1543,7 +2147,7 @@
 			arg->peer_ht_rates.rates[i] = i;
 	} else {
 		arg->peer_ht_rates.num_rates = n;
-		arg->peer_num_spatial_streams = sta->rx_nss;
+		arg->peer_num_spatial_streams = max_nss;
 	}
 
 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
@@ -1619,19 +2223,84 @@
 	return 0;
 }
 
+static u16
+ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
+			      const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
+{
+	int idx_limit;
+	int nss;
+	u16 mcs_map;
+	u16 mcs;
+
+	for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+		mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
+			  vht_mcs_limit[nss];
+
+		if (mcs_map)
+			idx_limit = fls(mcs_map) - 1;
+		else
+			idx_limit = -1;
+
+		switch (idx_limit) {
+		case 0: /* fall through */
+		case 1: /* fall through */
+		case 2: /* fall through */
+		case 3: /* fall through */
+		case 4: /* fall through */
+		case 5: /* fall through */
+		case 6: /* fall through */
+		default:
+			/* see ath10k_mac_can_set_bitrate_mask() */
+			WARN_ON(1);
+			/* fall through */
+		case -1:
+			mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
+			break;
+		case 7:
+			mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
+			break;
+		case 8:
+			mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
+			break;
+		case 9:
+			mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
+			break;
+		}
+
+		tx_mcs_set &= ~(0x3 << (nss * 2));
+		tx_mcs_set |= mcs << (nss * 2);
+	}
+
+	return tx_mcs_set;
+}
+
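tx_mcs_set packs one 2-bit field per spatial stream (0 = MCS 0-7, 1 = MCS 0-8, 2 = MCS 0-9, 3 = not supported). A minimal standalone sketch of the clamping arithmetic, lifted out of kernel context (names are illustrative):

	#include <stdint.h>

	/* Clamp one stream's 2-bit field from the highest MCS index left
	 * after intersecting the peer capability with the user mask, e.g.
	 * a 0x00ff mask (MCS 0-7) against a 0-9 capable peer gives idx 7,
	 * i.e. field value 0.
	 */
	static uint16_t clamp_stream(uint16_t tx_mcs_set, int nss,
				     uint16_t mcs_map)
	{
		int idx = mcs_map ? 31 - __builtin_clz(mcs_map) : -1;
		uint16_t field;

		switch (idx) {
		case 7:  field = 0; break; /* IEEE80211_VHT_MCS_SUPPORT_0_7 */
		case 8:  field = 1; break; /* IEEE80211_VHT_MCS_SUPPORT_0_8 */
		case 9:  field = 2; break; /* IEEE80211_VHT_MCS_SUPPORT_0_9 */
		default: field = 3; break; /* IEEE80211_VHT_MCS_NOT_SUPPORTED */
		}

		tx_mcs_set &= ~(0x3 << (nss * 2));
		tx_mcs_set |= field << (nss * 2);
		return tx_mcs_set;
	}
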
 static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
+				    struct ieee80211_vif *vif,
 				    struct ieee80211_sta *sta,
 				    struct wmi_peer_assoc_complete_arg *arg)
 {
 	const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct cfg80211_chan_def def;
+	enum ieee80211_band band;
+	const u16 *vht_mcs_mask;
 	u8 ampdu_factor;
 
+	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+		return;
+
 	if (!vht_cap->vht_supported)
 		return;
 
+	band = def.chan->band;
+	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+	if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
+		return;
+
 	arg->peer_flags |= WMI_PEER_VHT;
 
-	if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
+	if (def.chan->band == IEEE80211_BAND_2GHZ)
 		arg->peer_flags |= WMI_PEER_VHT_2G;
 
 	arg->peer_vht_caps = vht_cap->cap;
@@ -1657,8 +2326,8 @@
 		__le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
 	arg->peer_vht_rates.tx_max_rate =
 		__le16_to_cpu(vht_cap->vht_mcs.tx_highest);
-	arg->peer_vht_rates.tx_mcs_set =
-		__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
+	arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
+		__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
 
 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
 		   sta->addr, arg->peer_max_mpdu, arg->peer_flags);
@@ -1697,10 +2366,10 @@
 		   sta->addr, !!(arg->peer_flags & WMI_PEER_QOS));
 }
 
-static bool ath10k_mac_sta_has_11g_rates(struct ieee80211_sta *sta)
+static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
 {
-	/* First 4 rates in ath10k_rates are CCK (11b) rates. */
-	return sta->supp_rates[IEEE80211_BAND_2GHZ] >> 4;
+	return sta->supp_rates[IEEE80211_BAND_2GHZ] >>
+	       ATH10K_MAC_FIRST_OFDM_RATE_IDX;
 }
 
 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
@@ -1708,21 +2377,35 @@
 					struct ieee80211_sta *sta,
 					struct wmi_peer_assoc_complete_arg *arg)
 {
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct cfg80211_chan_def def;
+	enum ieee80211_band band;
+	const u8 *ht_mcs_mask;
+	const u16 *vht_mcs_mask;
 	enum wmi_phy_mode phymode = MODE_UNKNOWN;
 
-	switch (ar->hw->conf.chandef.chan->band) {
+	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+		return;
+
+	band = def.chan->band;
+	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+	switch (band) {
 	case IEEE80211_BAND_2GHZ:
-		if (sta->vht_cap.vht_supported) {
+		if (sta->vht_cap.vht_supported &&
+		    !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
 			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
 				phymode = MODE_11AC_VHT40;
 			else
 				phymode = MODE_11AC_VHT20;
-		} else if (sta->ht_cap.ht_supported) {
+		} else if (sta->ht_cap.ht_supported &&
+			   !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
 			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
 				phymode = MODE_11NG_HT40;
 			else
 				phymode = MODE_11NG_HT20;
-		} else if (ath10k_mac_sta_has_11g_rates(sta)) {
+		} else if (ath10k_mac_sta_has_ofdm_only(sta)) {
 			phymode = MODE_11G;
 		} else {
 			phymode = MODE_11B;
@@ -1733,15 +2416,17 @@
 		/*
 		 * Check VHT first.
 		 */
-		if (sta->vht_cap.vht_supported) {
+		if (sta->vht_cap.vht_supported &&
+		    !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
 			if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
 				phymode = MODE_11AC_VHT80;
 			else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
 				phymode = MODE_11AC_VHT40;
 			else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
 				phymode = MODE_11AC_VHT20;
-		} else if (sta->ht_cap.ht_supported) {
-			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+		} else if (sta->ht_cap.ht_supported &&
+			   !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+			if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
 				phymode = MODE_11NA_HT40;
 			else
 				phymode = MODE_11NA_HT20;
@@ -1772,9 +2457,9 @@
 
 	ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
 	ath10k_peer_assoc_h_crypto(ar, vif, arg);
-	ath10k_peer_assoc_h_rates(ar, sta, arg);
-	ath10k_peer_assoc_h_ht(ar, sta, arg);
-	ath10k_peer_assoc_h_vht(ar, sta, arg);
+	ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
+	ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
+	ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
 	ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
 	ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
 
@@ -1993,6 +2678,8 @@
 	}
 
 	arvif->is_up = false;
+
+	cancel_delayed_work_sync(&arvif->connection_loss_work);
 }
 
 static int ath10k_station_assoc(struct ath10k *ar,
@@ -2013,7 +2700,6 @@
 		return ret;
 	}
 
-	peer_arg.peer_reassoc = reassoc;
 	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
 	if (ret) {
 		ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
@@ -2274,6 +2960,149 @@
 /* TX handlers */
 /***************/
 
+void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
+{
+	lockdep_assert_held(&ar->htt.tx_lock);
+
+	WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
+	ar->tx_paused |= BIT(reason);
+	ieee80211_stop_queues(ar->hw);
+}
+
+static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
+				      struct ieee80211_vif *vif)
+{
+	struct ath10k *ar = data;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+	if (arvif->tx_paused)
+		return;
+
+	ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+}
+
+void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
+{
+	lockdep_assert_held(&ar->htt.tx_lock);
+
+	WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
+	ar->tx_paused &= ~BIT(reason);
+
+	if (ar->tx_paused)
+		return;
+
+	ieee80211_iterate_active_interfaces_atomic(ar->hw,
+						   IEEE80211_IFACE_ITER_RESUME_ALL,
+						   ath10k_mac_tx_unlock_iter,
+						   ar);
+}
+
+void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
+{
+	struct ath10k *ar = arvif->ar;
+
+	lockdep_assert_held(&ar->htt.tx_lock);
+
+	WARN_ON(reason >= BITS_PER_LONG);
+	arvif->tx_paused |= BIT(reason);
+	ieee80211_stop_queue(ar->hw, arvif->vdev_id);
+}
+
+void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
+{
+	struct ath10k *ar = arvif->ar;
+
+	lockdep_assert_held(&ar->htt.tx_lock);
+
+	WARN_ON(reason >= BITS_PER_LONG);
+	arvif->tx_paused &= ~BIT(reason);
+
+	if (ar->tx_paused)
+		return;
+
+	if (arvif->tx_paused)
+		return;
+
+	ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+}
+
+static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
+					   enum wmi_tlv_tx_pause_id pause_id,
+					   enum wmi_tlv_tx_pause_action action)
+{
+	struct ath10k *ar = arvif->ar;
+
+	lockdep_assert_held(&ar->htt.tx_lock);
+
+	switch (pause_id) {
+	case WMI_TLV_TX_PAUSE_ID_MCC:
+	case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
+	case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
+	case WMI_TLV_TX_PAUSE_ID_AP_PS:
+	case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
+		switch (action) {
+		case WMI_TLV_TX_PAUSE_ACTION_STOP:
+			ath10k_mac_vif_tx_lock(arvif, pause_id);
+			break;
+		case WMI_TLV_TX_PAUSE_ACTION_WAKE:
+			ath10k_mac_vif_tx_unlock(arvif, pause_id);
+			break;
+		default:
+			ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
+				    action, arvif->vdev_id);
+			break;
+		}
+		break;
+	case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
+	case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
+	case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
+	case WMI_TLV_TX_PAUSE_ID_HOST:
+	default:
+		/* FIXME: Some pause_ids aren't vdev specific. Instead they
+		 * target peer_id and tid. Implementing these could improve
+		 * traffic scheduling fairness across multiple connected
+		 * stations in AP/IBSS modes.
+		 */
+		ath10k_dbg(ar, ATH10K_DBG_MAC,
+			   "mac ignoring unsupported tx pause vdev %i id %d\n",
+			   arvif->vdev_id, pause_id);
+		break;
+	}
+}
+
+struct ath10k_mac_tx_pause {
+	u32 vdev_id;
+	enum wmi_tlv_tx_pause_id pause_id;
+	enum wmi_tlv_tx_pause_action action;
+};
+
+static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
+					    struct ieee80211_vif *vif)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ath10k_mac_tx_pause *arg = data;
+
+	ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
+}
+
+void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
+				enum wmi_tlv_tx_pause_id pause_id,
+				enum wmi_tlv_tx_pause_action action)
+{
+	struct ath10k_mac_tx_pause arg = {
+		.vdev_id = vdev_id,
+		.pause_id = pause_id,
+		.action = action,
+	};
+
+	spin_lock_bh(&ar->htt.tx_lock);
+	ieee80211_iterate_active_interfaces_atomic(ar->hw,
+						   IEEE80211_IFACE_ITER_RESUME_ALL,
+						   ath10k_mac_handle_tx_pause_iter,
+						   &arg);
+	spin_unlock_bh(&ar->htt.tx_lock);
+}
+
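The caller of ath10k_mac_handle_tx_pause() is not part of this hunk; a sketch of how the wmi-tlv event path presumably fans a vdev map out to it (the map walk is an assumption):

	static void example_tx_pause_ev(struct ath10k *ar, u32 vdev_map,
					enum wmi_tlv_tx_pause_id pause_id,
					enum wmi_tlv_tx_pause_action action)
	{
		u32 vdev_id;

		/* every set bit names one paused/resumed vdev */
		for (vdev_id = 0; vdev_map; vdev_id++, vdev_map >>= 1) {
			if (!(vdev_map & 1))
				continue;

			ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id,
						   action);
		}
	}
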
 static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
 {
 	if (ieee80211_is_mgmt(hdr->frame_control))
@@ -2300,6 +3129,52 @@
 	return 0;
 }
 
+static enum ath10k_hw_txrx_mode
+ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta, struct sk_buff *skb)
+{
+	const struct ieee80211_hdr *hdr = (void *)skb->data;
+	__le16 fc = hdr->frame_control;
+
+	if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
+		return ATH10K_HW_TXRX_RAW;
+
+	if (ieee80211_is_mgmt(fc))
+		return ATH10K_HW_TXRX_MGMT;
+
+	/* Workaround:
+	 *
+	 * NullFunc frames are mostly used to ping whether a client or AP is
+	 * still reachable and responsive. This implies tx status reports must
+	 * be accurate - otherwise either mac80211 or userspace (e.g. hostapd)
+	 * may wrongly conclude that the other end has disappeared and tear
+	 * down the BSS connection, or may never disconnect at all (which is
+	 * what actually happens).
+	 *
+	 * Firmware with HTT older than 3.0 delivers incorrect tx status for
+	 * NullFunc frames to the driver. However there's an HTT Mgmt Tx
+	 * command which seems to deliver correct tx reports for NullFunc
+	 * frames. The downside of using it is that it ignores client
+	 * powersave state, so it can end up disconnecting sleeping clients in
+	 * AP mode. It should fix STA mode though, because APs don't sleep.
+	 */
+	if (ar->htt.target_version_major < 3 &&
+	    (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
+	    !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features))
+		return ATH10K_HW_TXRX_MGMT;
+
+	/* Workaround:
+	 *
+	 * Some wmi-tlv firmware revisions for qca6174 have broken Tx key
+	 * selection for the NativeWifi txmode - they select the AP key
+	 * instead of the peer key. Key selection seems to work with the
+	 * Ethernet txmode, so use that instead.
+	 */
+	if (ieee80211_is_data_present(fc) && sta && sta->tdls)
+		return ATH10K_HW_TXRX_ETHERNET;
+
+	return ATH10K_HW_TXRX_NATIVE_WIFI;
+}
+
 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
  * Control in the header.
  */
@@ -2317,16 +3192,42 @@
 		skb->data, (void *)qos_ctl - (void *)skb->data);
 	skb_pull(skb, IEEE80211_QOS_CTL_LEN);
 
-	/* Fw/Hw generates a corrupted QoS Control Field for QoS NullFunc
-	 * frames. Powersave is handled by the fw/hw so QoS NyllFunc frames are
-	 * used only for CQM purposes (e.g. hostapd station keepalive ping) so
-	 * it is safe to downgrade to NullFunc.
+	/* Some firmware revisions don't handle sending QoS NullFunc well.
+	 * These frames are mainly used for CQM purposes so it doesn't really
+	 * matter whether QoS NullFunc or NullFunc are sent.
 	 */
 	hdr = (void *)skb->data;
-	if (ieee80211_is_qos_nullfunc(hdr->frame_control)) {
-		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
 		cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
-	}
+
+	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+}
+
+static void ath10k_tx_h_8023(struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr;
+	struct rfc1042_hdr *rfc1042;
+	struct ethhdr *eth;
+	size_t hdrlen;
+	u8 da[ETH_ALEN];
+	u8 sa[ETH_ALEN];
+	__be16 type;
+
+	hdr = (void *)skb->data;
+	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+	rfc1042 = (void *)skb->data + hdrlen;
+
+	ether_addr_copy(da, ieee80211_get_DA(hdr));
+	ether_addr_copy(sa, ieee80211_get_SA(hdr));
+	type = rfc1042->snap_type;
+
+	skb_pull(skb, hdrlen + sizeof(*rfc1042));
+	skb_push(skb, sizeof(*eth));
+
+	eth = (void *)skb->data;
+	ether_addr_copy(eth->h_dest, da);
+	ether_addr_copy(eth->h_source, sa);
+	eth->h_proto = type;
 }
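For orientation, a worked byte count for the rewrite above, assuming a 26-byte QoS data header and the 8-byte LLC/SNAP header:

	/* in:  [802.11 hdr, 26 B][LLC/SNAP, 8 B][payload]
	 * out: [dst 6 B][src 6 B][ethertype 2 B][payload]
	 *
	 * skb_pull(26 + 8) followed by skb_push(14) shrinks the frame by
	 * 20 bytes; the payload itself is never copied or moved.
	 */
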
 
 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
@@ -2365,45 +3266,51 @@
 		 ar->htt.target_version_minor >= 4);
 }
 
-static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
 {
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
 	int ret = 0;
 
-	if (ar->htt.target_version_major >= 3) {
-		/* Since HTT 3.0 there is no separate mgmt tx command */
-		ret = ath10k_htt_tx(&ar->htt, skb);
-		goto exit;
+	spin_lock_bh(&ar->data_lock);
+
+	if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
+		ath10k_warn(ar, "wmi mgmt tx queue is full\n");
+		ret = -ENOSPC;
+		goto unlock;
 	}
 
-	if (ieee80211_is_mgmt(hdr->frame_control)) {
+	__skb_queue_tail(q, skb);
+	ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+
+unlock:
+	spin_unlock_bh(&ar->data_lock);
+
+	return ret;
+}
+
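The matching consumer lives outside this hunk; a sketch of what the queued worker is expected to do (error handling and tx-credit accounting omitted, helper name illustrative):

	static void example_wmi_mgmt_tx_work(struct ath10k *ar)
	{
		struct sk_buff *skb;

		for (;;) {
			skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
			if (!skb)
				break;

			/* hand the frame to firmware over WMI */
			ath10k_wmi_mgmt_tx(ar, skb);
		}
	}
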
+static void ath10k_mac_tx(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+	struct ath10k_htt *htt = &ar->htt;
+	int ret = 0;
+
+	switch (cb->txmode) {
+	case ATH10K_HW_TXRX_RAW:
+	case ATH10K_HW_TXRX_NATIVE_WIFI:
+	case ATH10K_HW_TXRX_ETHERNET:
+		ret = ath10k_htt_tx(htt, skb);
+		break;
+	case ATH10K_HW_TXRX_MGMT:
 		if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
-			     ar->fw_features)) {
-			if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
-			    ATH10K_MAX_NUM_MGMT_PENDING) {
-				ath10k_warn(ar, "reached WMI management transmit queue limit\n");
-				ret = -EBUSY;
-				goto exit;
-			}
-
-			skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb);
-			ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
-		} else {
-			ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
-		}
-	} else if (!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
-			     ar->fw_features) &&
-		   ieee80211_is_nullfunc(hdr->frame_control)) {
-		/* FW does not report tx status properly for NullFunc frames
-		 * unless they are sent through mgmt tx path. mac80211 sends
-		 * those frames when it detects link/beacon loss and depends
-		 * on the tx status to be correct. */
-		ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
-	} else {
-		ret = ath10k_htt_tx(&ar->htt, skb);
+			     ar->fw_features))
+			ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+		else if (ar->htt.target_version_major >= 3)
+			ret = ath10k_htt_tx(htt, skb);
+		else
+			ret = ath10k_htt_mgmt_tx(htt, skb);
+		break;
 	}
 
-exit:
 	if (ret) {
 		ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
 			    ret);
@@ -2433,6 +3340,7 @@
 	const u8 *peer_addr;
 	int vdev_id;
 	int ret;
+	unsigned long time_left;
 
 	/* FW requirement: We must create a peer before FW will send out
 	 * an offchannel frame. Otherwise the frame will be stuck and
@@ -2465,7 +3373,8 @@
 				   peer_addr, vdev_id);
 
 		if (!peer) {
-			ret = ath10k_peer_create(ar, vdev_id, peer_addr);
+			ret = ath10k_peer_create(ar, vdev_id, peer_addr,
+						 WMI_PEER_TYPE_DEFAULT);
 			if (ret)
 				ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
 					    peer_addr, vdev_id, ret);
@@ -2476,11 +3385,11 @@
 		ar->offchan_tx_skb = skb;
 		spin_unlock_bh(&ar->data_lock);
 
-		ath10k_tx_htt(ar, skb);
+		ath10k_mac_tx(ar, skb);
 
-		ret = wait_for_completion_timeout(&ar->offchan_tx_completed,
-						  3 * HZ);
-		if (ret == 0)
+		time_left = wait_for_completion_timeout(&ar->offchan_tx_completed,
+							3 * HZ);
+		if (time_left == 0)
 			ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
 				    skb);
 
@@ -2700,21 +3609,38 @@
 	struct ath10k *ar = hw->priv;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_vif *vif = info->control.vif;
+	struct ieee80211_sta *sta = control->sta;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	__le16 fc = hdr->frame_control;
 
 	/* We should disable CCK RATE due to P2P */
 	if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
 		ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
 
 	ATH10K_SKB_CB(skb)->htt.is_offchan = false;
+	ATH10K_SKB_CB(skb)->htt.freq = 0;
 	ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
 	ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
+	ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb);
+	ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc);
 
-	/* it makes no sense to process injected frames like that */
-	if (vif && vif->type != NL80211_IFTYPE_MONITOR) {
+	switch (ATH10K_SKB_CB(skb)->txmode) {
+	case ATH10K_HW_TXRX_MGMT:
+	case ATH10K_HW_TXRX_NATIVE_WIFI:
 		ath10k_tx_h_nwifi(hw, skb);
 		ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
 		ath10k_tx_h_seq_no(vif, skb);
+		break;
+	case ATH10K_HW_TXRX_ETHERNET:
+		ath10k_tx_h_8023(skb);
+		break;
+	case ATH10K_HW_TXRX_RAW:
+		/* FIXME: Packet injection isn't implemented. It should be
+		 * doable with firmware 10.2 on qca988x.
+		 */
+		WARN_ON_ONCE(1);
+		ieee80211_free_txskb(hw, skb);
+		return;
 	}
 
 	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
@@ -2736,7 +3662,7 @@
 		}
 	}
 
-	ath10k_tx_htt(ar, skb);
+	ath10k_mac_tx(ar, skb);
 }
 
 /* Must not be called with conf_mutex held as workers can use that also. */
@@ -2761,11 +3687,13 @@
 	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
 	ar->filter_flags = 0;
 	ar->monitor = false;
+	ar->monitor_arvif = NULL;
 
 	if (ar->monitor_started)
 		ath10k_monitor_stop(ar);
 
 	ar->monitor_started = false;
+	ar->tx_paused = 0;
 
 	ath10k_scan_finish(ar);
 	ath10k_peer_cleanup_all(ar);
@@ -2859,6 +3787,7 @@
 static int ath10k_start(struct ieee80211_hw *hw)
 {
 	struct ath10k *ar = hw->priv;
+	u32 burst_enable;
 	int ret = 0;
 
 	/*
@@ -2913,6 +3842,24 @@
 		goto err_core_stop;
 	}
 
+	if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
+		ret = ath10k_wmi_adaptive_qcs(ar, true);
+		if (ret) {
+			ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
+				    ret);
+			goto err_core_stop;
+		}
+	}
+
+	if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
+		burst_enable = ar->wmi.pdev_param->burst_enable;
+		ret = ath10k_wmi_pdev_set_param(ar, burst_enable, 0);
+		if (ret) {
+			ath10k_warn(ar, "failed to disable burst: %d\n", ret);
+			goto err_core_stop;
+		}
+	}
+
 	if (ar->cfg_tx_chainmask)
 		__ath10k_set_antenna(ar, ar->cfg_tx_chainmask,
 				     ar->cfg_rx_chainmask);
@@ -2934,10 +3881,21 @@
 		goto err_core_stop;
 	}
 
+	ret = ath10k_wmi_pdev_set_param(ar,
+					ar->wmi.pdev_param->ani_enable, 1);
+	if (ret) {
+		ath10k_warn(ar, "failed to enable ani by default: %d\n",
+			    ret);
+		goto err_core_stop;
+	}
+
+	ar->ani_enabled = true;
+
 	ar->num_started_vdevs = 0;
 	ath10k_regd_update(ar);
 
 	ath10k_spectral_start(ar);
+	ath10k_thermal_set_throttling(ar);
 
 	mutex_unlock(&ar->conf_mutex);
 	return 0;
@@ -2991,97 +3949,6 @@
 	return ret;
 }
 
-static const char *chandef_get_width(enum nl80211_chan_width width)
-{
-	switch (width) {
-	case NL80211_CHAN_WIDTH_20_NOHT:
-		return "20 (noht)";
-	case NL80211_CHAN_WIDTH_20:
-		return "20";
-	case NL80211_CHAN_WIDTH_40:
-		return "40";
-	case NL80211_CHAN_WIDTH_80:
-		return "80";
-	case NL80211_CHAN_WIDTH_80P80:
-		return "80+80";
-	case NL80211_CHAN_WIDTH_160:
-		return "160";
-	case NL80211_CHAN_WIDTH_5:
-		return "5";
-	case NL80211_CHAN_WIDTH_10:
-		return "10";
-	}
-	return "?";
-}
-
-static void ath10k_config_chan(struct ath10k *ar)
-{
-	struct ath10k_vif *arvif;
-	int ret;
-
-	lockdep_assert_held(&ar->conf_mutex);
-
-	ath10k_dbg(ar, ATH10K_DBG_MAC,
-		   "mac config channel to %dMHz (cf1 %dMHz cf2 %dMHz width %s)\n",
-		   ar->chandef.chan->center_freq,
-		   ar->chandef.center_freq1,
-		   ar->chandef.center_freq2,
-		   chandef_get_width(ar->chandef.width));
-
-	/* First stop monitor interface. Some FW versions crash if there's a
-	 * lone monitor interface. */
-	if (ar->monitor_started)
-		ath10k_monitor_stop(ar);
-
-	list_for_each_entry(arvif, &ar->arvifs, list) {
-		if (!arvif->is_started)
-			continue;
-
-		if (!arvif->is_up)
-			continue;
-
-		if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
-			continue;
-
-		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
-		if (ret) {
-			ath10k_warn(ar, "failed to down vdev %d: %d\n",
-				    arvif->vdev_id, ret);
-			continue;
-		}
-	}
-
-	/* all vdevs are downed now - attempt to restart and re-up them */
-
-	list_for_each_entry(arvif, &ar->arvifs, list) {
-		if (!arvif->is_started)
-			continue;
-
-		if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
-			continue;
-
-		ret = ath10k_vdev_restart(arvif);
-		if (ret) {
-			ath10k_warn(ar, "failed to restart vdev %d: %d\n",
-				    arvif->vdev_id, ret);
-			continue;
-		}
-
-		if (!arvif->is_up)
-			continue;
-
-		ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
-					 arvif->bssid);
-		if (ret) {
-			ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
-				    arvif->vdev_id, ret);
-			continue;
-		}
-	}
-
-	ath10k_monitor_recalc(ar);
-}
-
 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
 {
 	int ret;
@@ -3147,26 +4014,6 @@
 
 	mutex_lock(&ar->conf_mutex);
 
-	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
-		ath10k_dbg(ar, ATH10K_DBG_MAC,
-			   "mac config channel %dMHz flags 0x%x radar %d\n",
-			   conf->chandef.chan->center_freq,
-			   conf->chandef.chan->flags,
-			   conf->radar_enabled);
-
-		spin_lock_bh(&ar->data_lock);
-		ar->rx_channel = conf->chandef.chan;
-		spin_unlock_bh(&ar->data_lock);
-
-		ar->radar_enabled = conf->radar_enabled;
-		ath10k_recalc_radar_detection(ar);
-
-		if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) {
-			ar->chandef = conf->chandef;
-			ath10k_config_chan(ar);
-		}
-	}
-
 	if (changed & IEEE80211_CONF_CHANGE_PS)
 		ath10k_config_ps(ar);
 
@@ -3208,6 +4055,7 @@
 	int ret = 0;
 	u32 value;
 	int bit;
+	int i;
 	u32 vdev_param;
 
 	vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
@@ -3220,6 +4068,17 @@
 	arvif->vif = vif;
 
 	INIT_LIST_HEAD(&arvif->list);
+	INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
+	INIT_DELAYED_WORK(&arvif->connection_loss_work,
+			  ath10k_mac_vif_sta_connection_loss_work);
+
+	for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+		arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+		memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+		       sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+		memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+		       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+	}
 
 	if (ar->free_vdev_map == 0) {
 		ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
@@ -3262,6 +4121,15 @@
 		break;
 	}
 
+	/* Using vdev_id as the queue number makes it very easy to do per-vif
+	 * tx queue locking. This shouldn't wrap given the allowed interface
+	 * combinations, but take the modulo for correctness' sake and to
+	 * prevent regular vif tx from using the offchannel tx queue.
+	 */
+	vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
+	for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+		vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
+
 	/* Some firmware revisions don't wait for beacon tx completion before
 	 * sending another SWBA event. This could lead to hardware using old
 	 * (freed) beacon data in some cases, e.g. tx credit starvation
@@ -3343,14 +4211,18 @@
 		}
 	}
 
-	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
-		ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+		ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr,
+					 WMI_PEER_TYPE_DEFAULT);
 		if (ret) {
-			ath10k_warn(ar, "failed to create vdev %i peer for AP: %d\n",
+			ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
 				    arvif->vdev_id, ret);
 			goto err_vdev_delete;
 		}
+	}
 
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
 		ret = ath10k_mac_set_kickout(arvif);
 		if (ret) {
 			ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
@@ -3406,11 +4278,21 @@
 		goto err_peer_delete;
 	}
 
+	if (vif->type == NL80211_IFTYPE_MONITOR) {
+		ar->monitor_arvif = arvif;
+		ret = ath10k_monitor_recalc(ar);
+		if (ret) {
+			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+			goto err_peer_delete;
+		}
+	}
+
 	mutex_unlock(&ar->conf_mutex);
 	return 0;
 
 err_peer_delete:
-	if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
 		ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
 
 err_vdev_delete:
@@ -3430,6 +4312,14 @@
 	return ret;
 }
 
+static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
+{
+	int i;
+
+	for (i = 0; i < BITS_PER_LONG; i++)
+		ath10k_mac_vif_tx_unlock(arvif, i);
+}
+
 static void ath10k_remove_interface(struct ieee80211_hw *hw,
 				    struct ieee80211_vif *vif)
 {
@@ -3437,6 +4327,9 @@
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
 	int ret;
 
+	cancel_work_sync(&arvif->ap_csa_work);
+	cancel_delayed_work_sync(&arvif->connection_loss_work);
+
 	mutex_lock(&ar->conf_mutex);
 
 	spin_lock_bh(&ar->data_lock);
@@ -3451,11 +4344,12 @@
 	ar->free_vdev_map |= 1LL << arvif->vdev_id;
 	list_del(&arvif->list);
 
-	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
 		ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
 					     vif->addr);
 		if (ret)
-			ath10k_warn(ar, "failed to submit AP self-peer removal on vdev %i: %d\n",
+			ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
 				    arvif->vdev_id, ret);
 
 		kfree(arvif->u.ap.noa_data);
@@ -3472,7 +4366,8 @@
 	/* Some firmware revisions don't notify host about self-peer removal
 	 * until after associated vdev is deleted.
 	 */
-	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
 		ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
 						   vif->addr);
 		if (ret)
@@ -3486,6 +4381,17 @@
 
 	ath10k_peer_cleanup(ar, arvif->vdev_id);
 
+	if (vif->type == NL80211_IFTYPE_MONITOR) {
+		ar->monitor_arvif = NULL;
+		ret = ath10k_monitor_recalc(ar);
+		if (ret)
+			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+	}
+
+	spin_lock_bh(&ar->htt.tx_lock);
+	ath10k_mac_vif_tx_unlock_all(arvif);
+	spin_unlock_bh(&ar->htt.tx_lock);
+
 	mutex_unlock(&ar->conf_mutex);
 }
 
@@ -3493,8 +4399,7 @@
  * FIXME: Has to be verified.
  */
 #define SUPPORTED_FILTERS			\
-	(FIF_PROMISC_IN_BSS |			\
-	FIF_ALLMULTI |				\
+	(FIF_ALLMULTI |				\
 	FIF_CONTROL |				\
 	FIF_PSPOLL |				\
 	FIF_OTHER_BSS |				\
@@ -3615,6 +4520,13 @@
 		if (ret)
 			ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
 				    arvif->vdev_id, ret);
+
+		vdev_param = ar->wmi.vdev_param->protection_mode;
+		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+						info->use_cts_prot ? 1 : 0);
+		if (ret)
+			ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
+				    info->use_cts_prot, arvif->vdev_id, ret);
 	}
 
 	if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -3791,10 +4703,14 @@
 	 * frames with multi-vif APs. This is not required for main firmware
 	 * branch (e.g. 636).
 	 *
-	 * FIXME: This has been tested only in AP. It remains unknown if this
-	 * is required for multi-vif STA interfaces on 10.1 */
+	 * This is also needed for 636 fw for IBSS-RSN to work more reliably.
+	 *
+	 * FIXME: It remains unknown if this is required for multi-vif STA
+	 * interfaces on 10.1.
+	 */
 
-	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+	if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+	    arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
 		return;
 
 	if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
@@ -3826,8 +4742,14 @@
 	const u8 *peer_addr;
 	bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
 		      key->cipher == WLAN_CIPHER_SUITE_WEP104;
-	bool def_idx = false;
 	int ret = 0;
+	int ret2;
+	u32 flags = 0;
+	u32 flags2;
+
+	/* this one needs to be done in software */
+	if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+		return 1;
 
 	if (key->keyidx > WMI_MAX_KEY_INDEX)
 		return -ENOSPC;
@@ -3843,6 +4765,13 @@
 
 	key->hw_key_idx = key->keyidx;
 
+	if (is_wep) {
+		if (cmd == SET_KEY)
+			arvif->wep_keys[key->keyidx] = key;
+		else
+			arvif->wep_keys[key->keyidx] = NULL;
+	}
+
 	/* the peer should not disappear in mid-way (unless FW goes awry) since
 	 * we already hold conf_mutex. we just make sure its there now. */
 	spin_lock_bh(&ar->data_lock);
@@ -3862,30 +4791,61 @@
 		}
 	}
 
-	if (is_wep) {
-		if (cmd == SET_KEY)
-			arvif->wep_keys[key->keyidx] = key;
-		else
-			arvif->wep_keys[key->keyidx] = NULL;
+	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+		flags |= WMI_KEY_PAIRWISE;
+	else
+		flags |= WMI_KEY_GROUP;
 
+	if (is_wep) {
 		if (cmd == DISABLE_KEY)
 			ath10k_clear_vdev_key(arvif, key);
+
+		/* When WEP keys are uploaded it's possible that stations are
+		 * associated already without any keys (e.g. when IBSS
+		 * networks merge). Static WEP needs an explicit per-peer key
+		 * upload.
+		 */
+		if (vif->type == NL80211_IFTYPE_ADHOC &&
+		    cmd == SET_KEY)
+			ath10k_mac_vif_update_wep_key(arvif, key);
+
+		/* 802.1x never sets the def_wep_key_idx so each set_key()
+		 * call changes the default tx key.
+		 *
+		 * Static WEP sets def_wep_key_idx via .set_default_unicast_key
+		 * after the first set_key().
+		 */
+		if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
+			flags |= WMI_KEY_TX_USAGE;
 	}
 
-	/* set TX_USAGE flag for all the keys incase of dot1x-WEP. For
-	 * static WEP, do not set this flag for the keys whose key id
-	 * is  greater than default key id.
-	 */
-	if (arvif->def_wep_key_idx == -1)
-		def_idx = true;
-
-	ret = ath10k_install_key(arvif, key, cmd, peer_addr, def_idx);
+	ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
 	if (ret) {
 		ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
 			    arvif->vdev_id, peer_addr, ret);
 		goto exit;
 	}
 
+	/* mac80211 sets static WEP keys as groupwise while firmware requires
+	 * them to be installed twice as both pairwise and groupwise.
+	 */
+	if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
+		flags2 = flags;
+		flags2 &= ~WMI_KEY_GROUP;
+		flags2 |= WMI_KEY_PAIRWISE;
+
+		ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
+		if (ret) {
+			ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
+				    arvif->vdev_id, peer_addr, ret);
+			ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
+						  peer_addr, flags);
+			if (ret2)
+				ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
+					    arvif->vdev_id, peer_addr, ret2);
+			goto exit;
+		}
+	}
+
 	ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
 
 	spin_lock_bh(&ar->data_lock);
@@ -3933,6 +4893,7 @@
 	}
 
 	arvif->def_wep_key_idx = keyidx;
+
 unlock:
 	mutex_unlock(&arvif->ar->conf_mutex);
 }
@@ -3943,6 +4904,10 @@
 	struct ath10k_vif *arvif;
 	struct ath10k_sta *arsta;
 	struct ieee80211_sta *sta;
+	struct cfg80211_chan_def def;
+	enum ieee80211_band band;
+	const u8 *ht_mcs_mask;
+	const u16 *vht_mcs_mask;
 	u32 changed, bw, nss, smps;
 	int err;
 
@@ -3951,6 +4916,13 @@
 	arvif = arsta->arvif;
 	ar = arvif->ar;
 
+	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+		return;
+
+	band = def.chan->band;
+	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
 	spin_lock_bh(&ar->data_lock);
 
 	changed = arsta->changed;
@@ -3964,6 +4936,10 @@
 
 	mutex_lock(&ar->conf_mutex);
 
+	nss = max_t(u32, 1, nss);
+	nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
+			   ath10k_mac_max_vht_nss(vht_mcs_mask)));
+
 	if (changed & IEEE80211_RC_BW_CHANGED) {
 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
 			   sta->addr, bw);
@@ -4011,14 +4987,14 @@
 	mutex_unlock(&ar->conf_mutex);
 }
 
-static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif)
+static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
+				       struct ieee80211_sta *sta)
 {
 	struct ath10k *ar = arvif->ar;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
-	    arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+	if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
 		return 0;
 
 	if (ar->num_stations >= ar->max_num_stations)
@@ -4029,19 +5005,72 @@
 	return 0;
 }
 
-static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif)
+static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
+					struct ieee80211_sta *sta)
 {
 	struct ath10k *ar = arvif->ar;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
-	    arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+	if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
 		return;
 
 	ar->num_stations--;
 }
 
+struct ath10k_mac_tdls_iter_data {
+	u32 num_tdls_stations;
+	struct ieee80211_vif *curr_vif;
+};
+
+static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
+						    struct ieee80211_sta *sta)
+{
+	struct ath10k_mac_tdls_iter_data *iter_data = data;
+	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	struct ieee80211_vif *sta_vif = arsta->arvif->vif;
+
+	if (sta->tdls && sta_vif == iter_data->curr_vif)
+		iter_data->num_tdls_stations++;
+}
+
+static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
+					      struct ieee80211_vif *vif)
+{
+	struct ath10k_mac_tdls_iter_data data = {};
+
+	data.curr_vif = vif;
+
+	ieee80211_iterate_stations_atomic(hw,
+					  ath10k_mac_tdls_vif_stations_count_iter,
+					  &data);
+	return data.num_tdls_stations;
+}
+
+static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
+					    struct ieee80211_vif *vif)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	int *num_tdls_vifs = data;
+
+	if (vif->type != NL80211_IFTYPE_STATION)
+		return;
+
+	if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
+		(*num_tdls_vifs)++;
+}
+
+static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
+{
+	int num_tdls_vifs = 0;
+
+	ieee80211_iterate_active_interfaces_atomic(hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   ath10k_mac_tdls_vifs_count_iter,
+						   &num_tdls_vifs);
+	return num_tdls_vifs;
+}
+
 static int ath10k_sta_state(struct ieee80211_hw *hw,
 			    struct ieee80211_vif *vif,
 			    struct ieee80211_sta *sta,
@@ -4072,41 +5101,80 @@
 		/*
 		 * New station addition.
 		 */
+		enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
+		u32 num_tdls_stations;
+		u32 num_tdls_vifs;
+
 		ath10k_dbg(ar, ATH10K_DBG_MAC,
 			   "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
 			   arvif->vdev_id, sta->addr,
 			   ar->num_stations + 1, ar->max_num_stations,
 			   ar->num_peers + 1, ar->max_num_peers);
 
-		ret = ath10k_mac_inc_num_stations(arvif);
+		ret = ath10k_mac_inc_num_stations(arvif, sta);
 		if (ret) {
 			ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
 				    ar->max_num_stations);
 			goto exit;
 		}
 
-		ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
+		if (sta->tdls)
+			peer_type = WMI_PEER_TYPE_TDLS;
+
+		ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr,
+					 peer_type);
 		if (ret) {
 			ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
 				    sta->addr, arvif->vdev_id, ret);
-			ath10k_mac_dec_num_stations(arvif);
+			ath10k_mac_dec_num_stations(arvif, sta);
 			goto exit;
 		}
 
-		if (vif->type == NL80211_IFTYPE_STATION) {
-			WARN_ON(arvif->is_started);
+		if (!sta->tdls)
+			goto exit;
 
-			ret = ath10k_vdev_start(arvif);
+		num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
+		num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
+
+		if (num_tdls_vifs >= ar->max_num_tdls_vdevs &&
+		    num_tdls_stations == 0) {
+			ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
+				    arvif->vdev_id, ar->max_num_tdls_vdevs);
+			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+			ath10k_mac_dec_num_stations(arvif, sta);
+			ret = -ENOBUFS;
+			goto exit;
+		}
+
+		if (num_tdls_stations == 0) {
+			/* This is the first TDLS peer on the current vif */
+			enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE;
+
+			ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+							      state);
 			if (ret) {
-				ath10k_warn(ar, "failed to start vdev %i: %d\n",
+				ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
 					    arvif->vdev_id, ret);
-				WARN_ON(ath10k_peer_delete(ar, arvif->vdev_id,
-							   sta->addr));
-				ath10k_mac_dec_num_stations(arvif);
+				ath10k_peer_delete(ar, arvif->vdev_id,
+						   sta->addr);
+				ath10k_mac_dec_num_stations(arvif, sta);
 				goto exit;
 			}
+		}
 
-			arvif->is_started = true;
+		ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
+						  WMI_TDLS_PEER_STATE_PEERING);
+		if (ret) {
+			ath10k_warn(ar,
+				    "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
+				    sta->addr, arvif->vdev_id, ret);
+			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+			ath10k_mac_dec_num_stations(arvif, sta);
+
+			if (num_tdls_stations != 0)
+				goto exit;
+			ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+							WMI_TDLS_DISABLE);
 		}
 	} else if ((old_state == IEEE80211_STA_NONE &&
 		    new_state == IEEE80211_STA_NOTEXIST)) {
@@ -4117,23 +5185,26 @@
 			   "mac vdev %d peer delete %pM (sta gone)\n",
 			   arvif->vdev_id, sta->addr);
 
-		if (vif->type == NL80211_IFTYPE_STATION) {
-			WARN_ON(!arvif->is_started);
-
-			ret = ath10k_vdev_stop(arvif);
-			if (ret)
-				ath10k_warn(ar, "failed to stop vdev %i: %d\n",
-					    arvif->vdev_id, ret);
-
-			arvif->is_started = false;
-		}
-
 		ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
 		if (ret)
 			ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
 				    sta->addr, arvif->vdev_id, ret);
 
-		ath10k_mac_dec_num_stations(arvif);
+		ath10k_mac_dec_num_stations(arvif, sta);
+
+		if (!sta->tdls)
+			goto exit;
+
+		if (ath10k_mac_tdls_vif_stations_count(hw, vif))
+			goto exit;
+
+		/* This was the last TDLS peer on the current vif */
+		ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+						      WMI_TDLS_DISABLE);
+		if (ret) {
+			ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
+				    arvif->vdev_id, ret);
+		}
 	} else if (old_state == IEEE80211_STA_AUTH &&
 		   new_state == IEEE80211_STA_ASSOC &&
 		   (vif->type == NL80211_IFTYPE_AP ||
@@ -4149,9 +5220,30 @@
 			ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
 				    sta->addr, arvif->vdev_id, ret);
 	} else if (old_state == IEEE80211_STA_ASSOC &&
-		   new_state == IEEE80211_STA_AUTH &&
-		   (vif->type == NL80211_IFTYPE_AP ||
-		    vif->type == NL80211_IFTYPE_ADHOC)) {
+		   new_state == IEEE80211_STA_AUTHORIZED &&
+		   sta->tdls) {
+		/*
+		 * TDLS station authorized.
+		 */
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
+			   sta->addr);
+
+		ret = ath10k_station_assoc(ar, vif, sta, false);
+		if (ret) {
+			ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
+				    sta->addr, arvif->vdev_id, ret);
+			goto exit;
+		}
+
+		ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
+						  WMI_TDLS_PEER_STATE_CONNECTED);
+		if (ret)
+			ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
+				    sta->addr, arvif->vdev_id, ret);
+	} else if (old_state == IEEE80211_STA_ASSOC &&
+		    new_state == IEEE80211_STA_AUTH &&
+		    (vif->type == NL80211_IFTYPE_AP ||
+		     vif->type == NL80211_IFTYPE_ADHOC)) {
 		/*
 		 * Disassociation.
 		 */
@@ -4356,6 +5448,7 @@
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
 	struct wmi_start_scan_arg arg;
 	int ret = 0;
+	u32 scan_time_msec;
 
 	mutex_lock(&ar->conf_mutex);
 
@@ -4382,7 +5475,7 @@
 	if (ret)
 		goto exit;
 
-	duration = max(duration, WMI_SCAN_CHAN_MIN_TIME_MSEC);
+	scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
 
 	memset(&arg, 0, sizeof(arg));
 	ath10k_wmi_start_scan_init(ar, &arg);
@@ -4390,11 +5483,12 @@
 	arg.scan_id = ATH10K_SCAN_ID;
 	arg.n_channels = 1;
 	arg.channels[0] = chan->center_freq;
-	arg.dwell_time_active = duration;
-	arg.dwell_time_passive = duration;
-	arg.max_scan_time = 2 * duration;
+	arg.dwell_time_active = scan_time_msec;
+	arg.dwell_time_passive = scan_time_msec;
+	arg.max_scan_time = scan_time_msec;
 	arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
 	arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+	arg.burst_duration_ms = duration;
 
 	ret = ath10k_start_scan(ar, &arg);
 	if (ret) {
@@ -4417,6 +5511,9 @@
 		goto exit;
 	}
 
+	ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+				     msecs_to_jiffies(duration));
+
 	ret = 0;
 exit:
 	mutex_unlock(&ar->conf_mutex);
@@ -4512,70 +5609,6 @@
 	return 1;
 }
 
-#ifdef CONFIG_PM
-static int ath10k_suspend(struct ieee80211_hw *hw,
-			  struct cfg80211_wowlan *wowlan)
-{
-	struct ath10k *ar = hw->priv;
-	int ret;
-
-	mutex_lock(&ar->conf_mutex);
-
-	ret = ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND);
-	if (ret) {
-		if (ret == -ETIMEDOUT)
-			goto resume;
-		ret = 1;
-		goto exit;
-	}
-
-	ret = ath10k_hif_suspend(ar);
-	if (ret) {
-		ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
-		goto resume;
-	}
-
-	ret = 0;
-	goto exit;
-resume:
-	ret = ath10k_wmi_pdev_resume_target(ar);
-	if (ret)
-		ath10k_warn(ar, "failed to resume target: %d\n", ret);
-
-	ret = 1;
-exit:
-	mutex_unlock(&ar->conf_mutex);
-	return ret;
-}
-
-static int ath10k_resume(struct ieee80211_hw *hw)
-{
-	struct ath10k *ar = hw->priv;
-	int ret;
-
-	mutex_lock(&ar->conf_mutex);
-
-	ret = ath10k_hif_resume(ar);
-	if (ret) {
-		ath10k_warn(ar, "failed to resume hif: %d\n", ret);
-		ret = 1;
-		goto exit;
-	}
-
-	ret = ath10k_wmi_pdev_resume_target(ar);
-	if (ret) {
-		ath10k_warn(ar, "failed to resume target: %d\n", ret);
-		ret = 1;
-		goto exit;
-	}
-
-	ret = 0;
-exit:
-	mutex_unlock(&ar->conf_mutex);
-	return ret;
-}
-#endif
-
 static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
 				     enum ieee80211_reconfig_type reconfig_type)
 {
@@ -4635,345 +5668,288 @@
 	return ret;
 }
 
-/* Helper table for legacy fixed_rate/bitrate_mask */
-static const u8 cck_ofdm_rate[] = {
-	/* CCK */
-	3, /* 1Mbps */
-	2, /* 2Mbps */
-	1, /* 5.5Mbps */
-	0, /* 11Mbps */
-	/* OFDM */
-	3, /* 6Mbps */
-	7, /* 9Mbps */
-	2, /* 12Mbps */
-	6, /* 18Mbps */
-	1, /* 24Mbps */
-	5, /* 36Mbps */
-	0, /* 48Mbps */
-	4, /* 54Mbps */
-};
-
-/* Check if only one bit set */
-static int ath10k_check_single_mask(u32 mask)
+static bool
+ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
+					enum ieee80211_band band,
+					const struct cfg80211_bitrate_mask *mask)
 {
-	int bit;
+	int num_rates = 0;
+	int i;
 
-	bit = ffs(mask);
-	if (!bit)
+	num_rates += hweight32(mask->control[band].legacy);
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
+		num_rates += hweight8(mask->control[band].ht_mcs[i]);
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
+		num_rates += hweight16(mask->control[band].vht_mcs[i]);
+
+	return num_rates == 1;
+}
+
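A worked example of the counting above:

	/* legacy = 0x0010 (one OFDM rate), every ht_mcs[] and vht_mcs[]
	 * zeroed: the hweights sum to exactly 1, so set_bitrate_mask takes
	 * the single fixed-rate path below.
	 */
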
+static bool
+ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
+				       enum ieee80211_band band,
+				       const struct cfg80211_bitrate_mask *mask,
+				       int *nss)
+{
+	struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+	u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+	u8 ht_nss_mask = 0;
+	u8 vht_nss_mask = 0;
+	int i;
+
+	if (mask->control[band].legacy)
+		return false;
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+		if (mask->control[band].ht_mcs[i] == 0)
+			continue;
+		else if (mask->control[band].ht_mcs[i] ==
+			 sband->ht_cap.mcs.rx_mask[i])
+			ht_nss_mask |= BIT(i);
+		else
+			return false;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+		if (mask->control[band].vht_mcs[i] == 0)
+			continue;
+		else if (mask->control[band].vht_mcs[i] ==
+			 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
+			vht_nss_mask |= BIT(i);
+		else
+			return false;
+	}
+
+	if (ht_nss_mask != vht_nss_mask)
+		return false;
+
+	if (ht_nss_mask == 0)
+		return false;
+
+	if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
+		return false;
+
+	*nss = fls(ht_nss_mask);
+
+	return true;
+}
+
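Worked examples for the final contiguity check (the BIT(fls(x)) - 1 == x idiom):

	/* ht_nss_mask = 0b0011: fls() = 2, BIT(2) - 1 = 0b0011 -> nss = 2
	 * ht_nss_mask = 0b0010: fls() = 2, BIT(2) - 1 = 0b0011 -> rejected
	 *
	 * Only masks enabling streams 1..n, each with its full MCS range,
	 * map onto the firmware's single nss knob.
	 */
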
+static int
+ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
+					enum ieee80211_band band,
+					const struct cfg80211_bitrate_mask *mask,
+					u8 *rate, u8 *nss)
+{
+	struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+	int rate_idx;
+	int i;
+	u16 bitrate;
+	u8 preamble;
+	u8 hw_rate;
+
+	if (hweight32(mask->control[band].legacy) == 1) {
+		rate_idx = ffs(mask->control[band].legacy) - 1;
+
+		hw_rate = sband->bitrates[rate_idx].hw_value;
+		bitrate = sband->bitrates[rate_idx].bitrate;
+
+		if (ath10k_mac_bitrate_is_cck(bitrate))
+			preamble = WMI_RATE_PREAMBLE_CCK;
+		else
+			preamble = WMI_RATE_PREAMBLE_OFDM;
+
+		*nss = 1;
+		*rate = preamble << 6 |
+			(*nss - 1) << 4 |
+			hw_rate << 0;
+
 		return 0;
-
-	mask &= ~BIT(bit - 1);
-	if (mask)
-		return 2;
-
-	return 1;
-}
-
-static bool
-ath10k_default_bitrate_mask(struct ath10k *ar,
-			    enum ieee80211_band band,
-			    const struct cfg80211_bitrate_mask *mask)
-{
-	u32 legacy = 0x00ff;
-	u8 ht = 0xff, i;
-	u16 vht = 0x3ff;
-	u16 nrf = ar->num_rf_chains;
-
-	if (ar->cfg_tx_chainmask)
-		nrf = get_nss_from_chainmask(ar->cfg_tx_chainmask);
-
-	switch (band) {
-	case IEEE80211_BAND_2GHZ:
-		legacy = 0x00fff;
-		vht = 0;
-		break;
-	case IEEE80211_BAND_5GHZ:
-		break;
-	default:
-		return false;
 	}
 
-	if (mask->control[band].legacy != legacy)
-		return false;
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+		if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
+			*nss = i + 1;
+			*rate = WMI_RATE_PREAMBLE_HT << 6 |
+				(*nss - 1) << 4 |
+				(ffs(mask->control[band].ht_mcs[i]) - 1);
 
-	for (i = 0; i < nrf; i++)
-		if (mask->control[band].ht_mcs[i] != ht)
-			return false;
-
-	for (i = 0; i < nrf; i++)
-		if (mask->control[band].vht_mcs[i] != vht)
-			return false;
-
-	return true;
-}
-
-static bool
-ath10k_bitrate_mask_nss(const struct cfg80211_bitrate_mask *mask,
-			enum ieee80211_band band,
-			u8 *fixed_nss)
-{
-	int ht_nss = 0, vht_nss = 0, i;
-
-	/* check legacy */
-	if (ath10k_check_single_mask(mask->control[band].legacy))
-		return false;
-
-	/* check HT */
-	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
-		if (mask->control[band].ht_mcs[i] == 0xff)
-			continue;
-		else if (mask->control[band].ht_mcs[i] == 0x00)
-			break;
-
-		return false;
+			return 0;
+		}
 	}
 
-	ht_nss = i;
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+		if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
+			*nss = i + 1;
+			*rate = WMI_RATE_PREAMBLE_VHT << 6 |
+				(*nss - 1) << 4 |
+				(ffs(mask->control[band].vht_mcs[i]) - 1);
 
-	/* check VHT */
-	for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
-		if (mask->control[band].vht_mcs[i] == 0x03ff)
-			continue;
-		else if (mask->control[band].vht_mcs[i] == 0x0000)
-			break;
-
-		return false;
+			return 0;
+		}
 	}
 
-	vht_nss = i;
-
-	if (ht_nss > 0 && vht_nss > 0)
-		return false;
-
-	if (ht_nss)
-		*fixed_nss = ht_nss;
-	else if (vht_nss)
-		*fixed_nss = vht_nss;
-	else
-		return false;
-
-	return true;
+	return -EINVAL;
 }
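The 8-bit WMI rate word assembled above encodes preamble, stream count and rate index; a standalone sketch (preamble ordering per the driver's wmi_rate_preamble enum, taken here as an assumption):

	#include <stdint.h>

	enum { PREAM_OFDM, PREAM_CCK, PREAM_HT, PREAM_VHT };

	/* [7:6] preamble, [5:4] nss - 1, [3:0] hw rate / MCS index.
	 * HT MCS 7 on one spatial stream: 2 << 6 | 0 << 4 | 7 = 0x87.
	 */
	static uint8_t wmi_rate_word(uint8_t pream, uint8_t nss, uint8_t rate)
	{
		return pream << 6 | (nss - 1) << 4 | rate;
	}
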
 
-static bool
-ath10k_bitrate_mask_correct(const struct cfg80211_bitrate_mask *mask,
-			    enum ieee80211_band band,
-			    enum wmi_rate_preamble *preamble)
-{
-	int legacy = 0, ht = 0, vht = 0, i;
-
-	*preamble = WMI_RATE_PREAMBLE_OFDM;
-
-	/* check legacy */
-	legacy = ath10k_check_single_mask(mask->control[band].legacy);
-	if (legacy > 1)
-		return false;
-
-	/* check HT */
-	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
-		ht += ath10k_check_single_mask(mask->control[band].ht_mcs[i]);
-	if (ht > 1)
-		return false;
-
-	/* check VHT */
-	for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
-		vht += ath10k_check_single_mask(mask->control[band].vht_mcs[i]);
-	if (vht > 1)
-		return false;
-
-	/* Currently we support only one fixed_rate */
-	if ((legacy + ht + vht) != 1)
-		return false;
-
-	if (ht)
-		*preamble = WMI_RATE_PREAMBLE_HT;
-	else if (vht)
-		*preamble = WMI_RATE_PREAMBLE_VHT;
-
-	return true;
-}
-
-static bool
-ath10k_bitrate_mask_rate(struct ath10k *ar,
-			 const struct cfg80211_bitrate_mask *mask,
-			 enum ieee80211_band band,
-			 u8 *fixed_rate,
-			 u8 *fixed_nss)
-{
-	u8 rate = 0, pream = 0, nss = 0, i;
-	enum wmi_rate_preamble preamble;
-
-	/* Check if single rate correct */
-	if (!ath10k_bitrate_mask_correct(mask, band, &preamble))
-		return false;
-
-	pream = preamble;
-
-	switch (preamble) {
-	case WMI_RATE_PREAMBLE_CCK:
-	case WMI_RATE_PREAMBLE_OFDM:
-		i = ffs(mask->control[band].legacy) - 1;
-
-		if (band == IEEE80211_BAND_2GHZ && i < 4)
-			pream = WMI_RATE_PREAMBLE_CCK;
-
-		if (band == IEEE80211_BAND_5GHZ)
-			i += 4;
-
-		if (i >= ARRAY_SIZE(cck_ofdm_rate))
-			return false;
-
-		rate = cck_ofdm_rate[i];
-		break;
-	case WMI_RATE_PREAMBLE_HT:
-		for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
-			if (mask->control[band].ht_mcs[i])
-				break;
-
-		if (i == IEEE80211_HT_MCS_MASK_LEN)
-			return false;
-
-		rate = ffs(mask->control[band].ht_mcs[i]) - 1;
-		nss = i;
-		break;
-	case WMI_RATE_PREAMBLE_VHT:
-		for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
-			if (mask->control[band].vht_mcs[i])
-				break;
-
-		if (i == NL80211_VHT_NSS_MAX)
-			return false;
-
-		rate = ffs(mask->control[band].vht_mcs[i]) - 1;
-		nss = i;
-		break;
-	}
-
-	*fixed_nss = nss + 1;
-	nss <<= 4;
-	pream <<= 6;
-
-	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n",
-		   pream, nss, rate);
-
-	*fixed_rate = pream | nss | rate;
-
-	return true;
-}
-
-static bool ath10k_get_fixed_rate_nss(struct ath10k *ar,
-				      const struct cfg80211_bitrate_mask *mask,
-				      enum ieee80211_band band,
-				      u8 *fixed_rate,
-				      u8 *fixed_nss)
-{
-	/* First check full NSS mask, if we can simply limit NSS */
-	if (ath10k_bitrate_mask_nss(mask, band, fixed_nss))
-		return true;
-
-	/* Next Check single rate is set */
-	return ath10k_bitrate_mask_rate(ar, mask, band, fixed_rate, fixed_nss);
-}
-
-static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
-				       u8 fixed_rate,
-				       u8 fixed_nss,
-				       u8 force_sgi)
+static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
+					    u8 rate, u8 nss, u8 sgi)
 {
 	struct ath10k *ar = arvif->ar;
 	u32 vdev_param;
-	int ret = 0;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
+		   arvif->vdev_id, rate, nss, sgi);
+
+	vdev_param = ar->wmi.vdev_param->fixed_rate;
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
+	if (ret) {
+		ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
+			    rate, ret);
+		return ret;
+	}
+
+	vdev_param = ar->wmi.vdev_param->nss;
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
+	if (ret) {
+		ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
+		return ret;
+	}
+
+	vdev_param = ar->wmi.vdev_param->sgi;
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
+	if (ret) {
+		ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static bool
+ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
+				enum ieee80211_band band,
+				const struct cfg80211_bitrate_mask *mask)
+{
+	int i;
+	u16 vht_mcs;
+
+	/* Due to a firmware limitation in WMI_PEER_ASSOC_CMDID it is
+	 * impossible to express every VHT MCS rate mask. Effectively only the
+	 * following ranges can be used: none, 0-7, 0-8 and 0-9.
+	 */
+	for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+		vht_mcs = mask->control[band].vht_mcs[i];
+
+		switch (vht_mcs) {
+		case 0:
+		case BIT(8) - 1:
+		case BIT(9) - 1:
+		case BIT(10) - 1:
+			break;
+		default:
+			ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
+			return false;
+		}
+	}
+
+	return true;
+}
+
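Spelled out, the per-stream VHT masks the check above accepts:

	/* 0x0000 -> stream disabled (NOT_SUPPORTED)
	 * 0x00ff -> MCS 0-7 (BIT(8) - 1)
	 * 0x01ff -> MCS 0-8 (BIT(9) - 1)
	 * 0x03ff -> MCS 0-9 (BIT(10) - 1)
	 *
	 * Anything else, e.g. 0x0080 (MCS 7 alone), cannot be expressed
	 * in WMI_PEER_ASSOC_CMDID and is refused on this path.
	 */
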
+static void ath10k_mac_set_bitrate_mask_iter(void *data,
+					     struct ieee80211_sta *sta)
+{
+	struct ath10k_vif *arvif = data;
+	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	struct ath10k *ar = arvif->ar;
+
+	if (arsta->arvif != arvif)
+		return;
+
+	spin_lock_bh(&ar->data_lock);
+	arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
+	spin_unlock_bh(&ar->data_lock);
+
+	ieee80211_queue_work(ar->hw, &arsta->update_wk);
+}
+
+static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+					  struct ieee80211_vif *vif,
+					  const struct cfg80211_bitrate_mask *mask)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct cfg80211_chan_def def;
+	struct ath10k *ar = arvif->ar;
+	enum ieee80211_band band;
+	const u8 *ht_mcs_mask;
+	const u16 *vht_mcs_mask;
+	u8 rate;
+	u8 nss;
+	u8 sgi;
+	int single_nss;
+	int ret;
+
+	if (ath10k_mac_vif_chan(vif, &def))
+		return -EPERM;
+
+	band = def.chan->band;
+	ht_mcs_mask = mask->control[band].ht_mcs;
+	vht_mcs_mask = mask->control[band].vht_mcs;
+
+	sgi = mask->control[band].gi;
+	if (sgi == NL80211_TXRATE_FORCE_LGI)
+		return -EINVAL;
+
+	if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
+		ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
+							      &rate, &nss);
+		if (ret) {
+			ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+			return ret;
+		}
+	} else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
+							  &single_nss)) {
+		rate = WMI_FIXED_RATE_NONE;
+		nss = single_nss;
+	} else {
+		rate = WMI_FIXED_RATE_NONE;
+		nss = min(ar->num_rf_chains,
+			  max(ath10k_mac_max_ht_nss(ht_mcs_mask),
+			      ath10k_mac_max_vht_nss(vht_mcs_mask)));
+
+		if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
+			return -EINVAL;
+
+		mutex_lock(&ar->conf_mutex);
+
+		arvif->bitrate_mask = *mask;
+		ieee80211_iterate_stations_atomic(ar->hw,
+						  ath10k_mac_set_bitrate_mask_iter,
+						  arvif);
+
+		mutex_unlock(&ar->conf_mutex);
+	}
 
 	mutex_lock(&ar->conf_mutex);
 
-	if (arvif->fixed_rate == fixed_rate &&
-	    arvif->fixed_nss == fixed_nss &&
-	    arvif->force_sgi == force_sgi)
-		goto exit;
-
-	if (fixed_rate == WMI_FIXED_RATE_NONE)
-		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
-
-	if (force_sgi)
-		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac force sgi\n");
-
-	vdev_param = ar->wmi.vdev_param->fixed_rate;
-	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
-					vdev_param, fixed_rate);
+	ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi);
 	if (ret) {
-		ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
-			    fixed_rate, ret);
-		ret = -EINVAL;
+		ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
+			    arvif->vdev_id, ret);
 		goto exit;
 	}
 
-	arvif->fixed_rate = fixed_rate;
-
-	vdev_param = ar->wmi.vdev_param->nss;
-	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
-					vdev_param, fixed_nss);
-
-	if (ret) {
-		ath10k_warn(ar, "failed to set fixed nss param %d: %d\n",
-			    fixed_nss, ret);
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	arvif->fixed_nss = fixed_nss;
-
-	vdev_param = ar->wmi.vdev_param->sgi;
-	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
-					force_sgi);
-
-	if (ret) {
-		ath10k_warn(ar, "failed to set sgi param %d: %d\n",
-			    force_sgi, ret);
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	arvif->force_sgi = force_sgi;
-
 exit:
 	mutex_unlock(&ar->conf_mutex);
+
 	return ret;
 }
 
-static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
-				   struct ieee80211_vif *vif,
-				   const struct cfg80211_bitrate_mask *mask)
-{
-	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
-	struct ath10k *ar = arvif->ar;
-	enum ieee80211_band band = ar->hw->conf.chandef.chan->band;
-	u8 fixed_rate = WMI_FIXED_RATE_NONE;
-	u8 fixed_nss = ar->num_rf_chains;
-	u8 force_sgi;
-
-	if (ar->cfg_tx_chainmask)
-		fixed_nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
-
-	force_sgi = mask->control[band].gi;
-	if (force_sgi == NL80211_TXRATE_FORCE_LGI)
-		return -EINVAL;
-
-	if (!ath10k_default_bitrate_mask(ar, band, mask)) {
-		if (!ath10k_get_fixed_rate_nss(ar, mask, band,
-					       &fixed_rate,
-					       &fixed_nss))
-			return -EINVAL;
-	}
-
-	if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
-		ath10k_warn(ar, "failed to force SGI usage for default rate settings\n");
-		return -EINVAL;
-	}
-
-	return ath10k_set_fixed_rate_param(arvif, fixed_rate,
-					   fixed_nss, force_sgi);
-}
-
 static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
 				 struct ieee80211_vif *vif,
 				 struct ieee80211_sta *sta,
@@ -5090,6 +6066,317 @@
 	return -EINVAL;
 }
 
+static void
+ath10k_mac_update_rx_channel(struct ath10k *ar,
+			     struct ieee80211_chanctx_conf *ctx,
+			     struct ieee80211_vif_chanctx_switch *vifs,
+			     int n_vifs)
+{
+	struct cfg80211_chan_def *def = NULL;
+
+	/* Both locks are required because ar->rx_channel is modified. This
+	 * allows readers to hold either lock.
+	 */
+	lockdep_assert_held(&ar->conf_mutex);
+	lockdep_assert_held(&ar->data_lock);
+
+	WARN_ON(ctx && vifs);
+	WARN_ON(vifs && n_vifs != 1);
+
+	/* FIXME: Sort of an optimization and a workaround. Peers and vifs are
+	 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
+	 * ppdu on Rx may reduce performance on low-end systems. It should be
+	 * possible to make tables/hashmaps to speed up the lookup (be wary
+	 * of CPU data cache line sizes when sizing them though) but to keep
+	 * the initial implementation simple and less intrusive fall back to
+	 * the slow lookup only for multi-channel cases. Single-channel cases
+	 * will continue to use the old channel derivation and thus
+	 * performance should not be affected much.
+	 */
+	rcu_read_lock();
+	if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
+		ieee80211_iter_chan_contexts_atomic(ar->hw,
+					ath10k_mac_get_any_chandef_iter,
+					&def);
+
+		if (vifs)
+			def = &vifs[0].new_ctx->def;
+
+		ar->rx_channel = def->chan;
+	} else if (ctx && ath10k_mac_num_chanctxs(ar) == 0) {
+		ar->rx_channel = ctx->def.chan;
+	} else {
+		ar->rx_channel = NULL;
+	}
+	rcu_read_unlock();
+}
+
+static int
+ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
+			  struct ieee80211_chanctx_conf *ctx)
+{
+	struct ath10k *ar = hw->priv;
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC,
+		   "mac chanctx add freq %hu width %d ptr %p\n",
+		   ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+	mutex_lock(&ar->conf_mutex);
+
+	spin_lock_bh(&ar->data_lock);
+	ath10k_mac_update_rx_channel(ar, ctx, NULL, 0);
+	spin_unlock_bh(&ar->data_lock);
+
+	ath10k_recalc_radar_detection(ar);
+	ath10k_monitor_recalc(ar);
+
+	mutex_unlock(&ar->conf_mutex);
+
+	return 0;
+}
+
+static void
+ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
+			     struct ieee80211_chanctx_conf *ctx)
+{
+	struct ath10k *ar = hw->priv;
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC,
+		   "mac chanctx remove freq %hu width %d ptr %p\n",
+		   ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+	mutex_lock(&ar->conf_mutex);
+
+	spin_lock_bh(&ar->data_lock);
+	ath10k_mac_update_rx_channel(ar, NULL, NULL, 0);
+	spin_unlock_bh(&ar->data_lock);
+
+	ath10k_recalc_radar_detection(ar);
+	ath10k_monitor_recalc(ar);
+
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static void
+ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
+			     struct ieee80211_chanctx_conf *ctx,
+			     u32 changed)
+{
+	struct ath10k *ar = hw->priv;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC,
+		   "mac chanctx change freq %hu width %d ptr %p changed %x\n",
+		   ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
+
+	/* This shouldn't really happen because channel switching should use
+	 * switch_vif_chanctx().
+	 */
+	if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
+		goto unlock;
+
+	ath10k_recalc_radar_detection(ar);
+
+	/* FIXME: How to configure Rx chains properly? */
+
+	/* No other actions are actually necessary. Firmware maintains channel
+	 * definitions per vdev internally and there's no host-side channel
+	 * context abstraction to configure, e.g. channel width.
+	 */
+
+unlock:
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_chanctx_conf *ctx)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = (void *)vif->drv_priv;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC,
+		   "mac chanctx assign ptr %p vdev_id %i\n",
+		   ctx, arvif->vdev_id);
+
+	if (WARN_ON(arvif->is_started)) {
+		mutex_unlock(&ar->conf_mutex);
+		return -EBUSY;
+	}
+
+	ret = ath10k_vdev_start(arvif, &ctx->def);
+	if (ret) {
+		ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
+			    arvif->vdev_id, vif->addr,
+			    ctx->def.chan->center_freq, ret);
+		goto err;
+	}
+
+	arvif->is_started = true;
+
+	if (vif->type == NL80211_IFTYPE_MONITOR) {
+		ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
+		if (ret) {
+			ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+			goto err_stop;
+		}
+
+		arvif->is_up = true;
+	}
+
+	mutex_unlock(&ar->conf_mutex);
+	return 0;
+
+err_stop:
+	ath10k_vdev_stop(arvif);
+	arvif->is_started = false;
+
+err:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static void
+ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   struct ieee80211_chanctx_conf *ctx)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = (void *)vif->drv_priv;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC,
+		   "mac chanctx unassign ptr %p vdev_id %i\n",
+		   ctx, arvif->vdev_id);
+
+	WARN_ON(!arvif->is_started);
+
+	if (vif->type == NL80211_IFTYPE_MONITOR) {
+		WARN_ON(!arvif->is_up);
+
+		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+		if (ret)
+			ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+
+		arvif->is_up = false;
+	}
+
+	ret = ath10k_vdev_stop(arvif);
+	if (ret)
+		ath10k_warn(ar, "failed to stop vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+
+	arvif->is_started = false;
+
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
+				 struct ieee80211_vif_chanctx_switch *vifs,
+				 int n_vifs,
+				 enum ieee80211_chanctx_switch_mode mode)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif;
+	int ret;
+	int i;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC,
+		   "mac chanctx switch n_vifs %d mode %d\n",
+		   n_vifs, mode);
+
+	/* First stop the monitor interface. Some FW versions crash if
+	 * there's a lone monitor interface.
+	 */
+	if (ar->monitor_started)
+		ath10k_monitor_stop(ar);
+
+	for (i = 0; i < n_vifs; i++) {
+		arvif = ath10k_vif_to_arvif(vifs[i].vif);
+
+		ath10k_dbg(ar, ATH10K_DBG_MAC,
+			   "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
+			   arvif->vdev_id,
+			   vifs[i].old_ctx->def.chan->center_freq,
+			   vifs[i].new_ctx->def.chan->center_freq,
+			   vifs[i].old_ctx->def.width,
+			   vifs[i].new_ctx->def.width);
+
+		if (WARN_ON(!arvif->is_started))
+			continue;
+
+		if (WARN_ON(!arvif->is_up))
+			continue;
+
+		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+		if (ret) {
+			ath10k_warn(ar, "failed to down vdev %d: %d\n",
+				    arvif->vdev_id, ret);
+			continue;
+		}
+	}
+
+	/* All relevant vdevs are downed and associated channel resources
+	 * should be available for the channel switch now.
+	 */
+
+	spin_lock_bh(&ar->data_lock);
+	ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
+	spin_unlock_bh(&ar->data_lock);
+
+	for (i = 0; i < n_vifs; i++) {
+		arvif = ath10k_vif_to_arvif(vifs[i].vif);
+
+		if (WARN_ON(!arvif->is_started))
+			continue;
+
+		if (WARN_ON(!arvif->is_up))
+			continue;
+
+		ret = ath10k_mac_setup_bcn_tmpl(arvif);
+		if (ret)
+			ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
+				    ret);
+
+		ret = ath10k_mac_setup_prb_tmpl(arvif);
+		if (ret)
+			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
+				    ret);
+
+		ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
+		if (ret) {
+			ath10k_warn(ar, "failed to restart vdev %d: %d\n",
+				    arvif->vdev_id, ret);
+			continue;
+		}
+
+		ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+					 arvif->bssid);
+		if (ret) {
+			ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
+				    arvif->vdev_id, ret);
+			continue;
+		}
+	}
+
+	ath10k_monitor_recalc(ar);
+
+	mutex_unlock(&ar->conf_mutex);
+	return 0;
+}
+
 static const struct ieee80211_ops ath10k_ops = {
 	.tx				= ath10k_tx,
 	.start				= ath10k_start,
@@ -5114,31 +6401,31 @@
 	.get_antenna			= ath10k_get_antenna,
 	.reconfig_complete		= ath10k_reconfig_complete,
 	.get_survey			= ath10k_get_survey,
-	.set_bitrate_mask		= ath10k_set_bitrate_mask,
+	.set_bitrate_mask		= ath10k_mac_op_set_bitrate_mask,
 	.sta_rc_update			= ath10k_sta_rc_update,
 	.get_tsf			= ath10k_get_tsf,
 	.ampdu_action			= ath10k_ampdu_action,
 	.get_et_sset_count		= ath10k_debug_get_et_sset_count,
 	.get_et_stats			= ath10k_debug_get_et_stats,
 	.get_et_strings			= ath10k_debug_get_et_strings,
+	.add_chanctx			= ath10k_mac_op_add_chanctx,
+	.remove_chanctx			= ath10k_mac_op_remove_chanctx,
+	.change_chanctx			= ath10k_mac_op_change_chanctx,
+	.assign_vif_chanctx		= ath10k_mac_op_assign_vif_chanctx,
+	.unassign_vif_chanctx		= ath10k_mac_op_unassign_vif_chanctx,
+	.switch_vif_chanctx		= ath10k_mac_op_switch_vif_chanctx,
 
 	CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
 
 #ifdef CONFIG_PM
-	.suspend			= ath10k_suspend,
-	.resume				= ath10k_resume,
+	.suspend			= ath10k_wow_op_suspend,
+	.resume				= ath10k_wow_op_resume,
 #endif
 #ifdef CONFIG_MAC80211_DEBUGFS
 	.sta_add_debugfs		= ath10k_sta_add_debugfs,
 #endif
 };
 
-#define RATETAB_ENT(_rate, _rateid, _flags) { \
-	.bitrate		= (_rate), \
-	.flags			= (_flags), \
-	.hw_value		= (_rateid), \
-}
-
 #define CHAN2G(_channel, _freq, _flags) { \
 	.band			= IEEE80211_BAND_2GHZ, \
 	.hw_value		= (_channel), \
@@ -5194,6 +6481,7 @@
 	CHAN5G(132, 5660, 0),
 	CHAN5G(136, 5680, 0),
 	CHAN5G(140, 5700, 0),
+	CHAN5G(144, 5720, 0),
 	CHAN5G(149, 5745, 0),
 	CHAN5G(153, 5765, 0),
 	CHAN5G(157, 5785, 0),
@@ -5201,31 +6489,6 @@
 	CHAN5G(165, 5825, 0),
 };
 
-/* Note: Be careful if you re-order these. There is code which depends on this
- * ordering.
- */
-static struct ieee80211_rate ath10k_rates[] = {
-	/* CCK */
-	RATETAB_ENT(10,  0x82, 0),
-	RATETAB_ENT(20,  0x84, 0),
-	RATETAB_ENT(55,  0x8b, 0),
-	RATETAB_ENT(110, 0x96, 0),
-	/* OFDM */
-	RATETAB_ENT(60,  0x0c, 0),
-	RATETAB_ENT(90,  0x12, 0),
-	RATETAB_ENT(120, 0x18, 0),
-	RATETAB_ENT(180, 0x24, 0),
-	RATETAB_ENT(240, 0x30, 0),
-	RATETAB_ENT(360, 0x48, 0),
-	RATETAB_ENT(480, 0x60, 0),
-	RATETAB_ENT(540, 0x6c, 0),
-};
-
-#define ath10k_a_rates (ath10k_rates + 4)
-#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - 4)
-#define ath10k_g_rates (ath10k_rates + 0)
-#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
-
 struct ath10k *ath10k_mac_create(size_t priv_size)
 {
 	struct ieee80211_hw *hw;
@@ -5299,15 +6562,92 @@
 	},
 };
 
+static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_STATION) |
+			 BIT(NL80211_IFTYPE_AP) |
+			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
+			 BIT(NL80211_IFTYPE_P2P_GO),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
+};
+
+static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_ADHOC),
+	},
+};
+
+/* FIXME: This is not thoroughly tested. These combinations may over- or
+ * underestimate hw/fw capabilities.
+ */
+static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
+	{
+		.limits = ath10k_tlv_if_limit,
+		.num_different_channels = 1,
+		.max_interfaces = 3,
+		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
+	},
+	{
+		.limits = ath10k_tlv_if_limit_ibss,
+		.num_different_channels = 1,
+		.max_interfaces = 2,
+		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
+	},
+};
+
+static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
+	{
+		.limits = ath10k_tlv_if_limit,
+		.num_different_channels = 2,
+		.max_interfaces = 3,
+		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
+	},
+	{
+		.limits = ath10k_tlv_if_limit_ibss,
+		.num_different_channels = 1,
+		.max_interfaces = 2,
+		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
+	},
+};
+
 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
 {
 	struct ieee80211_sta_vht_cap vht_cap = {0};
 	u16 mcs_map;
+	u32 val;
 	int i;
 
 	vht_cap.vht_supported = 1;
 	vht_cap.cap = ar->vht_cap_info;
 
+	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
+		val = ar->num_rf_chains - 1;
+		val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+		val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+
+		vht_cap.cap |= val;
+	}
+
+	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+		val = ar->num_rf_chains - 1;
+		val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+		val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+
+		vht_cap.cap |= val;
+	}
+
 	mcs_map = 0;
 	for (i = 0; i < 8; i++) {
 		if (i < ar->num_rf_chains)
@@ -5438,6 +6778,10 @@
 	ht_cap = ath10k_get_ht_cap(ar);
 	vht_cap = ath10k_create_vht_cap(ar);
 
+	BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
+		      ARRAY_SIZE(ath10k_5ghz_channels)) !=
+		     ATH10K_NUM_CHANS);
+
 	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
 		channels = kmemdup(ath10k_2ghz_channels,
 				   sizeof(ath10k_2ghz_channels),
@@ -5492,24 +6836,31 @@
 			BIT(NL80211_IFTYPE_P2P_CLIENT) |
 			BIT(NL80211_IFTYPE_P2P_GO);
 
-	ar->hw->flags = IEEE80211_HW_SIGNAL_DBM |
-			IEEE80211_HW_SUPPORTS_PS |
-			IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
-			IEEE80211_HW_MFP_CAPABLE |
-			IEEE80211_HW_REPORTS_TX_ACK_STATUS |
-			IEEE80211_HW_HAS_RATE_CONTROL |
-			IEEE80211_HW_AP_LINK_PS |
-			IEEE80211_HW_SPECTRUM_MGMT |
-			IEEE80211_HW_SW_CRYPTO_CONTROL;
+	ieee80211_hw_set(ar->hw, SIGNAL_DBM);
+	ieee80211_hw_set(ar->hw, SUPPORTS_PS);
+	ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
+	ieee80211_hw_set(ar->hw, MFP_CAPABLE);
+	ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
+	ieee80211_hw_set(ar->hw, AP_LINK_PS);
+	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
+	ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
+	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
+	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
+	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
+	ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
+	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
+	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
 
 	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
+	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
 
 	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
 		ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
 
 	if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
-		ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
-		ar->hw->flags |= IEEE80211_HW_TX_AMPDU_SETUP_IN_HW;
+		ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
+		ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
 	}
 
 	ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
@@ -5533,6 +6884,9 @@
 			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
 	}
 
+	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map))
+		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+
 	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
 	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
 	ar->hw->wiphy->max_remain_on_channel_duration = 5000;
@@ -5540,20 +6894,46 @@
 	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
 	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
 
+	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
+
+	ret = ath10k_wow_init(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to init wow: %d\n", ret);
+		goto err_free;
+	}
+
 	/*
 	 * on LL hardware queues are managed entirely by the FW
 	 * so we only advertise to mac we can do the queues thing
 	 */
-	ar->hw->queues = 4;
+	ar->hw->queues = IEEE80211_MAX_QUEUES;
+
+	/* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
+	 * something that vdev_ids can't reach so that we don't stop the queue
+	 * accidentally.
+	 */
+	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
 
 	switch (ar->wmi.op_version) {
 	case ATH10K_FW_WMI_OP_VERSION_MAIN:
-	case ATH10K_FW_WMI_OP_VERSION_TLV:
 		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
 		ar->hw->wiphy->n_iface_combinations =
 			ARRAY_SIZE(ath10k_if_comb);
 		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
 		break;
+	case ATH10K_FW_WMI_OP_VERSION_TLV:
+		if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
+			ar->hw->wiphy->iface_combinations =
+				ath10k_tlv_qcs_if_comb;
+			ar->hw->wiphy->n_iface_combinations =
+				ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
+		} else {
+			ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
+			ar->hw->wiphy->n_iface_combinations =
+				ARRAY_SIZE(ath10k_tlv_if_comb);
+		}
+		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+		break;
 	case ATH10K_FW_WMI_OP_VERSION_10_1:
 	case ATH10K_FW_WMI_OP_VERSION_10_2:
 	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 6829611..b291f06 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -23,11 +23,22 @@
 
 #define WEP_KEYID_SHIFT 6
 
+enum wmi_tlv_tx_pause_id;
+enum wmi_tlv_tx_pause_action;
+
 struct ath10k_generic_iter {
 	struct ath10k *ar;
 	int ret;
 };
 
+struct rfc1042_hdr {
+	u8 llc_dsap;
+	u8 llc_ssap;
+	u8 llc_ctrl;
+	u8 snap_oui[3];
+	__be16 snap_type;
+} __packed;
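
As an illustration of the layout (not taken from this patch; the variable
name is hypothetical), the canonical RFC 1042 LLC/SNAP encapsulation of an
IPv4 frame would fill the struct as follows:

/* Illustrative only: RFC 1042 LLC/SNAP header for an IPv4 (ETH_P_IP)
 * payload. 0xaa/0xaa/0x03 is the standard SNAP LLC header and the zero
 * OUI selects the EtherType interpretation of snap_type.
 */
static const struct rfc1042_hdr rfc1042_ipv4_example = {
	.llc_dsap  = 0xaa,
	.llc_ssap  = 0xaa,
	.llc_ctrl  = 0x03,
	.snap_oui  = { 0x00, 0x00, 0x00 },
	.snap_type = cpu_to_be16(0x0800),	/* ETH_P_IP */
};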
+
 struct ath10k *ath10k_mac_create(size_t priv_size);
 void ath10k_mac_destroy(struct ath10k *ar);
 int ath10k_mac_register(struct ath10k *ar);
@@ -45,6 +56,24 @@
 void ath10k_drain_tx(struct ath10k *ar);
 bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
 				    u8 keyidx);
+int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
+			struct cfg80211_chan_def *def);
+
+void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
+void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
+				enum wmi_tlv_tx_pause_id pause_id,
+				enum wmi_tlv_tx_pause_action action);
+
+u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+			     u8 hw_rate);
+u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+			     u32 bitrate);
+
+void ath10k_mac_tx_lock(struct ath10k *ar, int reason);
+void ath10k_mac_tx_unlock(struct ath10k *ar, int reason);
+void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
+void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
 
 static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
 {
diff --git a/drivers/net/wireless/ath/ath10k/p2p.c b/drivers/net/wireless/ath/ath10k/p2p.c
new file mode 100644
index 0000000..c0b6ffa
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/p2p.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "wmi.h"
+#include "mac.h"
+#include "p2p.h"
+
+static void ath10k_p2p_noa_ie_fill(u8 *data, size_t len,
+				   const struct wmi_p2p_noa_info *noa)
+{
+	struct ieee80211_p2p_noa_attr *noa_attr;
+	u8  ctwindow_oppps = noa->ctwindow_oppps;
+	u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
+	bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
+	__le16 *noa_attr_len;
+	u16 attr_len;
+	u8 noa_descriptors = noa->num_descriptors;
+	int i;
+
+	/* P2P IE */
+	data[0] = WLAN_EID_VENDOR_SPECIFIC;
+	data[1] = len - 2;
+	data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+	data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+	data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+	data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+	/* NOA ATTR */
+	data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+	noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+	noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+	noa_attr->index = noa->index;
+	noa_attr->oppps_ctwindow = ctwindow;
+	if (oppps)
+		noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+	for (i = 0; i < noa_descriptors; i++) {
+		noa_attr->desc[i].count =
+			__le32_to_cpu(noa->descriptors[i].type_count);
+		noa_attr->desc[i].duration = noa->descriptors[i].duration;
+		noa_attr->desc[i].interval = noa->descriptors[i].interval;
+		noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
+	}
+
+	attr_len = 2; /* index + oppps_ctwindow */
+	attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+	*noa_attr_len = __cpu_to_le16(attr_len);
+}
+
+static size_t ath10k_p2p_noa_ie_len_compute(const struct wmi_p2p_noa_info *noa)
+{
+	size_t len = 0;
+
+	if (!noa->num_descriptors &&
+	    !(noa->ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT))
+		return 0;
+
+	len += 1 + 1 + 4; /* EID + len + OUI */
+	len += 1 + 2; /* noa attr + attr len */
+	len += 1 + 1; /* index + oppps_ctwindow */
+	len += noa->num_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+
+	return len;
+}
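
For reference, with a single NoA descriptor the computation above yields
6 (EID + length + OUI + OUI type) + 3 (attribute id + 2-byte attribute
length) + 2 (index + oppps_ctwindow) + 13 (one packed struct
ieee80211_p2p_noa_desc: a u8 count plus three __le32 fields) = 24 bytes
for the whole P2P IE.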
+
+static void ath10k_p2p_noa_ie_assign(struct ath10k_vif *arvif, void *ie,
+				     size_t len)
+{
+	struct ath10k *ar = arvif->ar;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	kfree(arvif->u.ap.noa_data);
+
+	arvif->u.ap.noa_data = ie;
+	arvif->u.ap.noa_len = len;
+}
+
+static void __ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+				    const struct wmi_p2p_noa_info *noa)
+{
+	struct ath10k *ar = arvif->ar;
+	void *ie;
+	size_t len;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	ath10k_p2p_noa_ie_assign(arvif, NULL, 0);
+
+	len = ath10k_p2p_noa_ie_len_compute(noa);
+	if (!len)
+		return;
+
+	ie = kmalloc(len, GFP_ATOMIC);
+	if (!ie)
+		return;
+
+	ath10k_p2p_noa_ie_fill(ie, len, noa);
+	ath10k_p2p_noa_ie_assign(arvif, ie, len);
+}
+
+void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+			   const struct wmi_p2p_noa_info *noa)
+{
+	struct ath10k *ar = arvif->ar;
+
+	spin_lock_bh(&ar->data_lock);
+	__ath10k_p2p_noa_update(arvif, noa);
+	spin_unlock_bh(&ar->data_lock);
+}
+
+struct ath10k_p2p_noa_arg {
+	u32 vdev_id;
+	const struct wmi_p2p_noa_info *noa;
+};
+
+static void ath10k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
+					    struct ieee80211_vif *vif)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ath10k_p2p_noa_arg *arg = data;
+
+	if (arvif->vdev_id != arg->vdev_id)
+		return;
+
+	ath10k_p2p_noa_update(arvif, arg->noa);
+}
+
+void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
+				      const struct wmi_p2p_noa_info *noa)
+{
+	struct ath10k_p2p_noa_arg arg = {
+		.vdev_id = vdev_id,
+		.noa = noa,
+	};
+
+	ieee80211_iterate_active_interfaces_atomic(ar->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   ath10k_p2p_noa_update_vdev_iter,
+						   &arg);
+}
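
A sketch of a hypothetical caller (the event struct and its fields are
assumed here, not defined by this patch): a WMI NoA event handler only
needs the vdev id and the NoA info to dispatch through the by-vdev-id
variant, which handles the vif iteration and locking itself:

/* Hypothetical sketch; struct wmi_p2p_noa_ev and its fields are assumed,
 * not from this patch.
 */
static void example_handle_p2p_noa_event(struct ath10k *ar,
					 const struct wmi_p2p_noa_ev *ev)
{
	u32 vdev_id = __le32_to_cpu(ev->vdev_id);

	ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, &ev->noa);
}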
diff --git a/drivers/net/wireless/ath/ath10k/p2p.h b/drivers/net/wireless/ath/ath10k/p2p.h
new file mode 100644
index 0000000..7be616e2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/p2p.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _P2P_H
+#define _P2P_H
+
+struct ath10k_vif;
+struct wmi_p2p_noa_info;
+
+void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+			   const struct wmi_p2p_noa_info *noa);
+void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
+				      const struct wmi_p2p_noa_info *noa);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 7681237..ea656e0 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -113,7 +113,7 @@
 		.flags = CE_ATTR_FLAGS,
 		.src_nentries = 0,
 		.src_sz_max = 2048,
-		.dest_nentries = 32,
+		.dest_nentries = 128,
 	},
 
 	/* CE3: host->target WMI */
@@ -183,7 +183,7 @@
 	{
 		.pipenum = __cpu_to_le32(2),
 		.pipedir = __cpu_to_le32(PIPEDIR_IN),
-		.nentries = __cpu_to_le32(32),
+		.nentries = __cpu_to_le32(64),
 		.nbytes_max = __cpu_to_le32(2048),
 		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
 		.reserved = __cpu_to_le32(0),
@@ -330,6 +330,205 @@
 	},
 };
 
+static bool ath10k_pci_is_awake(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+			   RTC_STATE_ADDRESS);
+
+	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
+}
+
+static void __ath10k_pci_wake(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	lockdep_assert_held(&ar_pci->ps_lock);
+
+	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
+		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+	iowrite32(PCIE_SOC_WAKE_V_MASK,
+		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+		  PCIE_SOC_WAKE_ADDRESS);
+}
+
+static void __ath10k_pci_sleep(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	lockdep_assert_held(&ar_pci->ps_lock);
+
+	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
+		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+	iowrite32(PCIE_SOC_WAKE_RESET,
+		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+		  PCIE_SOC_WAKE_ADDRESS);
+	ar_pci->ps_awake = false;
+}
+
+static int ath10k_pci_wake_wait(struct ath10k *ar)
+{
+	int tot_delay = 0;
+	int curr_delay = 5;
+
+	while (tot_delay < PCIE_WAKE_TIMEOUT) {
+		if (ath10k_pci_is_awake(ar))
+			return 0;
+
+		udelay(curr_delay);
+		tot_delay += curr_delay;
+
+		if (curr_delay < 50)
+			curr_delay += 5;
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int ath10k_pci_wake(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
+		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+	/* This function can be called very frequently. To avoid excessive
+	 * CPU stalls for MMIO reads use a cached variable to hold the
+	 * device state.
+	 */
+	if (!ar_pci->ps_awake) {
+		__ath10k_pci_wake(ar);
+
+		ret = ath10k_pci_wake_wait(ar);
+		if (ret == 0)
+			ar_pci->ps_awake = true;
+	}
+
+	if (ret == 0) {
+		ar_pci->ps_wake_refcount++;
+		WARN_ON(ar_pci->ps_wake_refcount == 0);
+	}
+
+	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+
+	return ret;
+}
+
+static void ath10k_pci_sleep(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
+		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+	if (WARN_ON(ar_pci->ps_wake_refcount == 0))
+		goto skip;
+
+	ar_pci->ps_wake_refcount--;
+
+	mod_timer(&ar_pci->ps_timer, jiffies +
+		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
+
+skip:
+	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static void ath10k_pci_ps_timer(unsigned long ptr)
+{
+	struct ath10k *ar = (void *)ptr;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
+		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+	if (ar_pci->ps_wake_refcount > 0)
+		goto skip;
+
+	__ath10k_pci_sleep(ar);
+
+skip:
+	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static void ath10k_pci_sleep_sync(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	unsigned long flags;
+
+	del_timer_sync(&ar_pci->ps_timer);
+
+	spin_lock_irqsave(&ar_pci->ps_lock, flags);
+	WARN_ON(ar_pci->ps_wake_refcount > 0);
+	__ath10k_pci_sleep(ar);
+	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
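
The wake/sleep pairing above is a plain refcount with deferred release. A
minimal standalone sketch of the pattern, with all names illustrative and
the actual register pokes, polling and locking elided:

#include <stdbool.h>

struct ps_state {
	unsigned long refcount;	/* outstanding wake references */
	bool awake;		/* cached hardware power state */
};

/* Caller wants MMIO access: wake the hw on the 0 -> 1 transition only. */
static void ps_get(struct ps_state *ps)
{
	if (!ps->awake)
		ps->awake = true;	/* stands in for the wake + poll */
	ps->refcount++;
}

/* Caller is done: don't sleep immediately, arm a grace timer instead.
 * The timer callback puts the hw to sleep only if refcount is still 0
 * when it fires.
 */
static void ps_put(struct ps_state *ps)
{
	ps->refcount--;
}

int main(void)
{
	struct ps_state ps = { 0, false };

	ps_get(&ps);	/* wakes hw, refcount 1 */
	ps_put(&ps);	/* refcount 0, hw stays awake for the grace period */
	return 0;
}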
+
+void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret;
+
+	ret = ath10k_pci_wake(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
+			    value, offset, ret);
+		return;
+	}
+
+	iowrite32(value, ar_pci->mem + offset);
+	ath10k_pci_sleep(ar);
+}
+
+u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	u32 val;
+	int ret;
+
+	ret = ath10k_pci_wake(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
+			    offset, ret);
+		return 0xffffffff;
+	}
+
+	val = ioread32(ar_pci->mem + offset);
+	ath10k_pci_sleep(ar);
+
+	return val;
+}
+
+u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
+{
+	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
+}
+
+void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
+}
+
+u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
+{
+	return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
+}
+
+void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+	ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
+}
+
 static bool ath10k_pci_irq_pending(struct ath10k *ar)
 {
 	u32 cause;
@@ -793,45 +992,6 @@
 	return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
 }
 
-static bool ath10k_pci_is_awake(struct ath10k *ar)
-{
-	u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);
-
-	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
-}
-
-static int ath10k_pci_wake_wait(struct ath10k *ar)
-{
-	int tot_delay = 0;
-	int curr_delay = 5;
-
-	while (tot_delay < PCIE_WAKE_TIMEOUT) {
-		if (ath10k_pci_is_awake(ar))
-			return 0;
-
-		udelay(curr_delay);
-		tot_delay += curr_delay;
-
-		if (curr_delay < 50)
-			curr_delay += 5;
-	}
-
-	return -ETIMEDOUT;
-}
-
-static int ath10k_pci_wake(struct ath10k *ar)
-{
-	ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
-			       PCIE_SOC_WAKE_V_MASK);
-	return ath10k_pci_wake_wait(ar);
-}
-
-static void ath10k_pci_sleep(struct ath10k *ar)
-{
-	ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
-			       PCIE_SOC_WAKE_RESET);
-}
-
 /* Called by lower (CE) layer when a send to Target completes. */
 static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
 {
@@ -1212,11 +1372,15 @@
 
 static int ath10k_pci_hif_start(struct ath10k *ar)
 {
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
 
 	ath10k_pci_irq_enable(ar);
 	ath10k_pci_rx_post(ar);
 
+	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+				   ar_pci->link_ctl);
+
 	return 0;
 }
 
@@ -1260,7 +1424,6 @@
 	struct ath10k_ce_ring *ce_ring;
 	struct ce_desc *ce_desc;
 	struct sk_buff *skb;
-	unsigned int id;
 	int i;
 
 	ar = pci_pipe->hif_ce_state;
@@ -1284,8 +1447,6 @@
 			continue;
 
 		ce_ring->per_transfer_context[i] = NULL;
-		id = MS(__le16_to_cpu(ce_desc[i].flags),
-			CE_DESC_FLAGS_META_DATA);
 
 		ar_pci->msg_callbacks_current.tx_completion(ar, skb);
 	}
@@ -1329,6 +1490,9 @@
 
 static void ath10k_pci_hif_stop(struct ath10k *ar)
 {
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	unsigned long flags;
+
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
 
 	/* Most likely the device has HTT Rx ring configured. The only way to
@@ -1347,6 +1511,10 @@
 	ath10k_pci_irq_disable(ar);
 	ath10k_pci_irq_sync(ar);
 	ath10k_pci_flush(ar);
+
+	spin_lock_irqsave(&ar_pci->ps_lock, flags);
+	WARN_ON(ar_pci->ps_wake_refcount > 0);
+	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 }
 
 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
@@ -1524,12 +1692,11 @@
 		switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
 		case QCA6174_HW_1_0_CHIP_ID_REV:
 		case QCA6174_HW_1_1_CHIP_ID_REV:
+		case QCA6174_HW_2_1_CHIP_ID_REV:
+		case QCA6174_HW_2_2_CHIP_ID_REV:
 			return 3;
 		case QCA6174_HW_1_3_CHIP_ID_REV:
 			return 2;
-		case QCA6174_HW_2_1_CHIP_ID_REV:
-		case QCA6174_HW_2_2_CHIP_ID_REV:
-			return 6;
 		case QCA6174_HW_3_0_CHIP_ID_REV:
 		case QCA6174_HW_3_1_CHIP_ID_REV:
 		case QCA6174_HW_3_2_CHIP_ID_REV:
@@ -1967,15 +2134,15 @@
 
 static int ath10k_pci_hif_power_up(struct ath10k *ar)
 {
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int ret;
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
 
-	ret = ath10k_pci_wake(ar);
-	if (ret) {
-		ath10k_err(ar, "failed to wake up target: %d\n", ret);
-		return ret;
-	}
+	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+				  &ar_pci->link_ctl);
+	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+				   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
 
 	/*
 	 * Bring the target up cleanly.
@@ -2023,7 +2190,6 @@
 	ath10k_pci_ce_deinit(ar);
 
 err_sleep:
-	ath10k_pci_sleep(ar);
 	return ret;
 }
 
@@ -2034,28 +2200,18 @@
 	/* Currently hif_power_up performs effectively a reset and hif_stop
 	 * resets the chip as well so there's no point in resetting here.
 	 */
-
-	ath10k_pci_sleep(ar);
 }
 
 #ifdef CONFIG_PM
 
-#define ATH10K_PCI_PM_CONTROL 0x44
-
 static int ath10k_pci_hif_suspend(struct ath10k *ar)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct pci_dev *pdev = ar_pci->pdev;
-	u32 val;
-
-	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
-
-	if ((val & 0x000000ff) != 0x3) {
-		pci_save_state(pdev);
-		pci_disable_device(pdev);
-		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
-				       (val & 0xffffff00) | 0x03);
-	}
+	/* The grace timer can still be counting down and ar->ps_awake may
+	 * still be true. It is known that the device may be asleep after
+	 * resuming regardless of the SoC powersave state before suspending.
+	 * Hence make sure the device is asleep before proceeding.
+	 */
+	ath10k_pci_sleep_sync(ar);
 
 	return 0;
 }
@@ -2066,22 +2222,14 @@
 	struct pci_dev *pdev = ar_pci->pdev;
 	u32 val;
 
-	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
-
-	if ((val & 0x000000ff) != 0) {
-		pci_restore_state(pdev);
-		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
-				       val & 0xffffff00);
-		/*
-		 * Suspend/Resume resets the PCI configuration space,
-		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
-		 * to keep PCI Tx retries from interfering with C3 CPU state
-		 */
-		pci_read_config_dword(pdev, 0x40, &val);
-
-		if ((val & 0x0000ff00) != 0)
-			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
-	}
+	/* Suspend/Resume resets the PCI configuration space, so we have to
+	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
+	 * from interfering with C3 CPU state. pci_restore_state won't help
+	 * here since it only restores the first 64 bytes pci config header.
+	 */
+	pci_read_config_dword(pdev, 0x40, &val);
+	if ((val & 0x0000ff00) != 0)
+		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
 
 	return 0;
 }
@@ -2497,7 +2645,6 @@
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	struct pci_dev *pdev = ar_pci->pdev;
-	u32 lcr_val;
 	int ret;
 
 	pci_set_drvdata(pdev, ar);
@@ -2531,10 +2678,6 @@
 
 	pci_set_master(pdev);
 
-	/* Workaround: Disable ASPM */
-	pci_read_config_dword(pdev, 0x80, &lcr_val);
-	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
-
 	/* Arrange for access to Target SoC registers. */
 	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
 	if (!ar_pci->mem) {
@@ -2621,9 +2764,19 @@
 	ar_pci->dev = &pdev->dev;
 	ar_pci->ar = ar;
 
+	if (pdev->subsystem_vendor || pdev->subsystem_device)
+		scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
+			  "%04x:%04x:%04x:%04x",
+			  pdev->vendor, pdev->device,
+			  pdev->subsystem_vendor, pdev->subsystem_device);
+
 	spin_lock_init(&ar_pci->ce_lock);
+	spin_lock_init(&ar_pci->ps_lock);
+
 	setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
 		    (unsigned long)ar);
+	setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
+		    (unsigned long)ar);
 
 	ret = ath10k_pci_claim(ar);
 	if (ret) {
@@ -2631,12 +2784,6 @@
 		goto err_core_destroy;
 	}
 
-	ret = ath10k_pci_wake(ar);
-	if (ret) {
-		ath10k_err(ar, "failed to wake up: %d\n", ret);
-		goto err_release;
-	}
-
 	ret = ath10k_pci_alloc_pipes(ar);
 	if (ret) {
 		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
@@ -2678,11 +2825,9 @@
 	if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
 		ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
 			   pdev->device, chip_id);
-		goto err_sleep;
+		goto err_free_irq;
 	}
 
-	ath10k_pci_sleep(ar);
-
 	ret = ath10k_core_register(ar, chip_id);
 	if (ret) {
 		ath10k_err(ar, "failed to register driver core: %d\n", ret);
@@ -2702,9 +2847,7 @@
 	ath10k_pci_free_pipes(ar);
 
 err_sleep:
-	ath10k_pci_sleep(ar);
-
-err_release:
+	ath10k_pci_sleep_sync(ar);
 	ath10k_pci_release(ar);
 
 err_core_destroy:
@@ -2734,6 +2877,7 @@
 	ath10k_pci_deinit_irq(ar);
 	ath10k_pci_ce_deinit(ar);
 	ath10k_pci_free_pipes(ar);
+	ath10k_pci_sleep_sync(ar);
 	ath10k_pci_release(ar);
 	ath10k_core_destroy(ar);
 }
@@ -2770,7 +2914,21 @@
 MODULE_AUTHOR("Qualcomm Atheros");
 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
 MODULE_LICENSE("Dual BSD/GPL");
+
+/* QCA988x 2.0 firmware files */
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
+
+/* QCA6174 2.1 firmware files */
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
+
+/* QCA6174 3.1 firmware files */
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index bddf543..d7696dd 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -185,6 +185,41 @@
 	/* Map CE id to ce_state */
 	struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
 	struct timer_list rx_post_retry;
+
+	/* Due to HW quirks it is recommended to disable ASPM during device
+	 * bootup. To do that the original PCI-E Link Control register value
+	 * is stored before bootup and restored once bootup completes.
+	 */
+	u16 link_ctl;
+
+	/* Protects ps_awake and ps_wake_refcount */
+	spinlock_t ps_lock;
+
+	/* The device has a special powersave-oriented register. When the
+	 * device is considered asleep it draws less power and the driver is
+	 * forbidden from accessing most MMIO registers. If the host accesses
+	 * them without waking the device up first it might scribble over
+	 * host memory or return 0xdeadbeef readouts.
+	 */
+	unsigned long ps_wake_refcount;
+
+	/* Waking up takes some time (up to 2ms in some cases) so it can be bad
+	 * for latency. To mitigate this the device isn't immediately allowed
+	 * to sleep after all references are undone - instead there's a grace
+	 * period after which the powersave register is updated unless some
+	 * activity to/from the device happened in the meantime.
+	 *
+	 * Also see comments on ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC.
+	 */
+	struct timer_list ps_timer;
+
+	/* MMIO registers are used to communicate with the device. With
+	 * intensive traffic, accessing the powersave register for each
+	 * access would be wasteful overhead and would needlessly stall the
+	 * CPU. It is far more efficient to rely on a variable in RAM and
+	 * update it only upon powersave register state changes.
+	 */
+	bool ps_awake;
 };
 
 static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -209,61 +244,25 @@
  * for this device; but that's not guaranteed.
  */
 #define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr)			\
-	(((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS|			\
+	(((ath10k_pci_read32(ar, (SOC_CORE_BASE_ADDRESS |		\
 	  CORE_CTRL_ADDRESS)) & 0x7ff) << 21) |				\
 	 0x100000 | ((addr) & 0xfffff))
 
 /* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
 #define DIAG_ACCESS_CE_TIMEOUT_MS 10
 
-/* Target exposes its registers for direct access. However before host can
- * access them it needs to make sure the target is awake (ath10k_pci_wake,
- * ath10k_pci_wake_wait, ath10k_pci_is_awake). Once target is awake it won't go
- * to sleep unless host tells it to (ath10k_pci_sleep).
- *
- * If host tries to access target registers without waking it up it can
- * scribble over host memory.
- *
- * If target is asleep waking it up may take up to even 2ms.
+void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value);
+void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val);
+void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val);
+
+u32 ath10k_pci_read32(struct ath10k *ar, u32 offset);
+u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr);
+u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr);
+
+/* QCA6174 is known to have Tx/Rx issues when the SOC_WAKE register is poked
+ * too frequently. To avoid this put the SoC to sleep after a very
+ * conservative grace period. Adjust with great care.
  */
-
-static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
-				      u32 value)
-{
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
-	iowrite32(value, ar_pci->mem + offset);
-}
-
-static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
-{
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
-	return ioread32(ar_pci->mem + offset);
-}
-
-static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
-{
-	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
-}
-
-static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
-{
-	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
-}
-
-static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
-{
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
-	return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
-}
-
-static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
-{
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
-	iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
-}
+#define ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC 60
 
 #endif /* _PCI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index e9cc778..492b5a5 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -661,6 +661,28 @@
 #define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff
 #define RX_PPDU_START_INFO5_SERVICE_LSB  0
 
+/* No idea what this flag means. It seems to always be set in the rate field. */
+#define RX_PPDU_START_RATE_FLAG BIT(3)
+
+enum rx_ppdu_start_rate {
+	RX_PPDU_START_RATE_OFDM_48M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_48M,
+	RX_PPDU_START_RATE_OFDM_24M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_24M,
+	RX_PPDU_START_RATE_OFDM_12M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_12M,
+	RX_PPDU_START_RATE_OFDM_6M  = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_6M,
+	RX_PPDU_START_RATE_OFDM_54M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_54M,
+	RX_PPDU_START_RATE_OFDM_36M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_36M,
+	RX_PPDU_START_RATE_OFDM_18M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_18M,
+	RX_PPDU_START_RATE_OFDM_9M  = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_9M,
+
+	RX_PPDU_START_RATE_CCK_LP_11M  = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_11M,
+	RX_PPDU_START_RATE_CCK_LP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_5_5M,
+	RX_PPDU_START_RATE_CCK_LP_2M   = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_2M,
+	RX_PPDU_START_RATE_CCK_LP_1M   = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_1M,
+	RX_PPDU_START_RATE_CCK_SP_11M  = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_11M,
+	RX_PPDU_START_RATE_CCK_SP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_5_5M,
+	RX_PPDU_START_RATE_CCK_SP_2M   = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_2M,
+};
+
 struct rx_ppdu_start {
 	struct {
 		u8 pri20_mhz;
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index d22addf..8dcd424 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -519,9 +519,12 @@
 
 int ath10k_spectral_create(struct ath10k *ar)
 {
+	/* The buffer size covers whole channels in dual bands up to 128 bins.
+	 * A scan with more than 128 bins needs to be run on each band
+	 * separately.
+	 */
 	ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan",
 						     ar->debug.debugfs_phy,
-						     1024, 256,
+						     1140, 2500,
 						     &rfs_spec_scan_cb, NULL);
 	debugfs_create_file("spectral_scan_ctl",
 			    S_IRUSR | S_IWUSR,
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index aede750..1a899d7 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -23,102 +23,50 @@
 #include "debug.h"
 #include "wmi-ops.h"
 
-static int ath10k_thermal_get_active_vifs(struct ath10k *ar,
-					  enum wmi_vdev_type type)
+static int
+ath10k_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
+				      unsigned long *state)
 {
-	struct ath10k_vif *arvif;
-	int count = 0;
-
-	lockdep_assert_held(&ar->conf_mutex);
-
-	list_for_each_entry(arvif, &ar->arvifs, list) {
-		if (!arvif->is_started)
-			continue;
-
-		if (!arvif->is_up)
-			continue;
-
-		if (arvif->vdev_type != type)
-			continue;
-
-		count++;
-	}
-	return count;
-}
-
-static int ath10k_thermal_get_max_dutycycle(struct thermal_cooling_device *cdev,
-					    unsigned long *state)
-{
-	*state = ATH10K_QUIET_DUTY_CYCLE_MAX;
+	*state = ATH10K_THERMAL_THROTTLE_MAX;
 
 	return 0;
 }
 
-static int ath10k_thermal_get_cur_dutycycle(struct thermal_cooling_device *cdev,
-					    unsigned long *state)
+static int
+ath10k_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
+				      unsigned long *state)
 {
 	struct ath10k *ar = cdev->devdata;
 
 	mutex_lock(&ar->conf_mutex);
-	*state = ar->thermal.duty_cycle;
+	*state = ar->thermal.throttle_state;
 	mutex_unlock(&ar->conf_mutex);
 
 	return 0;
 }
 
-static int ath10k_thermal_set_cur_dutycycle(struct thermal_cooling_device *cdev,
-					    unsigned long duty_cycle)
+static int
+ath10k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+				      unsigned long throttle_state)
 {
 	struct ath10k *ar = cdev->devdata;
-	u32 period, duration, enabled;
-	int num_bss, ret = 0;
 
+	if (throttle_state > ATH10K_THERMAL_THROTTLE_MAX) {
+		ath10k_warn(ar, "throttle state %ld is exceeding the limit %d\n",
+			    throttle_state, ATH10K_THERMAL_THROTTLE_MAX);
+		return -EINVAL;
+	}
 	mutex_lock(&ar->conf_mutex);
-	if (ar->state != ATH10K_STATE_ON) {
-		ret = -ENETDOWN;
-		goto out;
-	}
-
-	if (duty_cycle > ATH10K_QUIET_DUTY_CYCLE_MAX) {
-		ath10k_warn(ar, "duty cycle %ld is exceeding the limit %d\n",
-			    duty_cycle, ATH10K_QUIET_DUTY_CYCLE_MAX);
-		ret = -EINVAL;
-		goto out;
-	}
-	/* TODO: Right now, thermal mitigation is handled only for single/multi
-	 * vif AP mode. Since quiet param is not validated in STA mode, it needs
-	 * to be investigated further to handle multi STA and multi-vif (AP+STA)
-	 * mode properly.
-	 */
-	num_bss = ath10k_thermal_get_active_vifs(ar, WMI_VDEV_TYPE_AP);
-	if (!num_bss) {
-		ath10k_warn(ar, "no active AP interfaces\n");
-		ret = -ENETDOWN;
-		goto out;
-	}
-	period = max(ATH10K_QUIET_PERIOD_MIN,
-		     (ATH10K_QUIET_PERIOD_DEFAULT / num_bss));
-	duration = (period * duty_cycle) / 100;
-	enabled = duration ? 1 : 0;
-
-	ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
-					     ATH10K_QUIET_START_OFFSET,
-					     enabled);
-	if (ret) {
-		ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
-			    period, duration, enabled, ret);
-		goto out;
-	}
-	ar->thermal.duty_cycle = duty_cycle;
-out:
+	ar->thermal.throttle_state = throttle_state;
+	ath10k_thermal_set_throttling(ar);
 	mutex_unlock(&ar->conf_mutex);
-	return ret;
+	return 0;
 }
 
 static struct thermal_cooling_device_ops ath10k_thermal_ops = {
-	.get_max_state = ath10k_thermal_get_max_dutycycle,
-	.get_cur_state = ath10k_thermal_get_cur_dutycycle,
-	.set_cur_state = ath10k_thermal_set_cur_dutycycle,
+	.get_max_state = ath10k_thermal_get_max_throttle_state,
+	.get_cur_state = ath10k_thermal_get_cur_throttle_state,
+	.set_cur_state = ath10k_thermal_set_cur_throttle_state,
 };
 
 static ssize_t ath10k_thermal_show_temp(struct device *dev,
@@ -127,6 +75,7 @@
 {
 	struct ath10k *ar = dev_get_drvdata(dev);
 	int ret, temperature;
+	unsigned long time_left;
 
 	mutex_lock(&ar->conf_mutex);
 
@@ -148,9 +97,9 @@
 		goto out;
 	}
 
-	ret = wait_for_completion_timeout(&ar->thermal.wmi_sync,
-					  ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
-	if (ret == 0) {
+	time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync,
+						ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
+	if (!time_left) {
 		ath10k_warn(ar, "failed to synchronize thermal read\n");
 		ret = -ETIMEDOUT;
 		goto out;
@@ -184,6 +133,32 @@
 };
 ATTRIBUTE_GROUPS(ath10k_hwmon);
 
+void ath10k_thermal_set_throttling(struct ath10k *ar)
+{
+	u32 period, duration, enabled;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
+		return;
+
+	if (ar->state != ATH10K_STATE_ON)
+		return;
+
+	period = ar->thermal.quiet_period;
+	duration = (period * ar->thermal.throttle_state) / 100;
+	enabled = duration ? 1 : 0;
+
+	ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
+					     ATH10K_QUIET_START_OFFSET,
+					     enabled);
+	if (ret) {
+		ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
+			    period, duration, enabled, ret);
+	}
+}
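
A worked example of the arithmetic (values illustrative): with the default
100 ms quiet period and a cooling state of 40, firmware is asked to stay
quiet for 40 ms out of every 100 ms:

#include <stdio.h>

int main(void)
{
	unsigned int period = 100;		/* ATH10K_QUIET_PERIOD_DEFAULT */
	unsigned int throttle_state = 40;	/* cooling state, 0..100 */
	unsigned int duration = (period * throttle_state) / 100;

	printf("quiet %u ms of every %u ms (enabled=%u)\n",
	       duration, period, duration ? 1u : 0u);
	return 0;
}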
+
 int ath10k_thermal_register(struct ath10k *ar)
 {
 	struct thermal_cooling_device *cdev;
@@ -202,11 +177,12 @@
 	ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj,
 				"cooling_device");
 	if (ret) {
-		ath10k_err(ar, "failed to create thermal symlink\n");
+		ath10k_err(ar, "failed to create cooling device symlink\n");
 		goto err_cooling_destroy;
 	}
 
 	ar->thermal.cdev = cdev;
+	ar->thermal.quiet_period = ATH10K_QUIET_PERIOD_DEFAULT;
 
 	/* Do not register hwmon device when temperature reading is not
 	 * supported by firmware
@@ -231,7 +207,7 @@
 	return 0;
 
 err_remove_link:
-	sysfs_remove_link(&ar->dev->kobj, "thermal_sensor");
+	sysfs_remove_link(&ar->dev->kobj, "cooling_device");
 err_cooling_destroy:
 	thermal_cooling_device_unregister(cdev);
 	return ret;
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
index bccc17a..b610ea5 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.h
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -19,16 +19,17 @@
 #define ATH10K_QUIET_PERIOD_DEFAULT     100
 #define ATH10K_QUIET_PERIOD_MIN         25
 #define ATH10K_QUIET_START_OFFSET       10
-#define ATH10K_QUIET_DUTY_CYCLE_MAX     70
 #define ATH10K_HWMON_NAME_LEN           15
 #define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
+#define ATH10K_THERMAL_THROTTLE_MAX     100
 
 struct ath10k_thermal {
 	struct thermal_cooling_device *cdev;
 	struct completion wmi_sync;
 
 	/* protected by conf_mutex */
-	u32 duty_cycle;
+	u32 throttle_state;
+	u32 quiet_period;
 	/* temperature value in Celcius degree
 	 * protected by data_lock
 	 */
@@ -39,6 +40,7 @@
 int ath10k_thermal_register(struct ath10k *ar);
 void ath10k_thermal_unregister(struct ath10k *ar);
 void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
+void ath10k_thermal_set_throttling(struct ath10k *ar);
 #else
 static inline int ath10k_thermal_register(struct ath10k *ar)
 {
@@ -54,5 +56,9 @@
 {
 }
 
+static inline void ath10k_thermal_set_throttling(struct ath10k *ar)
+{
+}
+
 #endif
 #endif /* _THERMAL_ */
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 5407887..71bdb36 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -21,11 +21,16 @@
 #include "core.h"
 
 #if !defined(_TRACE_H_)
-static inline u32 ath10k_frm_hdr_len(const void *buf)
+static inline u32 ath10k_frm_hdr_len(const void *buf, size_t len)
 {
 	const struct ieee80211_hdr *hdr = buf;
 
-	return ieee80211_hdrlen(hdr->frame_control);
+	/* In some rare cases (e.g. fcs error) the device reports a frame
+	 * buffer shorter than what the frame header implies (e.g. len = 0).
+	 * The buffer can still be accessed, so do a simple min() to
+	 * guarantee the caller never gets a value greater than len.
+	 */
+	return min_t(u32, len, ieee80211_hdrlen(hdr->frame_control));
 }
 #endif
 
@@ -46,7 +51,7 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM ath10k
 
-#define ATH10K_MSG_MAX 200
+#define ATH10K_MSG_MAX 400
 
 DECLARE_EVENT_CLASS(ath10k_log_event,
 	TP_PROTO(struct ath10k *ar, struct va_format *vaf),
@@ -360,13 +365,13 @@
 		__string(device, dev_name(ar->dev))
 		__string(driver, dev_driver_string(ar->dev))
 		__field(size_t, len)
-		__dynamic_array(u8, data, ath10k_frm_hdr_len(data))
+		__dynamic_array(u8, data, ath10k_frm_hdr_len(data, len))
 	),
 
 	TP_fast_assign(
 		__assign_str(device, dev_name(ar->dev));
 		__assign_str(driver, dev_driver_string(ar->dev));
-		__entry->len = ath10k_frm_hdr_len(data);
+		__entry->len = ath10k_frm_hdr_len(data, len);
 		memcpy(__get_dynamic_array(data), data, __entry->len);
 	),
 
@@ -387,15 +392,16 @@
 		__string(device, dev_name(ar->dev))
 		__string(driver, dev_driver_string(ar->dev))
 		__field(size_t, len)
-		__dynamic_array(u8, payload, (len - ath10k_frm_hdr_len(data)))
+		__dynamic_array(u8, payload, (len -
+					      ath10k_frm_hdr_len(data, len)))
 	),
 
 	TP_fast_assign(
 		__assign_str(device, dev_name(ar->dev));
 		__assign_str(driver, dev_driver_string(ar->dev));
-		__entry->len = len - ath10k_frm_hdr_len(data);
+		__entry->len = len - ath10k_frm_hdr_len(data, len);
 		memcpy(__get_dynamic_array(payload),
-		       data + ath10k_frm_hdr_len(data), __entry->len);
+		       data + ath10k_frm_hdr_len(data, len), __entry->len);
 	),
 
 	TP_printk(
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 3f00cec..826500b 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -55,8 +55,10 @@
 
 	lockdep_assert_held(&htt->tx_lock);
 
-	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
-		   tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
+	ath10k_dbg(ar, ATH10K_DBG_HTT,
+		   "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
+		   tx_done->msdu_id, !!tx_done->discard,
+		   !!tx_done->no_ack, !!tx_done->success);
 
 	if (tx_done->msdu_id >= htt->max_num_pending_tx) {
 		ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
@@ -97,6 +99,9 @@
 	if (tx_done->no_ack)
 		info->flags &= ~IEEE80211_TX_STAT_ACK;
 
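+	/* a frame sent with the no-ack policy that firmware reports as
+	 * successful is flagged as transmitted rather than acked
+	 */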
+	if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK))
+		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
 	ieee80211_tx_status(htt->ar->hw, msdu);
 	/* we do not own the msdu anymore */
 
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index c8b64e7..47fe2e7 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -45,6 +45,10 @@
 			struct wmi_rdy_ev_arg *arg);
 	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
 			     struct ath10k_fw_stats *stats);
+	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
+			    struct wmi_roam_ev_arg *arg);
+	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
+			      struct wmi_wow_ev_arg *arg);
 
 	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
 	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
@@ -81,7 +85,8 @@
 	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
 					     const struct wmi_wmm_params_all_arg *arg);
 	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
-					   const u8 peer_addr[ETH_ALEN]);
+					   const u8 peer_addr[ETH_ALEN],
+					   enum wmi_peer_type peer_type);
 	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
 					   const u8 peer_addr[ETH_ALEN]);
 	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
@@ -148,6 +153,27 @@
 					      u32 num_ac);
 	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
 					     const struct wmi_sta_keepalive_arg *arg);
+	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
+	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
+						    enum wmi_wow_wakeup_event event,
+						    u32 enable);
+	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
+	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
+					       u32 pattern_id,
+					       const u8 *pattern,
+					       const u8 *mask,
+					       int pattern_len,
+					       int pattern_offset);
+	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
+					       u32 pattern_id);
+	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
+						    u32 vdev_id,
+						    enum wmi_tdls_state state);
+	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
+						const struct wmi_tdls_peer_update_cmd_arg *arg,
+						const struct wmi_tdls_peer_capab_arg *cap,
+						const struct wmi_channel_arg *chan);
+	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
 };
 
 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -274,6 +300,26 @@
 }
 
 static inline int
+ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
+			struct wmi_roam_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_roam_ev)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
+			  struct wmi_wow_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_wow_event)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
+}
+
+static inline int
 ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
@@ -624,14 +670,15 @@
 
 static inline int
 ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
-		       const u8 peer_addr[ETH_ALEN])
+		       const u8 peer_addr[ETH_ALEN],
+		       enum wmi_peer_type peer_type)
 {
 	struct sk_buff *skb;
 
 	if (!ar->wmi.ops->gen_peer_create)
 		return -EOPNOTSUPP;
 
-	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
+	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);
 
@@ -1060,4 +1107,145 @@
 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
 }
 
+static inline int
+ath10k_wmi_wow_enable(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_wow_enable)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_wow_enable(ar);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
+				enum wmi_wow_wakeup_event event,
+				u32 enable)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
+			   const u8 *pattern, const u8 *mask,
+			   int pattern_len, int pattern_offset)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_wow_add_pattern)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
+					       pattern, mask, pattern_len,
+					       pattern_offset);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_wow_del_pattern)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+				enum wmi_tdls_state state)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_update_fw_tdls_state)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
+}
+
+static inline int
+ath10k_wmi_tdls_peer_update(struct ath10k *ar,
+			    const struct wmi_tdls_peer_update_cmd_arg *arg,
+			    const struct wmi_tdls_peer_capab_arg *cap,
+			    const struct wmi_channel_arg *chan)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_tdls_peer_update)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->tdls_peer_update_cmdid);
+}
+
+static inline int
+ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_adaptive_qcs)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
+}
+
 #endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index ee0c5f6..8fdba38 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -16,10 +16,13 @@
  */
 #include "core.h"
 #include "debug.h"
+#include "mac.h"
 #include "hw.h"
 #include "wmi.h"
 #include "wmi-ops.h"
 #include "wmi-tlv.h"
+#include "p2p.h"
 
 /***************/
 /* TLV helpers */
@@ -31,9 +34,9 @@
 
 static const struct wmi_tlv_policy wmi_tlv_policies[] = {
 	[WMI_TLV_TAG_ARRAY_BYTE]
-		= { .min_len = sizeof(u8) },
+		= { .min_len = 0 },
 	[WMI_TLV_TAG_ARRAY_UINT32]
-		= { .min_len = sizeof(u32) },
+		= { .min_len = 0 },
 	[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
 		= { .min_len = sizeof(struct wmi_scan_event) },
 	[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
@@ -62,6 +65,14 @@
 		= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
 	[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
 		= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
+	[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
+		= { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
+	[WMI_TLV_TAG_STRUCT_ROAM_EVENT]
+		= { .min_len = sizeof(struct wmi_tlv_roam_ev) },
+	[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
+		= { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
+	[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
+		= { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
 };
 
 static int
@@ -168,6 +179,7 @@
 {
 	const void **tb;
 	const struct wmi_tlv_bcn_tx_status_ev *ev;
+	struct ath10k_vif *arvif;
 	u32 vdev_id, tx_status;
 	int ret;
 
@@ -201,6 +213,10 @@
 		break;
 	}
 
+	arvif = ath10k_get_arvif(ar, vdev_id);
+	if (arvif && arvif->is_up && arvif->vif->csa_active)
+		ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
+
 	kfree(tb);
 	return 0;
 }
@@ -296,6 +312,83 @@
 	return 0;
 }
 
+static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
+					struct sk_buff *skb)
+{
+	const void **tb;
+	const struct wmi_tlv_p2p_noa_ev *ev;
+	const struct wmi_p2p_noa_info *noa;
+	int ret, vdev_id;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
+	noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
+
+	if (!ev || !noa) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	vdev_id = __le32_to_cpu(ev->vdev_id);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi tlv p2p noa vdev_id %i descriptors %hhu\n",
+		   vdev_id, noa->num_descriptors);
+
+	ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
+	kfree(tb);
+	return 0;
+}
+
+static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
+					 struct sk_buff *skb)
+{
+	const void **tb;
+	const struct wmi_tlv_tx_pause_ev *ev;
+	int ret, vdev_id;
+	u32 pause_id, action, vdev_map, peer_id, tid_map;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
+	if (!ev) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	pause_id = __le32_to_cpu(ev->pause_id);
+	action = __le32_to_cpu(ev->action);
+	vdev_map = __le32_to_cpu(ev->vdev_map);
+	peer_id = __le32_to_cpu(ev->peer_id);
+	tid_map = __le32_to_cpu(ev->tid_map);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
+		   pause_id, action, vdev_map, peer_id, tid_map);
+
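+	/* walk the vdev bitmap and pause/unpause tx on each affected vdev */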
+	for (vdev_id = 0; vdev_map; vdev_id++) {
+		if (!(vdev_map & BIT(vdev_id)))
+			continue;
+
+		vdev_map &= ~BIT(vdev_id);
+		ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id, action);
+	}
+
+	kfree(tb);
+	return 0;
+}
+
 /***********/
 /* TLV ops */
 /***********/
@@ -309,7 +402,7 @@
 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
 
 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
-		return;
+		goto out;
 
 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
 
@@ -417,11 +510,18 @@
 	case WMI_TLV_DIAG_EVENTID:
 		ath10k_wmi_tlv_event_diag(ar, skb);
 		break;
+	case WMI_TLV_P2P_NOA_EVENTID:
+		ath10k_wmi_tlv_event_p2p_noa(ar, skb);
+		break;
+	case WMI_TLV_TX_PAUSE_EVENTID:
+		ath10k_wmi_tlv_event_tx_pause(ar, skb);
+		break;
 	default:
 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
 		break;
 	}
 
+out:
 	dev_kfree_skb(skb);
 }
 
@@ -1012,6 +1112,65 @@
 	return 0;
 }
 
+static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
+					  struct sk_buff *skb,
+					  struct wmi_roam_ev_arg *arg)
+{
+	const void **tb;
+	const struct wmi_tlv_roam_ev *ev;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
+	if (!ev) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	arg->vdev_id = ev->vdev_id;
+	arg->reason = ev->reason;
+	arg->rssi = ev->rssi;
+
+	kfree(tb);
+	return 0;
+}
+
+static int
+ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
+			      struct wmi_wow_ev_arg *arg)
+{
+	const void **tb;
+	const struct wmi_tlv_wow_event_info *ev;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
+	if (!ev) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	arg->vdev_id = __le32_to_cpu(ev->vdev_id);
+	arg->flag = __le32_to_cpu(ev->flag);
+	arg->wake_reason = __le32_to_cpu(ev->wake_reason);
+	arg->data_len = __le32_to_cpu(ev->data_len);
+
+	kfree(tb);
+	return 0;
+}
+
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
 {
@@ -1160,8 +1319,8 @@
 	cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
 
 	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
-		cfg->num_offload_peers = __cpu_to_le32(3);
-		cfg->num_offload_reorder_bufs = __cpu_to_le32(3);
+		cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+		cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
 	} else {
 		cfg->num_offload_peers = __cpu_to_le32(0);
 		cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
@@ -1178,8 +1337,8 @@
 	cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
 	cfg->rx_decap_mode = __cpu_to_le32(1);
 	cfg->scan_max_pending_reqs = __cpu_to_le32(4);
-	cfg->bmiss_offload_max_vdev = __cpu_to_le32(3);
-	cfg->roam_offload_max_vdev = __cpu_to_le32(3);
+	cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+	cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
 	cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
 	cfg->num_mcast_groups = __cpu_to_le32(0);
 	cfg->num_mcast_table_elems = __cpu_to_le32(0);
@@ -1193,11 +1352,11 @@
 	cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
 	cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
 	cfg->max_frag_entries = __cpu_to_le32(2);
-	cfg->num_tdls_vdevs = __cpu_to_le32(1);
+	cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
 	cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
 	cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
 	cfg->num_multicast_filter_entries = __cpu_to_le32(5);
-	cfg->num_wow_filters = __cpu_to_le32(0x16);
+	cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
 	cfg->num_keep_alive_pattern = __cpu_to_le32(6);
 	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
 	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
@@ -1248,7 +1407,7 @@
 	cmd = (void *)tlv->value;
 
 	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
-	cmd->burst_duration_ms = __cpu_to_le32(0);
+	cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
 	cmd->num_channels = __cpu_to_le32(arg->n_channels);
 	cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
 	cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
@@ -1408,8 +1567,6 @@
 	void *ptr;
 	u32 flags = 0;
 
-	if (WARN_ON(arg->ssid && arg->ssid_len == 0))
-		return ERR_PTR(-EINVAL);
 	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
 		return ERR_PTR(-EINVAL);
 	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
@@ -1782,7 +1939,8 @@
 
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
-				  const u8 peer_addr[ETH_ALEN])
+				  const u8 peer_addr[ETH_ALEN],
+				  enum wmi_peer_type peer_type)
 {
 	struct wmi_tlv_peer_create_cmd *cmd;
 	struct wmi_tlv *tlv;
@@ -1797,7 +1955,7 @@
 	tlv->len = __cpu_to_le16(sizeof(*cmd));
 	cmd = (void *)tlv->value;
 	cmd->vdev_id = __cpu_to_le32(vdev_id);
-	cmd->peer_type = __cpu_to_le32(WMI_TLV_PEER_TYPE_DEFAULT); /* FIXME */
+	cmd->peer_type = __cpu_to_le32(peer_type);
 	ether_addr_copy(cmd->peer_addr.addr, peer_addr);
 
 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
@@ -2027,7 +2185,7 @@
 	if (!mac)
 		return ERR_PTR(-EINVAL);
 
-	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
@@ -2485,6 +2643,387 @@
 	return skb;
 }
 
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+					   enum wmi_tdls_state state)
+{
+	struct wmi_tdls_set_state_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+	/* Bitmask of options from enum wmi_tlv_tdls_options;
+	 * for now none of them are enabled.
+	 */
+	u32 options = 0;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->state = __cpu_to_le32(state);
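+	/* the remaining parameters are hardcoded defaults, not exposed
+	 * for tuning
+	 */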
+	cmd->notification_interval_ms = __cpu_to_le32(5000);
+	cmd->tx_discovery_threshold = __cpu_to_le32(100);
+	cmd->tx_teardown_threshold = __cpu_to_le32(5);
+	cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
+	cmd->rssi_delta = __cpu_to_le32(-20);
+	cmd->tdls_options = __cpu_to_le32(options);
+	cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
+	cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
+	cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
+	cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
+	cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
+		   state, vdev_id);
+	return skb;
+}
+
+static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
+{
+	u32 peer_qos = 0;
+
+	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
+	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
+	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
+	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
+
+	peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
+
+	return peer_qos;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
+				       const struct wmi_tdls_peer_update_cmd_arg *arg,
+				       const struct wmi_tdls_peer_capab_arg *cap,
+				       const struct wmi_channel_arg *chan_arg)
+{
+	struct wmi_tdls_peer_update_cmd *cmd;
+	struct wmi_tdls_peer_capab *peer_cap;
+	struct wmi_channel *chan;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	u32 peer_qos;
+	void *ptr;
+	int len;
+	int i;
+
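+	/* each section is preceded by a TLV header and every channel entry
+	 * in the array carries its own header
+	 */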
+	len = sizeof(*tlv) + sizeof(*cmd) +
+	      sizeof(*tlv) + sizeof(*peer_cap) +
+	      sizeof(*tlv) + cap->peer_chan_len * (sizeof(*tlv) +
+						   sizeof(*chan));
+
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
+	cmd->peer_state = __cpu_to_le32(arg->peer_state);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
+	tlv->len = __cpu_to_le16(sizeof(*peer_cap));
+	peer_cap = (void *)tlv->value;
+	peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
+						   cap->peer_max_sp);
+	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
+	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
+	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
+	peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
+	peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
+	peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
+	peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
+
+	for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
+		peer_cap->peer_operclass[i] = cap->peer_operclass[i];
+
+	peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
+	peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
+	peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*peer_cap);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(cap->peer_chan_len * (sizeof(*tlv) +
+						       sizeof(*chan)));
+
+	ptr += sizeof(*tlv);
+
+	for (i = 0; i < cap->peer_chan_len; i++) {
+		tlv = ptr;
+		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+		tlv->len = __cpu_to_le16(sizeof(*chan));
+		chan = (void *)tlv->value;
+		ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
+
+		ptr += sizeof(*tlv);
+		ptr += sizeof(*chan);
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
+		   arg->vdev_id, arg->peer_state, cap->peer_chan_len);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
+{
+	struct wmi_tlv_wow_enable_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (struct wmi_tlv *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	cmd->enable = __cpu_to_le32(1);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
+					   u32 vdev_id,
+					   enum wmi_wow_wakeup_event event,
+					   u32 enable)
+{
+	struct wmi_tlv_wow_add_del_event_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (struct wmi_tlv *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->is_add = __cpu_to_le32(enable);
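+	/* each call arms or disarms a single wakeup event via its bitmap bit */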
+	cmd->event_bitmap = __cpu_to_le32(1 << event);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+		   wow_wakeup_event(event), enable, vdev_id);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
+{
+	struct wmi_tlv_wow_host_wakeup_ind *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (struct wmi_tlv *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
+				      u32 pattern_id, const u8 *pattern,
+				      const u8 *bitmask, int pattern_len,
+				      int pattern_offset)
+{
+	struct wmi_tlv_wow_add_pattern_cmd *cmd;
+	struct wmi_tlv_wow_bitmap_pattern *bitmap;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd) +
+	      sizeof(*tlv) +			/* array struct */
+	      sizeof(*tlv) + sizeof(*bitmap) +  /* bitmap */
+	      sizeof(*tlv) +			/* empty ipv4 sync */
+	      sizeof(*tlv) +			/* empty ipv6 sync */
+	      sizeof(*tlv) +			/* empty magic */
+	      sizeof(*tlv) +			/* empty info timeout */
+	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */
+
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	/* cmd */
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->pattern_id = __cpu_to_le32(pattern_id);
+	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	/* bitmap */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));
+
+	ptr += sizeof(*tlv);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
+	tlv->len = __cpu_to_le16(sizeof(*bitmap));
+	bitmap = (void *)tlv->value;
+
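+	/* the bitmask buffer is the same length as the pattern buffer */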
+	memcpy(bitmap->patternbuf, pattern, pattern_len);
+	memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
+	bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
+	bitmap->pattern_len = __cpu_to_le32(pattern_len);
+	bitmap->bitmask_len = __cpu_to_le32(pattern_len);
+	bitmap->pattern_id = __cpu_to_le32(pattern_id);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*bitmap);
+
+	/* ipv4 sync */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(0);
+
+	ptr += sizeof(*tlv);
+
+	/* ipv6 sync */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(0);
+
+	ptr += sizeof(*tlv);
+
+	/* magic */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(0);
+
+	ptr += sizeof(*tlv);
+
+	/* pattern info timeout */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+	tlv->len = __cpu_to_le16(0);
+
+	ptr += sizeof(*tlv);
+
+	/* ratelimit interval */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+	tlv->len = __cpu_to_le16(sizeof(u32));
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
+		   vdev_id, pattern_id, pattern_offset);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
+				      u32 pattern_id)
+{
+	struct wmi_tlv_wow_del_pattern_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (struct wmi_tlv *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->pattern_id = __cpu_to_le32(pattern_id);
+	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+		   vdev_id, pattern_id);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
+{
+	struct wmi_tlv_adaptive_qcs *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->enable = __cpu_to_le32(enable ? 1 : 0);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
+	return skb;
+}
+
 /****************/
 /* TLV mappings */
 /****************/
@@ -2609,6 +3148,9 @@
 	.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
 	.pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
 	.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
+	.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
+	.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
+	.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
 };
 
 static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
@@ -2736,6 +3278,8 @@
 	.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
 	.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
 	.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
+	.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
+	.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
 
 	.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
 	.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
@@ -2781,6 +3325,14 @@
 	.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
 	.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
 	.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
+	.gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
+	.gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
+	.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
+	.gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
+	.gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
+	.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
+	.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
+	.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
 };
 
 /************/
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index a6c8280..ad655c4 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1454,6 +1454,174 @@
 	__le32 num_chan_stats;
 } __packed;
 
+struct wmi_tlv_p2p_noa_ev {
+	__le32 vdev_id;
+} __packed;
+
+struct wmi_tlv_roam_ev {
+	__le32 vdev_id;
+	__le32 reason;
+	__le32 rssi;
+} __packed;
+
+struct wmi_tlv_wow_add_del_event_cmd {
+	__le32 vdev_id;
+	__le32 is_add;
+	__le32 event_bitmap;
+} __packed;
+
+struct wmi_tlv_wow_enable_cmd {
+	__le32 enable;
+} __packed;
+
+struct wmi_tlv_wow_host_wakeup_ind {
+	__le32 reserved;
+} __packed;
+
+struct wmi_tlv_wow_event_info {
+	__le32 vdev_id;
+	__le32 flag;
+	__le32 wake_reason;
+	__le32 data_len;
+} __packed;
+
+enum wmi_tlv_pattern_type {
+	WOW_PATTERN_MIN = 0,
+	WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
+	WOW_IPV4_SYNC_PATTERN,
+	WOW_IPV6_SYNC_PATTERN,
+	WOW_WILD_CARD_PATTERN,
+	WOW_TIMER_PATTERN,
+	WOW_MAGIC_PATTERN,
+	WOW_IPV6_RA_PATTERN,
+	WOW_IOAC_PKT_PATTERN,
+	WOW_IOAC_TMR_PATTERN,
+	WOW_PATTERN_MAX
+};
+
+#define WOW_DEFAULT_BITMAP_PATTERN_SIZE		148
+#define WOW_DEFAULT_BITMASK_SIZE		148
+
+struct wmi_tlv_wow_bitmap_pattern {
+	u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
+	u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
+	__le32 pattern_offset;
+	__le32 pattern_len;
+	__le32 bitmask_len;
+	__le32 pattern_id;
+} __packed;
+
+struct wmi_tlv_wow_add_pattern_cmd {
+	__le32 vdev_id;
+	__le32 pattern_id;
+	__le32 pattern_type;
+} __packed;
+
+struct wmi_tlv_wow_del_pattern_cmd {
+	__le32 vdev_id;
+	__le32 pattern_id;
+	__le32 pattern_type;
+} __packed;
+
+/* TDLS Options */
+enum wmi_tlv_tdls_options {
+	WMI_TLV_TDLS_OFFCHAN_EN = BIT(0),
+	WMI_TLV_TDLS_BUFFER_STA_EN = BIT(1),
+	WMI_TLV_TDLS_SLEEP_STA_EN = BIT(2),
+};
+
+struct wmi_tdls_set_state_cmd {
+	__le32 vdev_id;
+	__le32 state;
+	__le32 notification_interval_ms;
+	__le32 tx_discovery_threshold;
+	__le32 tx_teardown_threshold;
+	__le32 rssi_teardown_threshold;
+	__le32 rssi_delta;
+	__le32 tdls_options;
+	__le32 tdls_peer_traffic_ind_window;
+	__le32 tdls_peer_traffic_response_timeout_ms;
+	__le32 tdls_puapsd_mask;
+	__le32 tdls_puapsd_inactivity_time_ms;
+	__le32 tdls_puapsd_rx_frame_threshold;
+} __packed;
+
+struct wmi_tdls_peer_update_cmd {
+	__le32 vdev_id;
+	struct wmi_mac_addr peer_macaddr;
+	__le32 peer_state;
+} __packed;
+
+enum {
+	WMI_TLV_TDLS_PEER_QOS_AC_VO = BIT(0),
+	WMI_TLV_TDLS_PEER_QOS_AC_VI = BIT(1),
+	WMI_TLV_TDLS_PEER_QOS_AC_BK = BIT(2),
+	WMI_TLV_TDLS_PEER_QOS_AC_BE = BIT(3),
+};
+
+#define WMI_TLV_TDLS_PEER_SP_MASK	0x60
+#define WMI_TLV_TDLS_PEER_SP_LSB	5
+
+struct wmi_tdls_peer_capab {
+	__le32 peer_qos;
+	__le32 buff_sta_support;
+	__le32 off_chan_support;
+	__le32 peer_curr_operclass;
+	__le32 self_curr_operclass;
+	__le32 peer_chan_len;
+	__le32 peer_operclass_len;
+	u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+	__le32 is_peer_responder;
+	__le32 pref_offchan_num;
+	__le32 pref_offchan_bw;
+} __packed;
+
+struct wmi_tlv_adaptive_qcs {
+	__le32 enable;
+} __packed;
+
+/**
+ * enum wmi_tlv_tx_pause_id - firmware tx queue pause reason types
+ *
+ * @WMI_TLV_TX_PAUSE_ID_MCC: used by the multi-channel firmware scheduler.
+ *		Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PEER_PS: peer in AP mode is asleep.
+ *		Only peer_id is valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD: Only peer_id and tid_map are valid.
+ * @WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA: Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_P2P_GO_PS: Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_STA_ADD_BA: Only peer_id and tid_map are valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PS: When all peers are asleep in AP mode. Only
+ *		vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_IBSS_PS: When all peers are asleep in IBSS mode. Only
+ *		vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_HOST: Host itself requested tx pause.
+ */
+enum wmi_tlv_tx_pause_id {
+	WMI_TLV_TX_PAUSE_ID_MCC = 1,
+	WMI_TLV_TX_PAUSE_ID_AP_PEER_PS = 2,
+	WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD = 3,
+	WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA = 4,
+	WMI_TLV_TX_PAUSE_ID_P2P_GO_PS = 5,
+	WMI_TLV_TX_PAUSE_ID_STA_ADD_BA = 6,
+	WMI_TLV_TX_PAUSE_ID_AP_PS = 7,
+	WMI_TLV_TX_PAUSE_ID_IBSS_PS = 8,
+	WMI_TLV_TX_PAUSE_ID_HOST = 21,
+};
+
+enum wmi_tlv_tx_pause_action {
+	WMI_TLV_TX_PAUSE_ACTION_STOP,
+	WMI_TLV_TX_PAUSE_ACTION_WAKE,
+};
+
+struct wmi_tlv_tx_pause_ev {
+	__le32 pause_id;
+	__le32 action;
+	__le32 vdev_map;
+	__le32 peer_id;
+	__le32 tid_map;
+} __packed;
+
 void ath10k_wmi_tlv_attach(struct ath10k *ar);
 
 #endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index c7ea77e..6c046c2 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -26,6 +26,8 @@
 #include "mac.h"
 #include "testmode.h"
 #include "wmi-ops.h"
+#include "p2p.h"
+#include "hw.h"
 
 /* MAIN WMI cmd track */
 static struct wmi_cmd_map wmi_cmd_map = {
@@ -884,20 +886,24 @@
 
 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
 {
-	int ret;
+	unsigned long time_left;
 
-	ret = wait_for_completion_timeout(&ar->wmi.service_ready,
-					  WMI_SERVICE_READY_TIMEOUT_HZ);
-	return ret;
+	time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+						WMI_SERVICE_READY_TIMEOUT_HZ);
+	if (!time_left)
+		return -ETIMEDOUT;
+	return 0;
 }
 
 int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
 {
-	int ret;
+	unsigned long time_left;
 
-	ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
-					  WMI_UNIFIED_READY_TIMEOUT_HZ);
-	return ret;
+	time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
+						WMI_UNIFIED_READY_TIMEOUT_HZ);
+	if (!time_left)
+		return -ETIMEDOUT;
+	return 0;
 }
 
 struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
@@ -1351,63 +1357,6 @@
 	return band;
 }
 
-static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
-{
-	u8 rate_idx = 0;
-
-	/* rate in Kbps */
-	switch (rate) {
-	case 1000:
-		rate_idx = 0;
-		break;
-	case 2000:
-		rate_idx = 1;
-		break;
-	case 5500:
-		rate_idx = 2;
-		break;
-	case 11000:
-		rate_idx = 3;
-		break;
-	case 6000:
-		rate_idx = 4;
-		break;
-	case 9000:
-		rate_idx = 5;
-		break;
-	case 12000:
-		rate_idx = 6;
-		break;
-	case 18000:
-		rate_idx = 7;
-		break;
-	case 24000:
-		rate_idx = 8;
-		break;
-	case 36000:
-		rate_idx = 9;
-		break;
-	case 48000:
-		rate_idx = 10;
-		break;
-	case 54000:
-		rate_idx = 11;
-		break;
-	default:
-		break;
-	}
-
-	if (band == IEEE80211_BAND_5GHZ) {
-		if (rate_idx > 3)
-			/* Omit CCK rates */
-			rate_idx -= 4;
-		else
-			rate_idx = 0;
-	}
-
-	return rate_idx;
-}
-
 /* If keys are configured, HW decrypts all frames
  * with protected bit set. Mark such frames as decrypted.
  */
@@ -1489,6 +1438,7 @@
 	struct wmi_mgmt_rx_ev_arg arg = {};
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	struct ieee80211_hdr *hdr;
+	struct ieee80211_supported_band *sband;
 	u32 rx_status;
 	u32 channel;
 	u32 phy_mode;
@@ -1501,6 +1451,7 @@
 	ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
 	if (ret) {
 		ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
+		dev_kfree_skb(skb);
 		return ret;
 	}
 
@@ -1559,9 +1510,11 @@
 	if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
 		ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
 
+	sband = &ar->mac.sbands[status->band];
+
 	status->freq = ieee80211_channel_to_frequency(channel, status->band);
 	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
-	status->rate_idx = get_rate_idx(rate, status->band);
+	status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
 
 	hdr = (struct ieee80211_hdr *)skb->data;
 	fc = le16_to_cpu(hdr->frame_control);
@@ -1585,6 +1538,9 @@
 		}
 	}
 
+	if (ieee80211_is_beacon(hdr->frame_control))
+		ath10k_mac_handle_beacon(ar, skb);
+
 	ath10k_dbg(ar, ATH10K_DBG_MGMT,
 		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
 		   skb, skb->len,
@@ -1682,20 +1638,22 @@
 	}
 
 	if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
-		/* During scanning chan info is reported twice for each
-		 * visited channel. The reported cycle count is global
-		 * and per-channel cycle count must be calculated */
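+		/* chan info is reported twice per visited channel (once when
+		 * sampling starts and once when it completes); a survey entry
+		 * can only be derived from the closing report
+		 */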
+		if (ar->ch_info_can_report_survey) {
+			survey = &ar->survey[idx];
+			survey->noise = noise_floor;
+			survey->filled = SURVEY_INFO_NOISE_DBM;
 
-		cycle_count -= ar->survey_last_cycle_count;
-		rx_clear_count -= ar->survey_last_rx_clear_count;
+			ath10k_hw_fill_survey_time(ar,
+						   survey,
+						   cycle_count,
+						   rx_clear_count,
+						   ar->survey_last_cycle_count,
+						   ar->survey_last_rx_clear_count);
+		}
 
-		survey = &ar->survey[idx];
-		survey->time = WMI_CHAN_INFO_MSEC(cycle_count);
-		survey->time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
-		survey->noise = noise_floor;
-		survey->filled = SURVEY_INFO_TIME |
-				 SURVEY_INFO_TIME_RX |
-				 SURVEY_INFO_NOISE_DBM;
+		ar->ch_info_can_report_survey = false;
+	} else {
+		ar->ch_info_can_report_survey = true;
 	}
 
 	ar->survey_last_rx_clear_count = rx_clear_count;
@@ -2276,109 +2234,25 @@
 		   tim->bitmap_ctrl, pvm_len);
 }
 
-static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
-				   const struct wmi_p2p_noa_info *noa)
-{
-	struct ieee80211_p2p_noa_attr *noa_attr;
-	u8  ctwindow_oppps = noa->ctwindow_oppps;
-	u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
-	bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
-	__le16 *noa_attr_len;
-	u16 attr_len;
-	u8 noa_descriptors = noa->num_descriptors;
-	int i;
-
-	/* P2P IE */
-	data[0] = WLAN_EID_VENDOR_SPECIFIC;
-	data[1] = len - 2;
-	data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
-	data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
-	data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
-	data[5] = WLAN_OUI_TYPE_WFA_P2P;
-
-	/* NOA ATTR */
-	data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
-	noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
-	noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
-
-	noa_attr->index = noa->index;
-	noa_attr->oppps_ctwindow = ctwindow;
-	if (oppps)
-		noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
-
-	for (i = 0; i < noa_descriptors; i++) {
-		noa_attr->desc[i].count =
-			__le32_to_cpu(noa->descriptors[i].type_count);
-		noa_attr->desc[i].duration = noa->descriptors[i].duration;
-		noa_attr->desc[i].interval = noa->descriptors[i].interval;
-		noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
-	}
-
-	attr_len = 2; /* index + oppps_ctwindow */
-	attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
-	*noa_attr_len = __cpu_to_le16(attr_len);
-}
-
-static u32 ath10k_p2p_calc_noa_ie_len(const struct wmi_p2p_noa_info *noa)
-{
-	u32 len = 0;
-	u8 noa_descriptors = noa->num_descriptors;
-	u8 opp_ps_info = noa->ctwindow_oppps;
-	bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
-
-	if (!noa_descriptors && !opps_enabled)
-		return len;
-
-	len += 1 + 1 + 4; /* EID + len + OUI */
-	len += 1 + 2; /* noa attr  + attr len */
-	len += 1 + 1; /* index + oppps_ctwindow */
-	len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
-
-	return len;
-}
-
 static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
 				  struct sk_buff *bcn,
 				  const struct wmi_p2p_noa_info *noa)
 {
-	u8 *new_data, *old_data = arvif->u.ap.noa_data;
-	u32 new_len;
-
 	if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
 		return;
 
 	ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
-	if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
-		new_len = ath10k_p2p_calc_noa_ie_len(noa);
-		if (!new_len)
-			goto cleanup;
 
-		new_data = kmalloc(new_len, GFP_ATOMIC);
-		if (!new_data)
-			goto cleanup;
-
-		ath10k_p2p_fill_noa_ie(new_data, new_len, noa);
-
-		spin_lock_bh(&ar->data_lock);
-		arvif->u.ap.noa_data = new_data;
-		arvif->u.ap.noa_len = new_len;
-		spin_unlock_bh(&ar->data_lock);
-		kfree(old_data);
-	}
+	if (noa->changed & WMI_P2P_NOA_CHANGED_BIT)
+		ath10k_p2p_noa_update(arvif, noa);
 
 	if (arvif->u.ap.noa_data)
 		if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
 			memcpy(skb_put(bcn, arvif->u.ap.noa_len),
 			       arvif->u.ap.noa_data,
 			       arvif->u.ap.noa_len);
-	return;
 
-cleanup:
-	spin_lock_bh(&ar->data_lock);
-	arvif->u.ap.noa_data = NULL;
-	arvif->u.ap.noa_len = 0;
-	spin_unlock_bh(&ar->data_lock);
-	kfree(old_data);
 }
 
 static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
@@ -2555,6 +2429,7 @@
 				    u64 tsf)
 {
 	u32 reg0, reg1, tsf32l;
+	struct ieee80211_channel *ch;
 	struct pulse_event pe;
 	u64 tsf64;
 	u8 rssi, width;
@@ -2583,6 +2458,15 @@
 	if (!ar->dfs_detector)
 		return;
 
+	spin_lock_bh(&ar->data_lock);
+	ch = ar->rx_channel;
+	spin_unlock_bh(&ar->data_lock);
+
+	if (!ch) {
+		ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
+		goto radar_detected;
+	}
+
 	/* report event to DFS pattern detector */
 	tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
 	tsf64 = tsf & (~0xFFFFFFFFULL);
@@ -2598,10 +2482,10 @@
 		rssi = 0;
 
 	pe.ts = tsf64;
-	pe.freq = ar->hw->conf.chandef.chan->center_freq;
+	pe.freq = ch->center_freq;
 	pe.width = width;
 	pe.rssi = rssi;
-
+	pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0);
 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
 		   "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
 		   pe.freq, pe.width, pe.rssi, pe.ts);
@@ -2614,6 +2498,7 @@
 		return;
 	}
 
+radar_detected:
 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
 	ATH10K_DFS_STAT_INC(ar, radar_detected);
 
@@ -2872,7 +2757,43 @@
 
 void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
 {
-	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
+	struct wmi_roam_ev_arg arg = {};
+	int ret;
+	u32 vdev_id;
+	u32 reason;
+	s32 rssi;
+
+	ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to parse roam event: %d\n", ret);
+		return;
+	}
+
+	vdev_id = __le32_to_cpu(arg.vdev_id);
+	reason = __le32_to_cpu(arg.reason);
+	rssi = __le32_to_cpu(arg.rssi);
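+	/* rssi is presumably reported relative to the spectral noise floor
+	 * reference, hence the offset below
+	 */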
+	rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi roam event vdev %u reason 0x%08x rssi %d\n",
+		   vdev_id, reason, rssi);
+
+	if (reason >= WMI_ROAM_REASON_MAX)
+		ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n",
+			    reason, vdev_id);
+
+	switch (reason) {
+	case WMI_ROAM_REASON_BEACON_MISS:
+		ath10k_mac_handle_beacon_miss(ar, vdev_id);
+		break;
+	case WMI_ROAM_REASON_BETTER_AP:
+	case WMI_ROAM_REASON_LOW_RSSI:
+	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
+	case WMI_ROAM_REASON_HO_FAILED:
+		ath10k_warn(ar, "ignoring unimplemented roam event reason %d on vdev %i\n",
+			    reason, vdev_id);
+		break;
+	}
 }
 
 void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
@@ -2942,7 +2863,19 @@
 
 void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
 {
-	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
+	struct wmi_wow_ev_arg ev = {};
+	int ret;
+
+	complete(&ar->wow.wakeup_completed);
+
+	ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
+	if (ret) {
+		ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret);
+		return;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
+		   wow_reason(ev.wake_reason));
 }
 
 void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
@@ -3231,6 +3164,21 @@
 	return 0;
 }
 
+static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
+				      struct wmi_roam_ev_arg *arg)
+{
+	struct wmi_roam_ev *ev = (void *)skb->data;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	skb_pull(skb, sizeof(*ev));
+	arg->vdev_id = ev->vdev_id;
+	arg->reason = ev->reason;
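+	/* the non-TLV roam event carries no rssi; callers zero the arg so
+	 * arg->rssi is left at 0
+	 */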
+
+	return 0;
+}
+
 int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct wmi_rdy_ev_arg arg = {};
@@ -3275,7 +3223,7 @@
 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
 
 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
-		return;
+		goto out;
 
 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
 
@@ -3379,6 +3327,7 @@
 		break;
 	}
 
+out:
 	dev_kfree_skb(skb);
 }
 
@@ -3392,7 +3341,7 @@
 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
 
 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
-		return;
+		goto out;
 
 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
 
@@ -3515,7 +3464,7 @@
 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
 
 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
-		return;
+		goto out;
 
 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
 
@@ -3623,6 +3572,7 @@
 		break;
 	}
 
+out:
 	dev_kfree_skb(skb);
 }
 
@@ -3989,6 +3939,8 @@
 	cmd = (struct wmi_init_cmd_10_2 *)buf->data;
 
 	features = WMI_10_2_RX_BATCH_MODE;
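+	/* advertise coex GPIO support only if the firmware service is present */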
+	if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
+		features |= WMI_10_2_COEX_GPIO;
 	cmd->resource_config.feature_mask = __cpu_to_le32(features);
 
 	memcpy(&cmd->resource_config.common, &config, sizeof(config));
@@ -4315,8 +4267,6 @@
 	const char *cmdname;
 	u32 flags = 0;
 
-	if (WARN_ON(arg->ssid && arg->ssid_len == 0))
-		return ERR_PTR(-EINVAL);
 	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
 		return ERR_PTR(-EINVAL);
 	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
@@ -4539,7 +4489,8 @@
 
 static struct sk_buff *
 ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
-			      const u8 peer_addr[ETH_ALEN])
+			      const u8 peer_addr[ETH_ALEN],
+			      enum wmi_peer_type peer_type)
 {
 	struct wmi_peer_create_cmd *cmd;
 	struct sk_buff *skb;
@@ -5223,6 +5174,7 @@
 	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
 	.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
+	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
 
 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5268,6 +5220,7 @@
 	/* .gen_bcn_tmpl not implemented */
 	/* .gen_prb_tmpl not implemented */
 	/* .gen_p2p_go_bcn_ie not implemented */
+	/* .gen_adaptive_qcs not implemented */
 };
 
 static const struct wmi_ops wmi_10_1_ops = {
@@ -5290,6 +5243,7 @@
 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
 
 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5330,6 +5284,7 @@
 	/* .gen_bcn_tmpl not implemented */
 	/* .gen_prb_tmpl not implemented */
 	/* .gen_p2p_go_bcn_ie not implemented */
+	/* .gen_adaptive_qcs not implemented */
 };
 
 static const struct wmi_ops wmi_10_2_ops = {
@@ -5353,6 +5308,7 @@
 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
 
 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5413,6 +5369,7 @@
 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
 
 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5452,6 +5409,7 @@
 	/* .gen_bcn_tmpl not implemented */
 	/* .gen_prb_tmpl not implemented */
 	/* .gen_p2p_go_bcn_ie not implemented */
+	/* .gen_adaptive_qcs not implemented */
 };
 
 int ath10k_wmi_attach(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index adf935b..cf44a3d 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -148,6 +148,8 @@
 	WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
 	WMI_SERVICE_MDNS_OFFLOAD,
 	WMI_SERVICE_SAP_AUTH_OFFLOAD,
+	WMI_SERVICE_ATF,
+	WMI_SERVICE_COEX_GPIO,
 
 	/* keep last */
 	WMI_SERVICE_MAX,
@@ -177,6 +179,8 @@
 	WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
 	WMI_10X_SERVICE_FORCE_FW_HANG,
 	WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+	WMI_10X_SERVICE_ATF,
+	WMI_10X_SERVICE_COEX_GPIO,
 };
 
 enum wmi_main_service {
@@ -293,6 +297,8 @@
 	SVCSTR(WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT);
 	SVCSTR(WMI_SERVICE_MDNS_OFFLOAD);
 	SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
+	SVCSTR(WMI_SERVICE_ATF);
+	SVCSTR(WMI_SERVICE_COEX_GPIO);
 	default:
 		return NULL;
 	}
@@ -356,6 +362,10 @@
 	       WMI_SERVICE_FORCE_FW_HANG, len);
 	SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
 	       WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
+	SVCMAP(WMI_10X_SERVICE_ATF,
+	       WMI_SERVICE_ATF, len);
+	SVCMAP(WMI_10X_SERVICE_COEX_GPIO,
+	       WMI_SERVICE_COEX_GPIO, len);
 }
 
 static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
@@ -552,6 +562,9 @@
 	u32 gpio_output_cmdid;
 	u32 pdev_get_temperature_cmdid;
 	u32 vdev_set_wmm_params_cmdid;
+	u32 tdls_set_state_cmdid;
+	u32 tdls_peer_update_cmdid;
+	u32 adaptive_qcs_cmdid;
 };
 
 /*
@@ -1952,6 +1965,7 @@
 enum wmi_10_2_feature_mask {
 	WMI_10_2_RX_BATCH_MODE = BIT(0),
 	WMI_10_2_ATF_CONFIG    = BIT(1),
+	WMI_10_2_COEX_GPIO     = BIT(3),
 };
 
 struct wmi_resource_config_10_2 {
@@ -2166,6 +2180,7 @@
 	u32 max_scan_time;
 	u32 probe_delay;
 	u32 scan_ctrl_flags;
+	u32 burst_duration_ms;
 
 	u32 ie_len;
 	u32 n_channels;
@@ -4333,6 +4348,12 @@
 	struct wmi_mac_addr peer_macaddr;
 } __packed;
 
+enum wmi_peer_type {
+	WMI_PEER_TYPE_DEFAULT = 0,
+	WMI_PEER_TYPE_BSS = 1,
+	WMI_PEER_TYPE_TDLS = 2,
+};
+
 struct wmi_peer_delete_cmd {
 	__le32 vdev_id;
 	struct wmi_mac_addr peer_macaddr;
@@ -4645,9 +4666,6 @@
 
 #define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
 
-/* FIXME: empirically extrapolated */
-#define WMI_CHAN_INFO_MSEC(x) ((x) / 76595)
-
 /* Beacon filter wmi command info */
 #define BCN_FLT_MAX_SUPPORTED_IES	256
 #define BCN_FLT_MAX_ELEMS_IE_LIST	(BCN_FLT_MAX_SUPPORTED_IES / 32)
@@ -4769,6 +4787,22 @@
 	__le32 config_valid;
 } __packed;
 
+enum wmi_roam_reason {
+	WMI_ROAM_REASON_BETTER_AP = 1,
+	WMI_ROAM_REASON_BEACON_MISS = 2,
+	WMI_ROAM_REASON_LOW_RSSI = 3,
+	WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
+	WMI_ROAM_REASON_HO_FAILED = 5,
+
+	/* keep last */
+	WMI_ROAM_REASON_MAX,
+};
+
+struct wmi_roam_ev {
+	__le32 vdev_id;
+	__le32 reason;
+} __packed;
+
 #define ATH10K_FRAGMT_THRESHOLD_MIN	540
 #define ATH10K_FRAGMT_THRESHOLD_MAX	2346
 
@@ -4857,11 +4891,200 @@
 	const u8 *mac_addr;
 };
 
+struct wmi_roam_ev_arg {
+	__le32 vdev_id;
+	__le32 reason;
+	__le32 rssi;
+};
+
 struct wmi_pdev_temperature_event {
 	/* temperature value in Celsius degree */
 	__le32 temperature;
 } __packed;
 
+/* WOW structures */
+enum wmi_wow_wakeup_event {
+	WOW_BMISS_EVENT = 0,
+	WOW_BETTER_AP_EVENT,
+	WOW_DEAUTH_RECVD_EVENT,
+	WOW_MAGIC_PKT_RECVD_EVENT,
+	WOW_GTK_ERR_EVENT,
+	WOW_FOURWAY_HSHAKE_EVENT,
+	WOW_EAPOL_RECVD_EVENT,
+	WOW_NLO_DETECTED_EVENT,
+	WOW_DISASSOC_RECVD_EVENT,
+	WOW_PATTERN_MATCH_EVENT,
+	WOW_CSA_IE_EVENT,
+	WOW_PROBE_REQ_WPS_IE_EVENT,
+	WOW_AUTH_REQ_EVENT,
+	WOW_ASSOC_REQ_EVENT,
+	WOW_HTT_EVENT,
+	WOW_RA_MATCH_EVENT,
+	WOW_HOST_AUTO_SHUTDOWN_EVENT,
+	WOW_IOAC_MAGIC_EVENT,
+	WOW_IOAC_SHORT_EVENT,
+	WOW_IOAC_EXTEND_EVENT,
+	WOW_IOAC_TIMER_EVENT,
+	WOW_DFS_PHYERR_RADAR_EVENT,
+	WOW_BEACON_EVENT,
+	WOW_CLIENT_KICKOUT_EVENT,
+	WOW_EVENT_MAX,
+};
+
+#define C2S(x) case x: return #x
+
+static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev)
+{
+	switch (ev) {
+	C2S(WOW_BMISS_EVENT);
+	C2S(WOW_BETTER_AP_EVENT);
+	C2S(WOW_DEAUTH_RECVD_EVENT);
+	C2S(WOW_MAGIC_PKT_RECVD_EVENT);
+	C2S(WOW_GTK_ERR_EVENT);
+	C2S(WOW_FOURWAY_HSHAKE_EVENT);
+	C2S(WOW_EAPOL_RECVD_EVENT);
+	C2S(WOW_NLO_DETECTED_EVENT);
+	C2S(WOW_DISASSOC_RECVD_EVENT);
+	C2S(WOW_PATTERN_MATCH_EVENT);
+	C2S(WOW_CSA_IE_EVENT);
+	C2S(WOW_PROBE_REQ_WPS_IE_EVENT);
+	C2S(WOW_AUTH_REQ_EVENT);
+	C2S(WOW_ASSOC_REQ_EVENT);
+	C2S(WOW_HTT_EVENT);
+	C2S(WOW_RA_MATCH_EVENT);
+	C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT);
+	C2S(WOW_IOAC_MAGIC_EVENT);
+	C2S(WOW_IOAC_SHORT_EVENT);
+	C2S(WOW_IOAC_EXTEND_EVENT);
+	C2S(WOW_IOAC_TIMER_EVENT);
+	C2S(WOW_DFS_PHYERR_RADAR_EVENT);
+	C2S(WOW_BEACON_EVENT);
+	C2S(WOW_CLIENT_KICKOUT_EVENT);
+	C2S(WOW_EVENT_MAX);
+	default:
+		return NULL;
+	}
+}
+
+enum wmi_wow_wake_reason {
+	WOW_REASON_UNSPECIFIED = -1,
+	WOW_REASON_NLOD = 0,
+	WOW_REASON_AP_ASSOC_LOST,
+	WOW_REASON_LOW_RSSI,
+	WOW_REASON_DEAUTH_RECVD,
+	WOW_REASON_DISASSOC_RECVD,
+	WOW_REASON_GTK_HS_ERR,
+	WOW_REASON_EAP_REQ,
+	WOW_REASON_FOURWAY_HS_RECV,
+	WOW_REASON_TIMER_INTR_RECV,
+	WOW_REASON_PATTERN_MATCH_FOUND,
+	WOW_REASON_RECV_MAGIC_PATTERN,
+	WOW_REASON_P2P_DISC,
+	WOW_REASON_WLAN_HB,
+	WOW_REASON_CSA_EVENT,
+	WOW_REASON_PROBE_REQ_WPS_IE_RECV,
+	WOW_REASON_AUTH_REQ_RECV,
+	WOW_REASON_ASSOC_REQ_RECV,
+	WOW_REASON_HTT_EVENT,
+	WOW_REASON_RA_MATCH,
+	WOW_REASON_HOST_AUTO_SHUTDOWN,
+	WOW_REASON_IOAC_MAGIC_EVENT,
+	WOW_REASON_IOAC_SHORT_EVENT,
+	WOW_REASON_IOAC_EXTEND_EVENT,
+	WOW_REASON_IOAC_TIMER_EVENT,
+	WOW_REASON_ROAM_HO,
+	WOW_REASON_DFS_PHYERR_RADADR_EVENT,
+	WOW_REASON_BEACON_RECV,
+	WOW_REASON_CLIENT_KICKOUT_EVENT,
+	WOW_REASON_DEBUG_TEST = 0xFF,
+};
+
+static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
+{
+	switch (reason) {
+	C2S(WOW_REASON_UNSPECIFIED);
+	C2S(WOW_REASON_NLOD);
+	C2S(WOW_REASON_AP_ASSOC_LOST);
+	C2S(WOW_REASON_LOW_RSSI);
+	C2S(WOW_REASON_DEAUTH_RECVD);
+	C2S(WOW_REASON_DISASSOC_RECVD);
+	C2S(WOW_REASON_GTK_HS_ERR);
+	C2S(WOW_REASON_EAP_REQ);
+	C2S(WOW_REASON_FOURWAY_HS_RECV);
+	C2S(WOW_REASON_TIMER_INTR_RECV);
+	C2S(WOW_REASON_PATTERN_MATCH_FOUND);
+	C2S(WOW_REASON_RECV_MAGIC_PATTERN);
+	C2S(WOW_REASON_P2P_DISC);
+	C2S(WOW_REASON_WLAN_HB);
+	C2S(WOW_REASON_CSA_EVENT);
+	C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV);
+	C2S(WOW_REASON_AUTH_REQ_RECV);
+	C2S(WOW_REASON_ASSOC_REQ_RECV);
+	C2S(WOW_REASON_HTT_EVENT);
+	C2S(WOW_REASON_RA_MATCH);
+	C2S(WOW_REASON_HOST_AUTO_SHUTDOWN);
+	C2S(WOW_REASON_IOAC_MAGIC_EVENT);
+	C2S(WOW_REASON_IOAC_SHORT_EVENT);
+	C2S(WOW_REASON_IOAC_EXTEND_EVENT);
+	C2S(WOW_REASON_IOAC_TIMER_EVENT);
+	C2S(WOW_REASON_ROAM_HO);
+	C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT);
+	C2S(WOW_REASON_BEACON_RECV);
+	C2S(WOW_REASON_CLIENT_KICKOUT_EVENT);
+	C2S(WOW_REASON_DEBUG_TEST);
+	default:
+		return NULL;
+	}
+}
+
+#undef C2S
+
+struct wmi_wow_ev_arg {
+	u32 vdev_id;
+	u32 flag;
+	enum wmi_wow_wake_reason wake_reason;
+	u32 data_len;
+};
+
+#define WOW_MIN_PATTERN_SIZE	1
+#define WOW_MAX_PATTERN_SIZE	148
+#define WOW_MAX_PKT_OFFSET	128
+
+enum wmi_tdls_state {
+	WMI_TDLS_DISABLE,
+	WMI_TDLS_ENABLE_PASSIVE,
+	WMI_TDLS_ENABLE_ACTIVE,
+};
+
+enum wmi_tdls_peer_state {
+	WMI_TDLS_PEER_STATE_PEERING,
+	WMI_TDLS_PEER_STATE_CONNECTED,
+	WMI_TDLS_PEER_STATE_TEARDOWN,
+};
+
+struct wmi_tdls_peer_update_cmd_arg {
+	u32 vdev_id;
+	enum wmi_tdls_peer_state peer_state;
+	u8 addr[ETH_ALEN];
+};
+
+#define WMI_TDLS_MAX_SUPP_OPER_CLASSES 32
+
+struct wmi_tdls_peer_capab_arg {
+	u8 peer_uapsd_queues;
+	u8 peer_max_sp;
+	u32 buff_sta_support;
+	u32 off_chan_support;
+	u32 peer_curr_operclass;
+	u32 self_curr_operclass;
+	u32 peer_chan_len;
+	u32 peer_operclass_len;
+	u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+	u32 is_peer_responder;
+	u32 pref_offchan_num;
+	u32 pref_offchan_bw;
+};
+
 struct ath10k;
 struct ath10k_vif;
 struct ath10k_fw_stats_pdev;
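
The C2S() helper above uses the preprocessor's stringizing operator: each use
expands to "case x: return #x", so the switch doubles as an enum-to-string
table without a separately maintained name array. A minimal standalone sketch
of the same idiom, with a hypothetical enum rather than the WMI constants:

	#include <stdio.h>

	#define C2S(x) case x: return #x

	enum demo_event { DEMO_EVENT_A, DEMO_EVENT_B };

	/* C2S(DEMO_EVENT_A) expands to: case DEMO_EVENT_A: return "DEMO_EVENT_A"; */
	static const char *demo_event_name(enum demo_event ev)
	{
		switch (ev) {
		C2S(DEMO_EVENT_A);
		C2S(DEMO_EVENT_B);
		default:
			return NULL;
		}
	}

	int main(void)
	{
		printf("%s\n", demo_event_name(DEMO_EVENT_B)); /* prints DEMO_EVENT_B */
		return 0;
	}
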
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
new file mode 100644
index 0000000..a68d8fd
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include "hif.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+
+static const struct wiphy_wowlan_support ath10k_wowlan_support = {
+	.flags = WIPHY_WOWLAN_DISCONNECT |
+		 WIPHY_WOWLAN_MAGIC_PKT,
+	.pattern_min_len = WOW_MIN_PATTERN_SIZE,
+	.pattern_max_len = WOW_MAX_PATTERN_SIZE,
+	.max_pkt_offset = WOW_MAX_PKT_OFFSET,
+};
+
+static int ath10k_wow_vif_cleanup(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+	int i, ret;
+
+	for (i = 0; i < WOW_EVENT_MAX; i++) {
+		ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
+		if (ret) {
+			ath10k_warn(ar, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
+				    wow_wakeup_event(i), arvif->vdev_id, ret);
+			return ret;
+		}
+	}
+
+	for (i = 0; i < ar->wow.max_num_patterns; i++) {
+		ret = ath10k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
+		if (ret) {
+			ath10k_warn(ar, "failed to delete wow pattern %d for vdev %i: %d\n",
+				    i, arvif->vdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ath10k_wow_cleanup(struct ath10k *ar)
+{
+	struct ath10k_vif *arvif;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		ret = ath10k_wow_vif_cleanup(arvif);
+		if (ret) {
+			ath10k_warn(ar, "failed to clean wow wakeups on vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
+				      struct cfg80211_wowlan *wowlan)
+{
+	int ret, i;
+	unsigned long wow_mask = 0;
+	struct ath10k *ar = arvif->ar;
+	const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+	int pattern_id = 0;
+
+	/* Setup requested WOW features */
+	switch (arvif->vdev_type) {
+	case WMI_VDEV_TYPE_IBSS:
+		__set_bit(WOW_BEACON_EVENT, &wow_mask);
+		 /* fall through */
+	case WMI_VDEV_TYPE_AP:
+		__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+		__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+		__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
+		__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
+		__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
+		__set_bit(WOW_HTT_EVENT, &wow_mask);
+		__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
+		break;
+	case WMI_VDEV_TYPE_STA:
+		if (wowlan->disconnect) {
+			__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+			__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+			__set_bit(WOW_BMISS_EVENT, &wow_mask);
+			__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
+		}
+
+		if (wowlan->magic_pkt)
+			__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+		break;
+	default:
+		break;
+	}
+
+	for (i = 0; i < wowlan->n_patterns; i++) {
+		u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
+		int j;
+
+		if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
+			continue;
+
+		/* convert bytemask to bitmask */
+		for (j = 0; j < patterns[i].pattern_len; j++)
+			if (patterns[i].mask[j / 8] & BIT(j % 8))
+				bitmask[j] = 0xff;
+
+		ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id,
+						 pattern_id,
+						 patterns[i].pattern,
+						 bitmask,
+						 patterns[i].pattern_len,
+						 patterns[i].pkt_offset);
+		if (ret) {
+			ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n",
+				    pattern_id,
+				    arvif->vdev_id, ret);
+			return ret;
+		}
+
+		pattern_id++;
+		__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
+	}
+
+	for (i = 0; i < WOW_EVENT_MAX; i++) {
+		if (!test_bit(i, &wow_mask))
+			continue;
+		ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
+		if (ret) {
+			ath10k_warn(ar, "failed to enable wakeup event %s on vdev %i: %d\n",
+				    wow_wakeup_event(i), arvif->vdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ath10k_wow_set_wakeups(struct ath10k *ar,
+				  struct cfg80211_wowlan *wowlan)
+{
+	struct ath10k_vif *arvif;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		ret = ath10k_vif_wow_set_wakeups(arvif, wowlan);
+		if (ret) {
+			ath10k_warn(ar, "failed to set wow wakeups on vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ath10k_wow_enable(struct ath10k *ar)
+{
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	reinit_completion(&ar->target_suspend);
+
+	ret = ath10k_wmi_wow_enable(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to issue wow enable: %d\n", ret);
+		return ret;
+	}
+
+	ret = wait_for_completion_timeout(&ar->target_suspend, 3 * HZ);
+	if (ret == 0) {
+		ath10k_warn(ar, "timed out while waiting for suspend completion\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int ath10k_wow_wakeup(struct ath10k *ar)
+{
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	reinit_completion(&ar->wow.wakeup_completed);
+
+	ret = ath10k_wmi_wow_host_wakeup_ind(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to send wow wakeup indication: %d\n",
+			    ret);
+		return ret;
+	}
+
+	ret = wait_for_completion_timeout(&ar->wow.wakeup_completed, 3 * HZ);
+	if (ret == 0) {
+		ath10k_warn(ar, "timed out while waiting for wow wakeup completion\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
+			  struct cfg80211_wowlan *wowlan)
+{
+	struct ath10k *ar = hw->priv;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+			      ar->fw_features))) {
+		ret = 1;
+		goto exit;
+	}
+
+	ret = ath10k_wow_cleanup(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to clear wow wakeup events: %d\n",
+			    ret);
+		goto exit;
+	}
+
+	ret = ath10k_wow_set_wakeups(ar, wowlan);
+	if (ret) {
+		ath10k_warn(ar, "failed to set wow wakeup events: %d\n",
+			    ret);
+		goto cleanup;
+	}
+
+	ret = ath10k_wow_enable(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to start wow: %d\n", ret);
+		goto cleanup;
+	}
+
+	ret = ath10k_hif_suspend(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
+		goto wakeup;
+	}
+
+	goto exit;
+
+wakeup:
+	ath10k_wow_wakeup(ar);
+
+cleanup:
+	ath10k_wow_cleanup(ar);
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret ? 1 : 0;
+}
+
+int ath10k_wow_op_resume(struct ieee80211_hw *hw)
+{
+	struct ath10k *ar = hw->priv;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+			      ar->fw_features))) {
+		ret = 1;
+		goto exit;
+	}
+
+	ret = ath10k_hif_resume(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to resume hif: %d\n", ret);
+		goto exit;
+	}
+
+	ret = ath10k_wow_wakeup(ar);
+	if (ret)
+		ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret ? 1 : 0;
+}
+
+int ath10k_wow_init(struct ath10k *ar)
+{
+	if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, ar->fw_features))
+		return 0;
+
+	if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
+		return -EINVAL;
+
+	ar->wow.wowlan_support = ath10k_wowlan_support;
+	ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
+	ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
+
+	return 0;
+}
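
A note on the pattern handling in ath10k_vif_wow_set_wakeups() above: cfg80211
delivers each wake pattern with a packed bitmask, one bit per pattern byte,
while the WMI pattern command expects one mask byte per pattern byte, hence
the conversion loop. A self-contained sketch of that expansion (function and
names are illustrative, not driver API):

	#include <stdio.h>

	#define BIT(n) (1U << (n))

	/* Expand a packed per-byte bitmask (bit j set means pattern byte j
	 * must match) into one 0xff/0x00 mask byte per pattern byte.
	 */
	static void bytemask_from_bitmask(const unsigned char *mask,
					  unsigned char *bytemask, int pattern_len)
	{
		int j;

		for (j = 0; j < pattern_len; j++)
			if (mask[j / 8] & BIT(j % 8))
				bytemask[j] = 0xff;
	}

	int main(void)
	{
		unsigned char mask[] = { 0x05 };	/* bits 0 and 2 set */
		unsigned char bytemask[8] = { 0 };

		bytemask_from_bitmask(mask, bytemask, 8);
		printf("%02x %02x %02x\n", bytemask[0], bytemask[1], bytemask[2]);
		/* prints: ff 00 ff */
		return 0;
	}
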
diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h
new file mode 100644
index 0000000..abbb04b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wow.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _WOW_H_
+#define _WOW_H_
+
+struct ath10k_wow {
+	u32 max_num_patterns;
+	struct completion wakeup_completed;
+	struct wiphy_wowlan_support wowlan_support;
+};
+
+#ifdef CONFIG_PM
+
+int ath10k_wow_init(struct ath10k *ar);
+int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
+			  struct cfg80211_wowlan *wowlan);
+int ath10k_wow_op_resume(struct ieee80211_hw *hw);
+
+#else
+
+static inline int ath10k_wow_init(struct ath10k *ar)
+{
+	return 0;
+}
+
+#endif /* CONFIG_PM */
+#endif /* _WOW_H_ */
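
The !CONFIG_PM branch of wow.h above only stubs ath10k_wow_init(); the
suspend/resume handlers are assumed to be referenced solely from code that is
itself guarded by CONFIG_PM. A toy, self-contained illustration of this
stub-under-config pattern (all names hypothetical):

	#include <stdio.h>

	#define CONFIG_DEMO_PM 1	/* comment out to mimic a !CONFIG_PM build */

	#ifdef CONFIG_DEMO_PM
	static int demo_wow_init(void)
	{
		puts("wow initialized");
		return 0;
	}
	#else
	/* Stub keeps callers compiling when the feature is configured out. */
	static inline int demo_wow_init(void)
	{
		return 0;
	}
	#endif

	int main(void)
	{
		return demo_wow_init();
	}
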
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 7ca0d6f93..e22b0e7 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1280,7 +1280,6 @@
 
 	DECLARE_BITMAP(status, 4);
 #define ATH_STAT_INVALID	0		/* disable hardware accesses */
-#define ATH_STAT_PROMISC	1
 #define ATH_STAT_LEDSOFT	2		/* enable LED gpio status */
 #define ATH_STAT_STARTED	3		/* opened & irqs enabled */
 #define ATH_STAT_RESET		4		/* hw reset */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index a613182..23552f4 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2537,12 +2537,12 @@
 
 	/* Initialize driver private data */
 	SET_IEEE80211_DEV(hw, ah->dev);
-	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
-			IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
-			IEEE80211_HW_SIGNAL_DBM |
-			IEEE80211_HW_MFP_CAPABLE |
-			IEEE80211_HW_REPORTS_TX_ACK_STATUS |
-			IEEE80211_HW_SUPPORTS_RC_TABLE;
+	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(hw, MFP_CAPABLE);
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+	ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
 
 	hw->wiphy->interface_modes =
 		BIT(NL80211_IFTYPE_AP) |
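
On the hw->flags conversion above: mac80211 replaced the plain 32-bit
capability word with the ieee80211_hw_set() accessor over a bitmap, so the
flag space can grow past 32 bits without further driver churn. A rough
self-contained model of the before/after (types and macro are stand-ins, not
the mac80211 definitions):

	#include <stdio.h>

	enum demo_hw_flag { DEMO_SIGNAL_DBM, DEMO_MFP_CAPABLE, DEMO_NUM_FLAGS };

	struct demo_hw {
		unsigned long flags[1];	/* a bitmap, free to grow past one word */
	};

	#define demo_hw_set(hw, flg) \
		((hw)->flags[DEMO_##flg / (8 * sizeof(long))] |= \
		 1UL << (DEMO_##flg % (8 * sizeof(long))))

	int main(void)
	{
		struct demo_hw hw = { { 0 } };

		/* Old style was: hw->flags = IEEE80211_HW_SIGNAL_DBM | ...; */
		demo_hw_set(&hw, SIGNAL_DBM);
		demo_hw_set(&hw, MFP_CAPABLE);
		printf("flags[0] = %#lx\n", hw.flags[0]);	/* prints 0x3 */
		return 0;
	}
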
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index ca4b7cc..803030f 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -124,7 +124,7 @@
 
 static int
 ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led,
-		   const char *name, char *trigger)
+		   const char *name, const char *trigger)
 {
 	int err;
 
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 3b4a646..dc44cfe 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -369,7 +369,7 @@
 		       unsigned int *new_flags, u64 multicast)
 {
 #define SUPPORTED_FIF_FLAGS \
-	(FIF_PROMISC_IN_BSS |  FIF_ALLMULTI | FIF_FCSFAIL | \
+	(FIF_ALLMULTI | FIF_FCSFAIL | \
 	FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \
 	FIF_BCN_PRBRESP_PROMISC)
 
@@ -393,16 +393,6 @@
 		(AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
 		AR5K_RX_FILTER_MCAST);
 
-	if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
-		if (*new_flags & FIF_PROMISC_IN_BSS)
-			__set_bit(ATH_STAT_PROMISC, ah->status);
-		else
-			__clear_bit(ATH_STAT_PROMISC, ah->status);
-	}
-
-	if (test_bit(ATH_STAT_PROMISC, ah->status))
-		rfilt |= AR5K_RX_FILTER_PROM;
-
 	/* Note, AR5K_RX_FILTER_MCAST is already enabled */
 	if (*new_flags & FIF_ALLMULTI) {
 		mfilt[0] =  ~0;
@@ -418,8 +408,7 @@
 	if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (ah->nvifs > 1))
 		rfilt |= AR5K_RX_FILTER_BEACON;
 
-	/* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not
-	 * set we should only pass on control frames for this
+	/* FIF_CONTROL doc says we should only pass on control frames for this
 	 * station. This needs testing. I believe right now this
 	 * enables *all* control frames, which is OK... but
 	 * we should see if we can improve on granularity */
@@ -809,7 +798,6 @@
 	.sw_scan_start		= ath5k_sw_scan_start,
 	.sw_scan_complete	= ath5k_sw_scan_complete,
 	.get_stats		= ath5k_get_stats,
-	/* .get_tkip_seq	= not implemented */
 	/* .set_frag_threshold	= not implemented */
 	/* .set_rts_threshold	= not implemented */
 	/* .sta_add		= not implemented */
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index cce4625..a511ef3 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -889,7 +889,7 @@
 					GFP_KERNEL);
 	} else if (vif->sme_state == SME_CONNECTED) {
 		cfg80211_disconnected(vif->ndev, proto_reason,
-				      NULL, 0, GFP_KERNEL);
+				      NULL, 0, false, GFP_KERNEL);
 	}
 
 	vif->sme_state = SME_DISCONNECTED;
@@ -3467,7 +3467,7 @@
 					GFP_KERNEL);
 		break;
 	case SME_CONNECTED:
-		cfg80211_disconnected(vif->ndev, 0, NULL, 0, GFP_KERNEL);
+		cfg80211_disconnected(vif->ndev, 0, NULL, 0, true, GFP_KERNEL);
 		break;
 	}
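
The extra boolean threaded through the two ath6kl call sites above is
cfg80211_disconnected()'s new locally_generated argument: false when the peer
or protocol dropped the link (first hunk), true when the local side tore it
down during teardown (second hunk). The prototype as implied by these call
sites:

	void cfg80211_disconnected(struct net_device *dev, u16 reason,
				   const u8 *ie, size_t ie_len,
				   bool locally_generated, gfp_t gfp);
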
 
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 19f88b4..05d25a9 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -1527,8 +1527,8 @@
 			__le32 nw_type;
 		} sta;
 		struct {
-			u8 phymode;
 			u8 aid;
+			u8 phymode;
 			u8 mac_addr[ETH_ALEN];
 			u8 auth;
 			u8 keymgmt;
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 6c23d27..8f87930 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -254,86 +254,25 @@
 	return 0;
 }
 
-/**
- * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
- * @ah: atheros hardware structure
- * @chan:
- *
- * For non single-chip solutions. Converts to baseband spur frequency given the
- * input channel frequency and compute register settings below.
- */
-static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
-				    struct ath9k_channel *chan)
+void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
+			  struct ath9k_channel *chan, int bin)
 {
-	int bb_spur = AR_NO_SPUR;
-	int bin, cur_bin;
-	int spur_freq_sd;
-	int spur_delta_phase;
-	int denominator;
+	int cur_bin;
 	int upper, lower, cur_vit_mask;
-	int tmp, new;
 	int i;
-	static int pilot_mask_reg[4] = {
-		AR_PHY_TIMING7, AR_PHY_TIMING8,
-		AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
-	};
-	static int chan_mask_reg[4] = {
-		AR_PHY_TIMING9, AR_PHY_TIMING10,
-		AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
-	};
-	static int inc[4] = { 0, 100, 0, 0 };
-
 	int8_t mask_m[123];
 	int8_t mask_p[123];
 	int8_t mask_amt;
 	int tmp_mask;
-	int cur_bb_spur;
-	bool is2GHz = IS_CHAN_2GHZ(chan);
-
-	memset(&mask_m, 0, sizeof(int8_t) * 123);
-	memset(&mask_p, 0, sizeof(int8_t) * 123);
-
-	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
-		cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
-		if (AR_NO_SPUR == cur_bb_spur)
-			break;
-		cur_bb_spur = cur_bb_spur - (chan->channel * 10);
-		if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
-			bb_spur = cur_bb_spur;
-			break;
-		}
-	}
-
-	if (AR_NO_SPUR == bb_spur)
-		return;
-
-	bin = bb_spur * 32;
-
-	tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
-	new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
-		     AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
-		     AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
-		     AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
-
-	REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
-
-	new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
-	       AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
-	       AR_PHY_SPUR_REG_MASK_RATE_SELECT |
-	       AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
-	       SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
-	REG_WRITE(ah, AR_PHY_SPUR_REG, new);
-
-	spur_delta_phase = ((bb_spur * 524288) / 100) &
-		AR_PHY_TIMING11_SPUR_DELTA_PHASE;
-
-	denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
-	spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
-
-	new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
-	       SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
-	       SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
-	REG_WRITE(ah, AR_PHY_TIMING11, new);
+	static const int pilot_mask_reg[4] = {
+		AR_PHY_TIMING7, AR_PHY_TIMING8,
+		AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
+	};
+	static const int chan_mask_reg[4] = {
+		AR_PHY_TIMING9, AR_PHY_TIMING10,
+		AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
+	};
+	static const int inc[4] = { 0, 100, 0, 0 };
 
 	cur_bin = -6000;
 	upper = bin + 100;
@@ -343,6 +282,7 @@
 		int pilot_mask = 0;
 		int chan_mask = 0;
 		int bp = 0;
+
 		for (bp = 0; bp < 30; bp++) {
 			if ((cur_bin > lower) && (cur_bin < upper)) {
 				pilot_mask = pilot_mask | 0x1 << bp;
@@ -361,7 +301,6 @@
 
 	for (i = 0; i < 123; i++) {
 		if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
-
 			/* workaround for gcc bug #37014 */
 			volatile int tmp_v = abs(cur_vit_mask - bin);
 
@@ -467,6 +406,78 @@
 }
 
 /**
+ * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
+ * @ah: atheros hardware structure
+ * @chan: ath9k channel structure
+ *
+ * For non single-chip solutions. Converts to the baseband spur frequency given
+ * the input channel frequency and computes the register settings below.
+ */
+static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
+				    struct ath9k_channel *chan)
+{
+	int bb_spur = AR_NO_SPUR;
+	int bin;
+	int spur_freq_sd;
+	int spur_delta_phase;
+	int denominator;
+	int tmp, new;
+	int i;
+
+	int8_t mask_m[123];
+	int8_t mask_p[123];
+	int cur_bb_spur;
+	bool is2GHz = IS_CHAN_2GHZ(chan);
+
+	memset(&mask_m, 0, sizeof(int8_t) * 123);
+	memset(&mask_p, 0, sizeof(int8_t) * 123);
+
+	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+		cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+		if (AR_NO_SPUR == cur_bb_spur)
+			break;
+		cur_bb_spur = cur_bb_spur - (chan->channel * 10);
+		if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
+			bb_spur = cur_bb_spur;
+			break;
+		}
+	}
+
+	if (AR_NO_SPUR == bb_spur)
+		return;
+
+	bin = bb_spur * 32;
+
+	tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+	new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+		     AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+		     AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+		     AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+
+	REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
+
+	new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+	       AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+	       AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+	       AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+	       SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+	REG_WRITE(ah, AR_PHY_SPUR_REG, new);
+
+	spur_delta_phase = ((bb_spur * 524288) / 100) &
+		AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+	denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
+	spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
+
+	new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
+	       SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
+	       SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
+	REG_WRITE(ah, AR_PHY_TIMING11, new);
+
+	ar5008_hw_cmn_spur_mitigate(ah, chan, bin);
+}
+
+/**
  * ar5008_hw_rf_alloc_ext_banks - allocates banks for external radio programming
  * @ah: atheros hardware structure
  *
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index fc08162..db66245 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -169,29 +169,17 @@
 {
 	int bb_spur = AR_NO_SPUR;
 	int freq;
-	int bin, cur_bin;
+	int bin;
 	int bb_spur_off, spur_subchannel_sd;
 	int spur_freq_sd;
 	int spur_delta_phase;
 	int denominator;
-	int upper, lower, cur_vit_mask;
 	int tmp, newVal;
 	int i;
-	static const int pilot_mask_reg[4] = {
-		AR_PHY_TIMING7, AR_PHY_TIMING8,
-		AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
-	};
-	static const int chan_mask_reg[4] = {
-		AR_PHY_TIMING9, AR_PHY_TIMING10,
-		AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
-	};
-	static const int inc[4] = { 0, 100, 0, 0 };
 	struct chan_centers centers;
 
 	int8_t mask_m[123];
 	int8_t mask_p[123];
-	int8_t mask_amt;
-	int tmp_mask;
 	int cur_bb_spur;
 	bool is2GHz = IS_CHAN_2GHZ(chan);
 
@@ -288,135 +276,7 @@
 	newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
 	REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
 
-	cur_bin = -6000;
-	upper = bin + 100;
-	lower = bin - 100;
-
-	for (i = 0; i < 4; i++) {
-		int pilot_mask = 0;
-		int chan_mask = 0;
-		int bp = 0;
-		for (bp = 0; bp < 30; bp++) {
-			if ((cur_bin > lower) && (cur_bin < upper)) {
-				pilot_mask = pilot_mask | 0x1 << bp;
-				chan_mask = chan_mask | 0x1 << bp;
-			}
-			cur_bin += 100;
-		}
-		cur_bin += inc[i];
-		REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
-		REG_WRITE(ah, chan_mask_reg[i], chan_mask);
-	}
-
-	cur_vit_mask = 6100;
-	upper = bin + 120;
-	lower = bin - 120;
-
-	for (i = 0; i < 123; i++) {
-		if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
-
-			/* workaround for gcc bug #37014 */
-			volatile int tmp_v = abs(cur_vit_mask - bin);
-
-			if (tmp_v < 75)
-				mask_amt = 1;
-			else
-				mask_amt = 0;
-			if (cur_vit_mask < 0)
-				mask_m[abs(cur_vit_mask / 100)] = mask_amt;
-			else
-				mask_p[cur_vit_mask / 100] = mask_amt;
-		}
-		cur_vit_mask -= 100;
-	}
-
-	tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
-		| (mask_m[48] << 26) | (mask_m[49] << 24)
-		| (mask_m[50] << 22) | (mask_m[51] << 20)
-		| (mask_m[52] << 18) | (mask_m[53] << 16)
-		| (mask_m[54] << 14) | (mask_m[55] << 12)
-		| (mask_m[56] << 10) | (mask_m[57] << 8)
-		| (mask_m[58] << 6) | (mask_m[59] << 4)
-		| (mask_m[60] << 2) | (mask_m[61] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
-	REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
-
-	tmp_mask = (mask_m[31] << 28)
-		| (mask_m[32] << 26) | (mask_m[33] << 24)
-		| (mask_m[34] << 22) | (mask_m[35] << 20)
-		| (mask_m[36] << 18) | (mask_m[37] << 16)
-		| (mask_m[48] << 14) | (mask_m[39] << 12)
-		| (mask_m[40] << 10) | (mask_m[41] << 8)
-		| (mask_m[42] << 6) | (mask_m[43] << 4)
-		| (mask_m[44] << 2) | (mask_m[45] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
-
-	tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
-		| (mask_m[18] << 26) | (mask_m[18] << 24)
-		| (mask_m[20] << 22) | (mask_m[20] << 20)
-		| (mask_m[22] << 18) | (mask_m[22] << 16)
-		| (mask_m[24] << 14) | (mask_m[24] << 12)
-		| (mask_m[25] << 10) | (mask_m[26] << 8)
-		| (mask_m[27] << 6) | (mask_m[28] << 4)
-		| (mask_m[29] << 2) | (mask_m[30] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
-
-	tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
-		| (mask_m[2] << 26) | (mask_m[3] << 24)
-		| (mask_m[4] << 22) | (mask_m[5] << 20)
-		| (mask_m[6] << 18) | (mask_m[7] << 16)
-		| (mask_m[8] << 14) | (mask_m[9] << 12)
-		| (mask_m[10] << 10) | (mask_m[11] << 8)
-		| (mask_m[12] << 6) | (mask_m[13] << 4)
-		| (mask_m[14] << 2) | (mask_m[15] << 0);
-	REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
-
-	tmp_mask = (mask_p[15] << 28)
-		| (mask_p[14] << 26) | (mask_p[13] << 24)
-		| (mask_p[12] << 22) | (mask_p[11] << 20)
-		| (mask_p[10] << 18) | (mask_p[9] << 16)
-		| (mask_p[8] << 14) | (mask_p[7] << 12)
-		| (mask_p[6] << 10) | (mask_p[5] << 8)
-		| (mask_p[4] << 6) | (mask_p[3] << 4)
-		| (mask_p[2] << 2) | (mask_p[1] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
-
-	tmp_mask = (mask_p[30] << 28)
-		| (mask_p[29] << 26) | (mask_p[28] << 24)
-		| (mask_p[27] << 22) | (mask_p[26] << 20)
-		| (mask_p[25] << 18) | (mask_p[24] << 16)
-		| (mask_p[23] << 14) | (mask_p[22] << 12)
-		| (mask_p[21] << 10) | (mask_p[20] << 8)
-		| (mask_p[19] << 6) | (mask_p[18] << 4)
-		| (mask_p[17] << 2) | (mask_p[16] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
-
-	tmp_mask = (mask_p[45] << 28)
-		| (mask_p[44] << 26) | (mask_p[43] << 24)
-		| (mask_p[42] << 22) | (mask_p[41] << 20)
-		| (mask_p[40] << 18) | (mask_p[39] << 16)
-		| (mask_p[38] << 14) | (mask_p[37] << 12)
-		| (mask_p[36] << 10) | (mask_p[35] << 8)
-		| (mask_p[34] << 6) | (mask_p[33] << 4)
-		| (mask_p[32] << 2) | (mask_p[31] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
-
-	tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
-		| (mask_p[59] << 26) | (mask_p[58] << 24)
-		| (mask_p[57] << 22) | (mask_p[56] << 20)
-		| (mask_p[55] << 18) | (mask_p[54] << 16)
-		| (mask_p[53] << 14) | (mask_p[52] << 12)
-		| (mask_p[51] << 10) | (mask_p[50] << 8)
-		| (mask_p[49] << 6) | (mask_p[48] << 4)
-		| (mask_p[47] << 2) | (mask_p[46] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
+	ar5008_hw_cmn_spur_mitigate(ah, chan, bin);
 
 	REGWRITE_BUFFER_FLUSH(ah);
 }
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index 5cee231..a876271 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -15,6 +15,7 @@
  */
 
 #include <linux/relay.h>
+#include <linux/random.h>
 #include "ath9k.h"
 
 static s8 fix_rssi_inv_only(u8 rssi_val)
@@ -36,21 +37,480 @@
 	relay_write(spec_priv->rfs_chan_spec_scan, fft_sample_tlv, length);
 }
 
+typedef int (ath_cmn_fft_idx_validator) (u8 *sample_end, int bytes_read);
+
+static int
+ath_cmn_max_idx_verify_ht20_fft(u8 *sample_end, int bytes_read)
+{
+	struct ath_ht20_mag_info *mag_info;
+	u8 *sample;
+	u16 max_magnitude;
+	u8 max_index;
+	u8 max_exp;
+
+	/* Sanity check so that we don't read outside the read
+	 * buffer
+	 */
+	if (bytes_read < SPECTRAL_HT20_SAMPLE_LEN - 1)
+		return -1;
+
+	mag_info = (struct ath_ht20_mag_info *) (sample_end -
+				sizeof(struct ath_ht20_mag_info) + 1);
+
+	sample = sample_end - SPECTRAL_HT20_SAMPLE_LEN + 1;
+
+	max_index = spectral_max_index(mag_info->all_bins,
+				       SPECTRAL_HT20_NUM_BINS);
+	max_magnitude = spectral_max_magnitude(mag_info->all_bins);
+
+	max_exp = mag_info->max_exp & 0xf;
+
+	/* Don't try to read something outside the read buffer
+	 * in case of a missing byte (so bins[0] will be outside
+	 * the read buffer)
+	 */
+	if (bytes_read < SPECTRAL_HT20_SAMPLE_LEN && max_index < 1)
+		return -1;
+
+	if (sample[max_index] != (max_magnitude >> max_exp))
+		return -1;
+	else
+		return 0;
+}
+
+static int
+ath_cmn_max_idx_verify_ht20_40_fft(u8 *sample_end, int bytes_read)
+{
+	struct ath_ht20_40_mag_info *mag_info;
+	u8 *sample;
+	u16 lower_mag, upper_mag;
+	u8 lower_max_index, upper_max_index;
+	u8 max_exp;
+	int dc_pos = SPECTRAL_HT20_40_NUM_BINS / 2;
+
+	/* Sanity check so that we don't read outside the read
+	 * buffer
+	 */
+	if (bytes_read < SPECTRAL_HT20_40_SAMPLE_LEN - 1)
+		return -1;
+
+	mag_info = (struct ath_ht20_40_mag_info *) (sample_end -
+				sizeof(struct ath_ht20_40_mag_info) + 1);
+
+	sample = sample_end - SPECTRAL_HT20_40_SAMPLE_LEN + 1;
+
+	lower_mag = spectral_max_magnitude(mag_info->lower_bins);
+	lower_max_index = spectral_max_index(mag_info->lower_bins,
+					     SPECTRAL_HT20_40_NUM_BINS);
+
+	upper_mag = spectral_max_magnitude(mag_info->upper_bins);
+	upper_max_index = spectral_max_index(mag_info->upper_bins,
+					     SPECTRAL_HT20_40_NUM_BINS);
+
+	max_exp = mag_info->max_exp & 0xf;
+
+	/* Don't try to read something outside the read buffer
+	 * in case of a missing byte (so bins[0] will be outside
+	 * the read buffer)
+	 */
+	if (bytes_read < SPECTRAL_HT20_40_SAMPLE_LEN &&
+	   ((upper_max_index < 1) || (lower_max_index < 1)))
+		return -1;
+
+	/* Sometimes the hardware messes up the index and adds
+	 * the index of the middle point (dc_pos). Try to fix it.
+	 */
+	if ((upper_max_index - dc_pos > 0) &&
+	   (sample[upper_max_index] == (upper_mag >> max_exp)))
+		upper_max_index -= dc_pos;
+
+	if ((lower_max_index - dc_pos > 0) &&
+	   (sample[lower_max_index - dc_pos] == (lower_mag >> max_exp)))
+		lower_max_index -= dc_pos;
+
+	if ((sample[upper_max_index + dc_pos] != (upper_mag >> max_exp)) ||
+	   (sample[lower_max_index] != (lower_mag >> max_exp)))
+		return -1;
+	else
+		return 0;
+}
+
+typedef int (ath_cmn_fft_sample_handler) (struct ath_rx_status *rs,
+			struct ath_spec_scan_priv *spec_priv,
+			u8 *sample_buf, u64 tsf, u16 freq, int chan_type);
+
+static int
+ath_cmn_process_ht20_fft(struct ath_rx_status *rs,
+			struct ath_spec_scan_priv *spec_priv,
+			u8 *sample_buf,
+			u64 tsf, u16 freq, int chan_type)
+{
+	struct fft_sample_ht20 fft_sample_20;
+	struct ath_common *common = ath9k_hw_common(spec_priv->ah);
+	struct ath_hw *ah = spec_priv->ah;
+	struct ath_ht20_mag_info *mag_info;
+	struct fft_sample_tlv *tlv;
+	int i = 0;
+	int ret = 0;
+	int dc_pos = SPECTRAL_HT20_NUM_BINS / 2;
+	u16 magnitude, tmp_mag, length;
+	u8 max_index, bitmap_w, max_exp;
+
+	length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
+	fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
+	fft_sample_20.tlv.length = __cpu_to_be16(length);
+	fft_sample_20.freq = __cpu_to_be16(freq);
+	fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+	fft_sample_20.noise = ah->noise;
+
+	mag_info = (struct ath_ht20_mag_info *) (sample_buf +
+					SPECTRAL_HT20_NUM_BINS);
+
+	magnitude = spectral_max_magnitude(mag_info->all_bins);
+	fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
+
+	max_index = spectral_max_index(mag_info->all_bins,
+					SPECTRAL_HT20_NUM_BINS);
+	fft_sample_20.max_index = max_index;
+
+	bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
+	fft_sample_20.bitmap_weight = bitmap_w;
+
+	max_exp = mag_info->max_exp & 0xf;
+	fft_sample_20.max_exp = max_exp;
+
+	fft_sample_20.tsf = __cpu_to_be64(tsf);
+
+	memcpy(fft_sample_20.data, sample_buf, SPECTRAL_HT20_NUM_BINS);
+
+	ath_dbg(common, SPECTRAL_SCAN, "FFT HT20 frame: max mag 0x%X,"
+					"max_mag_idx %i\n",
+					magnitude >> max_exp,
+					max_index);
+
+	if (fft_sample_20.data[max_index] != (magnitude >> max_exp)) {
+		ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
+		ret = -1;
+	}
+
+	/* DC value (value in the middle) is the blind spot of the spectral
+	 * sample and invalid, interpolate it.
+	 */
+	fft_sample_20.data[dc_pos] = (fft_sample_20.data[dc_pos + 1] +
+					fft_sample_20.data[dc_pos - 1]) / 2;
+
+	/* Check if the maximum magnitude is indeed maximum,
+	 * also if the maximum value was at dc_pos, calculate
+	 * a new one (since value at dc_pos is invalid).
+	 */
+	if (max_index == dc_pos) {
+		tmp_mag = 0;
+		for (i = 0; i < dc_pos; i++) {
+			if (fft_sample_20.data[i] > tmp_mag) {
+				tmp_mag = fft_sample_20.data[i];
+				fft_sample_20.max_index = i;
+			}
+		}
+
+		magnitude = tmp_mag << max_exp;
+		fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
+
+		ath_dbg(common, SPECTRAL_SCAN,
+			"Calculated new lower max 0x%X at %i\n",
+			tmp_mag, fft_sample_20.max_index);
+	} else
+	for (i = 0; i < SPECTRAL_HT20_NUM_BINS; i++) {
+		if (fft_sample_20.data[i] == (magnitude >> max_exp))
+			ath_dbg(common, SPECTRAL_SCAN,
+				"Got max: 0x%X at index %i\n",
+				fft_sample_20.data[i], i);
+
+		if (fft_sample_20.data[i] > (magnitude >> max_exp)) {
+			ath_dbg(common, SPECTRAL_SCAN,
+				"Got bin %i greater than max: 0x%X\n",
+				i, fft_sample_20.data[i]);
+			ret = -1;
+		}
+	}
+
+	if (ret < 0)
+		return ret;
+
+	tlv = (struct fft_sample_tlv *)&fft_sample_20;
+
+	ath_debug_send_fft_sample(spec_priv, tlv);
+
+	return 0;
+}
+
+static int
+ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
+			struct ath_spec_scan_priv *spec_priv,
+			u8 *sample_buf,
+			u64 tsf, u16 freq, int chan_type)
+{
+	struct fft_sample_ht20_40 fft_sample_40;
+	struct ath_common *common = ath9k_hw_common(spec_priv->ah);
+	struct ath_hw *ah = spec_priv->ah;
+	struct ath9k_hw_cal_data *caldata = ah->caldata;
+	struct ath_ht20_40_mag_info *mag_info;
+	struct fft_sample_tlv *tlv;
+	int dc_pos = SPECTRAL_HT20_40_NUM_BINS / 2;
+	int i = 0;
+	int ret = 0;
+	s16 ext_nf;
+	u16 lower_mag, upper_mag, tmp_mag, length;
+	s8 lower_rssi, upper_rssi;
+	u8 lower_max_index, upper_max_index;
+	u8 lower_bitmap_w, upper_bitmap_w, max_exp;
+
+	if (caldata)
+		ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
+				caldata->nfCalHist[3].privNF);
+	else
+		ext_nf = ATH_DEFAULT_NOISE_FLOOR;
+
+	length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
+	fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
+	fft_sample_40.tlv.length = __cpu_to_be16(length);
+	fft_sample_40.freq = __cpu_to_be16(freq);
+	fft_sample_40.channel_type = chan_type;
+
+	if (chan_type == NL80211_CHAN_HT40PLUS) {
+		lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+		upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
+
+		fft_sample_40.lower_noise = ah->noise;
+		fft_sample_40.upper_noise = ext_nf;
+	} else {
+		lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
+		upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+
+		fft_sample_40.lower_noise = ext_nf;
+		fft_sample_40.upper_noise = ah->noise;
+	}
+
+	fft_sample_40.lower_rssi = lower_rssi;
+	fft_sample_40.upper_rssi = upper_rssi;
+
+	mag_info = (struct ath_ht20_40_mag_info *) (sample_buf +
+					SPECTRAL_HT20_40_NUM_BINS);
+
+	lower_mag = spectral_max_magnitude(mag_info->lower_bins);
+	fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
+
+	upper_mag = spectral_max_magnitude(mag_info->upper_bins);
+	fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
+
+	lower_max_index = spectral_max_index(mag_info->lower_bins,
+					SPECTRAL_HT20_40_NUM_BINS);
+	fft_sample_40.lower_max_index = lower_max_index;
+
+	upper_max_index = spectral_max_index(mag_info->upper_bins,
+					SPECTRAL_HT20_40_NUM_BINS);
+	fft_sample_40.upper_max_index = upper_max_index;
+
+	lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
+	fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
+
+	upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
+	fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
+
+	max_exp = mag_info->max_exp & 0xf;
+	fft_sample_40.max_exp = max_exp;
+
+	fft_sample_40.tsf = __cpu_to_be64(tsf);
+
+	memcpy(fft_sample_40.data, sample_buf, SPECTRAL_HT20_40_NUM_BINS);
+
+	ath_dbg(common, SPECTRAL_SCAN, "FFT HT20/40 frame: lower mag 0x%X,"
+					"lower_mag_idx %i, upper mag 0x%X,"
+					"upper_mag_idx %i\n",
+					lower_mag >> max_exp,
+					lower_max_index,
+					upper_mag >> max_exp,
+					upper_max_index);
+
+	/* Sometimes the hardware messes up the index and adds
+	 * the index of the middle point (dc_pos). Try to fix it.
+	 */
+	if ((upper_max_index - dc_pos > 0) &&
+	   (fft_sample_40.data[upper_max_index] == (upper_mag >> max_exp))) {
+		upper_max_index -= dc_pos;
+		fft_sample_40.upper_max_index = upper_max_index;
+	}
+
+	if ((lower_max_index - dc_pos > 0) &&
+	   (fft_sample_40.data[lower_max_index - dc_pos] ==
+	   (lower_mag >> max_exp))) {
+		lower_max_index -= dc_pos;
+		fft_sample_40.lower_max_index = lower_max_index;
+	}
+
+	/* Check if we got the expected magnitude values at
+	 * the expected bins
+	 */
+	if ((fft_sample_40.data[upper_max_index + dc_pos]
+	    != (upper_mag >> max_exp)) ||
+	   (fft_sample_40.data[lower_max_index]
+	    != (lower_mag >> max_exp))) {
+		ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
+		ret = -1;
+	}
+
+	/* DC value (value in the middle) is the blind spot of the spectral
+	 * sample and invalid, interpolate it.
+	 */
+	fft_sample_40.data[dc_pos] = (fft_sample_40.data[dc_pos + 1] +
+					fft_sample_40.data[dc_pos - 1]) / 2;
+
+	/* Check if the maximum magnitudes are indeed maximum,
+	 * also if the maximum value was at dc_pos, calculate
+	 * a new one (since value at dc_pos is invalid).
+	 */
+	if (lower_max_index == dc_pos) {
+		tmp_mag = 0;
+		for (i = 0; i < dc_pos; i++) {
+			if (fft_sample_40.data[i] > tmp_mag) {
+				tmp_mag = fft_sample_40.data[i];
+				fft_sample_40.lower_max_index = i;
+			}
+		}
+
+		lower_mag = tmp_mag << max_exp;
+		fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
+
+		ath_dbg(common, SPECTRAL_SCAN,
+			"Calculated new lower max 0x%X at %i\n",
+			tmp_mag, fft_sample_40.lower_max_index);
+	} else
+	for (i = 0; i < dc_pos; i++) {
+		if (fft_sample_40.data[i] == (lower_mag >> max_exp))
+			ath_dbg(common, SPECTRAL_SCAN,
+				"Got lower mag: 0x%X at index %i\n",
+				fft_sample_40.data[i], i);
+
+		if (fft_sample_40.data[i] > (lower_mag >> max_exp)) {
+			ath_dbg(common, SPECTRAL_SCAN,
+				"Got lower bin %i higher than max: 0x%X\n",
+				i, fft_sample_40.data[i]);
+			ret = -1;
+		}
+	}
+
+	if (upper_max_index == dc_pos) {
+		tmp_mag = 0;
+		for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) {
+			if (fft_sample_40.data[i] > tmp_mag) {
+				tmp_mag = fft_sample_40.data[i];
+				fft_sample_40.upper_max_index = i;
+			}
+		}
+		upper_mag = tmp_mag << max_exp;
+		fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
+
+		ath_dbg(common, SPECTRAL_SCAN,
+			"Calculated new upper max 0x%X at %i\n",
+			tmp_mag, i);
+	} else
+	for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) {
+		if (fft_sample_40.data[i] == (upper_mag >> max_exp))
+			ath_dbg(common, SPECTRAL_SCAN,
+				"Got upper mag: 0x%X at index %i\n",
+				fft_sample_40.data[i], i);
+
+		if (fft_sample_40.data[i] > (upper_mag >> max_exp)) {
+			ath_dbg(common, SPECTRAL_SCAN,
+				"Got upper bin %i higher than max: 0x%X\n",
+				i, fft_sample_40.data[i]);
+
+			ret = -1;
+		}
+	}
+
+	if (ret < 0)
+		return ret;
+
+	tlv = (struct fft_sample_tlv *)&fft_sample_40;
+
+	ath_debug_send_fft_sample(spec_priv, tlv);
+
+	return 0;
+}
+
+static inline void
+ath_cmn_copy_fft_frame(u8 *in, u8 *out, int sample_len, int sample_bytes)
+{
+	switch (sample_bytes - sample_len) {
+	case -1:
+		/* First byte missing */
+		memcpy(&out[1], in,
+		       sample_len - 1);
+		break;
+	case 0:
+		/* Length correct, nothing to do. */
+		memcpy(out, in, sample_len);
+		break;
+	case 1:
+		/* MAC added 2 extra bytes AND first byte
+		 * is missing.
+		 */
+		memcpy(&out[1], in, 30);
+		out[31] = in[31];
+		memcpy(&out[32], &in[33],
+		       sample_len - 32);
+		break;
+	case 2:
+		/* MAC added 2 extra bytes at bin 30 and 32,
+		 * remove them.
+		 */
+		memcpy(out, in, 30);
+		out[30] = in[31];
+		memcpy(&out[31], &in[33],
+		       sample_len - 31);
+		break;
+	default:
+		break;
+	}
+}
+
+static int
+ath_cmn_is_fft_buf_full(struct ath_spec_scan_priv *spec_priv)
+{
+	int i = 0;
+	int ret = 0;
+	struct rchan *rc = spec_priv->rfs_chan_spec_scan;
+
+	for_each_online_cpu(i)
+		ret += relay_buf_full(rc->buf[i]);
+
+	i = num_online_cpus();
+
+	if (ret == i)
+		return 1;
+	else
+		return 0;
+}
+
 /* returns 1 if this was a spectral frame, even if not handled. */
 int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_hdr *hdr,
 		    struct ath_rx_status *rs, u64 tsf)
 {
+	u8 sample_buf[SPECTRAL_SAMPLE_MAX_LEN] = {0};
 	struct ath_hw *ah = spec_priv->ah;
 	struct ath_common *common = ath9k_hw_common(spec_priv->ah);
-	u8 num_bins, *bins, *vdata = (u8 *)hdr;
-	struct fft_sample_ht20 fft_sample_20;
-	struct fft_sample_ht20_40 fft_sample_40;
-	struct fft_sample_tlv *tlv;
+	u8 num_bins, *vdata = (u8 *)hdr;
 	struct ath_radar_info *radar_info;
 	int len = rs->rs_datalen;
-	int dc_pos;
-	u16 fft_len, length, freq = ah->curchan->chan->center_freq;
+	int i;
+	int got_slen = 0;
+	u8  *sample_start;
+	int sample_bytes = 0;
+	int ret = 0;
+	u16 fft_len, sample_len, freq = ah->curchan->chan->center_freq;
 	enum nl80211_channel_type chan_type;
+	ath_cmn_fft_idx_validator *fft_idx_validator;
+	ath_cmn_fft_sample_handler *fft_handler;
 
 	/* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
 	 * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
@@ -68,140 +528,170 @@
 	if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
 		return 0;
 
+	/* Output buffers are full, no need to process anything
+	 * since there is no space to put the result anyway
+	 */
+	ret = ath_cmn_is_fft_buf_full(spec_priv);
+	if (ret == 1) {
+		ath_dbg(common, SPECTRAL_SCAN, "FFT report ignored, no space "
+						"left on output buffers\n");
+		return 1;
+	}
+
 	chan_type = cfg80211_get_chandef_type(&common->hw->conf.chandef);
 	if ((chan_type == NL80211_CHAN_HT40MINUS) ||
 	    (chan_type == NL80211_CHAN_HT40PLUS)) {
 		fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
+		sample_len = SPECTRAL_HT20_40_SAMPLE_LEN;
 		num_bins = SPECTRAL_HT20_40_NUM_BINS;
-		bins = (u8 *)fft_sample_40.data;
+		fft_idx_validator = &ath_cmn_max_idx_verify_ht20_40_fft;
+		fft_handler = &ath_cmn_process_ht20_40_fft;
 	} else {
 		fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
+		sample_len = SPECTRAL_HT20_SAMPLE_LEN;
 		num_bins = SPECTRAL_HT20_NUM_BINS;
-		bins = (u8 *)fft_sample_20.data;
+		fft_idx_validator = ath_cmn_max_idx_verify_ht20_fft;
+		fft_handler = &ath_cmn_process_ht20_fft;
 	}
 
-	/* Variation in the data length is possible and will be fixed later */
-	if ((len > fft_len + 2) || (len < fft_len - 1))
-		return 1;
+	ath_dbg(common, SPECTRAL_SCAN, "Got radar dump bw_info: 0x%X,"
+					"len: %i fft_len: %i\n",
+					radar_info->pulse_bw_info,
+					len,
+					fft_len);
+	sample_start = vdata;
+	for (i = 0; i < len - 2; i++) {
+		sample_bytes++;
 
-	switch (len - fft_len) {
-	case 0:
-		/* length correct, nothing to do. */
-		memcpy(bins, vdata, num_bins);
-		break;
-	case -1:
-		/* first byte missing, duplicate it. */
-		memcpy(&bins[1], vdata, num_bins - 1);
-		bins[0] = vdata[0];
-		break;
-	case 2:
-		/* MAC added 2 extra bytes at bin 30 and 32, remove them. */
-		memcpy(bins, vdata, 30);
-		bins[30] = vdata[31];
-		memcpy(&bins[31], &vdata[33], num_bins - 31);
-		break;
-	case 1:
-		/* MAC added 2 extra bytes AND first byte is missing. */
-		bins[0] = vdata[0];
-		memcpy(&bins[1], vdata, 30);
-		bins[31] = vdata[31];
-		memcpy(&bins[32], &vdata[33], num_bins - 32);
-		break;
-	default:
-		return 1;
-	}
-
-	/* DC value (value in the middle) is the blind spot of the spectral
-	 * sample and invalid, interpolate it.
-	 */
-	dc_pos = num_bins / 2;
-	bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
-
-	if ((chan_type == NL80211_CHAN_HT40MINUS) ||
-	    (chan_type == NL80211_CHAN_HT40PLUS)) {
-		s8 lower_rssi, upper_rssi;
-		s16 ext_nf;
-		u8 lower_max_index, upper_max_index;
-		u8 lower_bitmap_w, upper_bitmap_w;
-		u16 lower_mag, upper_mag;
-		struct ath9k_hw_cal_data *caldata = ah->caldata;
-		struct ath_ht20_40_mag_info *mag_info;
-
-		if (caldata)
-			ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
-					caldata->nfCalHist[3].privNF);
-		else
-			ext_nf = ATH_DEFAULT_NOISE_FLOOR;
-
-		length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
-		fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
-		fft_sample_40.tlv.length = __cpu_to_be16(length);
-		fft_sample_40.freq = __cpu_to_be16(freq);
-		fft_sample_40.channel_type = chan_type;
-
-		if (chan_type == NL80211_CHAN_HT40PLUS) {
-			lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
-			upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
-
-			fft_sample_40.lower_noise = ah->noise;
-			fft_sample_40.upper_noise = ext_nf;
-		} else {
-			lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
-			upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
-
-			fft_sample_40.lower_noise = ext_nf;
-			fft_sample_40.upper_noise = ah->noise;
+		/* Only a single sample received, no need to look
+		 * for the sample's end, do the correction based
+		 * on the packet's length instead. Note that hw
+		 * will always put the radar_info structure at
+		 * the end.
+		 */
+		if (len <= fft_len + 2) {
+			sample_bytes = len - sizeof(struct ath_radar_info);
+			got_slen = 1;
 		}
-		fft_sample_40.lower_rssi = lower_rssi;
-		fft_sample_40.upper_rssi = upper_rssi;
 
-		mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
-		lower_mag = spectral_max_magnitude(mag_info->lower_bins);
-		upper_mag = spectral_max_magnitude(mag_info->upper_bins);
-		fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
-		fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
-		lower_max_index = spectral_max_index(mag_info->lower_bins);
-		upper_max_index = spectral_max_index(mag_info->upper_bins);
-		fft_sample_40.lower_max_index = lower_max_index;
-		fft_sample_40.upper_max_index = upper_max_index;
-		lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
-		upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
-		fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
-		fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
-		fft_sample_40.max_exp = mag_info->max_exp & 0xf;
+		/* Search for the end of the FFT frame between
+		 * sample_len - 1 and sample_len + 2. max_exp is 3
+		 * bits long and is the only value on the last
+		 * byte of the frame, so since it'll be smaller than
+		 * the next byte (the first bin of the next sample)
+		 * 90% of the time, we can use it as a separator.
+		 */
+		if (vdata[i] <= 0x7 && sample_bytes >= sample_len - 1) {
 
-		fft_sample_40.tsf = __cpu_to_be64(tsf);
+			/* Got a frame length within boundaries, there are
+			 * four scenarios here:
+			 *
+			 * a) sample_len -> We got the correct length
+			 * b) sample_len + 2 -> 2 bytes added around bin[31]
+			 * c) sample_len - 1 -> The first byte is missing
+			 * d) sample_len + 1 -> b + c at the same time
+			 *
+			 * When MAC adds 2 extra bytes, bin[31] and bin[32]
+			 * have the same value, so we can use that for further
+			 * verification in cases b and d.
+			 */
 
-		tlv = (struct fft_sample_tlv *)&fft_sample_40;
-	} else {
-		u8 max_index, bitmap_w;
-		u16 magnitude;
-		struct ath_ht20_mag_info *mag_info;
+			/* Did we go too far ? If so we couldn't determine
+			 * this sample's boundaries, discard any further
+			 * data
+			 */
+			if ((sample_bytes > sample_len + 2) ||
+			   ((sample_bytes > sample_len) &&
+			   (sample_start[31] != sample_start[32])))
+				break;
 
-		length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
-		fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
-		fft_sample_20.tlv.length = __cpu_to_be16(length);
-		fft_sample_20.freq = __cpu_to_be16(freq);
+			/* See if we got a valid frame by checking the
+			 * consistency of mag_info fields. This is to
+			 * prevent from "fixing" a correct frame.
+			 * Failure is non-fatal, later frames may
+			 * be valid.
+			 */
+			if (!fft_idx_validator(&vdata[i], i)) {
+				ath_dbg(common, SPECTRAL_SCAN,
+					"Found valid fft frame at %i\n", i);
+				got_slen = 1;
+			}
 
-		fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
-		fft_sample_20.noise = ah->noise;
+			/* We expect 1 - 2 more bytes */
+			else if ((sample_start[31] == sample_start[32]) &&
+				(sample_bytes >= sample_len) &&
+				(sample_bytes < sample_len + 2) &&
+				(vdata[i + 1] <= 0x7))
+				continue;
 
-		mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
-		magnitude = spectral_max_magnitude(mag_info->all_bins);
-		fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
-		max_index = spectral_max_index(mag_info->all_bins);
-		fft_sample_20.max_index = max_index;
-		bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
-		fft_sample_20.bitmap_weight = bitmap_w;
-		fft_sample_20.max_exp = mag_info->max_exp & 0xf;
+			/* Try to distinguish cases a and c */
+			else if ((sample_bytes == sample_len - 1) &&
+				(vdata[i + 1] <= 0x7))
+				continue;
 
-		fft_sample_20.tsf = __cpu_to_be64(tsf);
+			got_slen = 1;
+		}
 
-		tlv = (struct fft_sample_tlv *)&fft_sample_20;
+		if (got_slen) {
+			ath_dbg(common, SPECTRAL_SCAN, "FFT frame len: %i\n",
+				sample_bytes);
+
+			/* Only try to fix a frame if it's the only one
+			 * on the report, else just skip it.
+			 */
+			if (sample_bytes != sample_len && len <= fft_len + 2) {
+				ath_cmn_copy_fft_frame(sample_start,
+						       sample_buf, sample_len,
+						       sample_bytes);
+
+				fft_handler(rs, spec_priv, sample_buf,
+					    tsf, freq, chan_type);
+
+				/* Mix the received bins to the /dev/random
+				 * pool before clearing the staging buffer
+				 */
+				add_device_randomness(sample_buf, num_bins);
+
+				memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
+			}
+
+			/* Process a normal frame */
+			if (sample_bytes == sample_len) {
+				ret = fft_handler(rs, spec_priv, sample_start,
+						  tsf, freq, chan_type);
+
+				/* Mix the received bins to the /dev/random
+				 * pool
+				 */
+				add_device_randomness(sample_start, num_bins);
+			}
+
+			/* Short report processed, break out of the
+			 * loop.
+			 */
+			if (len <= fft_len + 2)
+				break;
+
+			sample_start = &vdata[i + 1];
+
+			/* -1 to grab sample_len - 1, -2 since
+			 * they'll get increased by one. In case
+			 * of failure try to recover by going byte
+			 * by byte instead.
+			 */
+			if (ret == 0) {
+				i += num_bins - 2;
+				sample_bytes = num_bins - 2;
+			}
+			got_slen = 0;
+		}
 	}
 
-	ath_debug_send_fft_sample(spec_priv, tlv);
-
+	i -= num_bins - 2;
+	if (len - i != sizeof(struct ath_radar_info))
+		ath_dbg(common, SPECTRAL_SCAN, "FFT report truncated"
+						"(bytes left: %i)\n",
+						len - i);
 	return 1;
 }
 EXPORT_SYMBOL(ath_cmn_process_fft);
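
ath_cmn_copy_fft_frame() above selects its repair strategy from the difference
between the bytes actually read and the nominal sample length. A standalone
sketch of that classification, with a made-up nominal length (the real ones
are the SPECTRAL_*_SAMPLE_LEN macros):

	#include <stdio.h>

	/* Mirrors the switch in ath_cmn_copy_fft_frame() above. */
	static const char *fft_frame_case(int sample_bytes, int sample_len)
	{
		switch (sample_bytes - sample_len) {
		case -1: return "first byte missing";
		case  0: return "length correct";
		case  1: return "2 extra MAC bytes and first byte missing";
		case  2: return "2 extra MAC bytes around bins 30/32";
		default: return "unrecoverable";
		}
	}

	int main(void)
	{
		int sample_len = 60;	/* illustrative nominal length */
		int n;

		for (n = sample_len - 1; n <= sample_len + 2; n++)
			printf("%i bytes: %s\n", n, fft_frame_case(n, sample_len));
		return 0;
	}
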
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.h b/drivers/net/wireless/ath/ath9k/common-spectral.h
index 82d9dd2..998743b 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.h
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.h
@@ -66,6 +66,8 @@
 } __packed;
 
 #define SPECTRAL_HT20_TOTAL_DATA_LEN	(sizeof(struct ath_ht20_fft_packet))
+#define	SPECTRAL_HT20_SAMPLE_LEN	(sizeof(struct ath_ht20_mag_info) +\
+					SPECTRAL_HT20_NUM_BINS)
 
 /* Dynamic 20/40 mode:
  *
@@ -101,6 +103,10 @@
 };
 
 #define SPECTRAL_HT20_40_TOTAL_DATA_LEN	(sizeof(struct ath_ht20_40_fft_packet))
+#define	SPECTRAL_HT20_40_SAMPLE_LEN	(sizeof(struct ath_ht20_40_mag_info) +\
+					SPECTRAL_HT20_40_NUM_BINS)
+
+#define	SPECTRAL_SAMPLE_MAX_LEN		SPECTRAL_HT20_40_SAMPLE_LEN
 
 /* grabs the max magnitude from the all/upper/lower bins */
 static inline u16 spectral_max_magnitude(u8 *bins)
@@ -111,17 +117,32 @@
 }
 
 /* return the index of the max magnitude from the all/upper/lower bins */
-static inline u8 spectral_max_index(u8 *bins)
+static inline u8 spectral_max_index(u8 *bins, int num_bins)
 {
 	s8 m = (bins[2] & 0xfc) >> 2;
+	u8 zero_idx = num_bins / 2;
 
-	/* TODO: this still doesn't always report the right values ... */
-	if (m > 32)
+	/* It's a 5 bit signed int, remove its sign and use one's
+	 * complement interpretation to add the sign back to the 8
+	 * bit int
+	 */
+	if (m & 0x20) {
+		m &= ~0x20;
 		m |= 0xe0;
-	else
-		m &= ~0xe0;
+	}
 
-	return m + 29;
+	/* Bring the zero point to the beginning
+	 * instead of the middle so that we can use
+	 * it for array lookup and that we don't deal
+	 * with negative values later
+	 */
+	m += zero_idx;
+
+	/* Sanity check to make sure index is within bounds */
+	if (m < 0 || m > num_bins - 1)
+		m = 0;
+
+	return m;
 }
 
 /* return the bitmap weight from the all/upper/lower bins */
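
The reworked spectral_max_index() above treats 0x20 as the sign bit of the raw
index, sign-extends it to 8 bits and then moves the zero point from the middle
of the FFT to bin 0 so the result can index the bins array directly. A worked
standalone version of just that step, mirroring the logic above (for example,
raw 0x3d is -3 after extension, i.e. bin 25 of a 56-bin HT20 sample):

	#include <stdio.h>

	static int rebase_max_index(int raw, int num_bins)
	{
		signed char m = raw;

		if (m & 0x20) {		/* sign bit of the raw field */
			m &= ~0x20;
			m |= 0xe0;	/* sign-extend to 8 bits */
		}

		m += num_bins / 2;	/* zero point moves to bin 0 */

		if (m < 0 || m > num_bins - 1)
			m = 0;		/* out of range: clamp, as the driver does */

		return m;
	}

	int main(void)
	{
		printf("%d\n", rebase_max_index(0x3d, 56));	/* prints 25 */
		return 0;
	}
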
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index e82a0d4..16dff4b 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -440,9 +440,9 @@
 }
 #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
 
-#define OP_BT_PRIORITY_DETECTED    BIT(3)
-#define OP_BT_SCAN                 BIT(4)
-#define OP_TSF_RESET               BIT(6)
+#define OP_BT_PRIORITY_DETECTED    3
+#define OP_BT_SCAN                 4
+#define OP_TSF_RESET               6
 
 enum htc_op_flags {
 	HTC_FWFLAG_NO_RMW,
@@ -531,6 +531,7 @@
 	struct ath9k_debug debug;
 #endif
 	struct mutex mutex;
+	struct ieee80211_vif *csa_vif;
 };
 
 static inline void ath_read_cachesize(struct ath_common *common, int *csz)
@@ -584,6 +585,7 @@
 void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event);
 void ath9k_tx_failed_tasklet(unsigned long data);
 void ath9k_htc_tx_cleanup_timer(unsigned long data);
+bool ath9k_htc_csa_is_finished(struct ath9k_htc_priv *priv);
 
 int ath9k_rx_init(struct ath9k_htc_priv *priv);
 void ath9k_rx_cleanup(struct ath9k_htc_priv *priv);
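
On the OP_* change in htc.h above: these constants feed the set_bit() and
test_bit() family, which takes a bit number, not a mask, so the old BIT(n)
definitions made such calls address bit 1 << n instead of bit n. A standalone
illustration with minimal stand-ins for the kernel bitops:

	#include <stdio.h>

	#define OP_BT_SCAN 4	/* a bit number, as after this change */

	/* Minimal stand-ins for the kernel's bitops on a single word. */
	static void set_bit(int nr, unsigned long *addr)
	{
		*addr |= 1UL << nr;
	}

	static int test_bit(int nr, const unsigned long *addr)
	{
		return (*addr >> nr) & 1;
	}

	int main(void)
	{
		unsigned long flags = 0;

		set_bit(OP_BT_SCAN, &flags);	/* sets bit 4, i.e. 0x10 */
		printf("flags=%#lx test=%d\n", flags, test_bit(OP_BT_SCAN, &flags));

		/* With the old define (BIT(4) == 0x10) the same call would
		 * have operated on bit 16, a different flag entirely.
		 */
		return 0;
	}
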
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index e8b6ec3..e6bcb4c 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -257,6 +257,8 @@
 	}
 
 	spin_unlock_bh(&priv->beacon_lock);
+
+	ath9k_htc_csa_is_finished(priv);
 }
 
 static int ath9k_htc_choose_bslot(struct ath9k_htc_priv *priv,
@@ -503,3 +505,20 @@
 		return;
 	}
 }
+
+bool ath9k_htc_csa_is_finished(struct ath9k_htc_priv *priv)
+{
+	struct ieee80211_vif *vif;
+
+	vif = priv->csa_vif;
+	if (!vif || !vif->csa_active)
+		return false;
+
+	if (!ieee80211_csa_is_complete(vif))
+		return false;
+
+	ieee80211_csa_finish(vif);
+
+	priv->csa_vif = NULL;
+	return true;
+}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index d7beefe..39eaf9b 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -594,7 +594,7 @@
 
 	priv->spec_priv.ah = priv->ah;
 	priv->spec_priv.spec_config.enabled = 0;
-	priv->spec_priv.spec_config.short_repeat = false;
+	priv->spec_priv.spec_config.short_repeat = true;
 	priv->spec_priv.spec_config.count = 8;
 	priv->spec_priv.spec_config.endless = false;
 	priv->spec_priv.spec_config.period = 0x12;
@@ -717,18 +717,18 @@
 	struct ath_common *common = ath9k_hw_common(priv->ah);
 	struct base_eep_header *pBase;
 
-	hw->flags = IEEE80211_HW_SIGNAL_DBM |
-		IEEE80211_HW_AMPDU_AGGREGATION |
-		IEEE80211_HW_SPECTRUM_MGMT |
-		IEEE80211_HW_HAS_RATE_CONTROL |
-		IEEE80211_HW_RX_INCLUDES_FCS |
-		IEEE80211_HW_PS_NULLFUNC_STACK |
-		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
-		IEEE80211_HW_MFP_CAPABLE |
-		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+	ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+	ieee80211_hw_set(hw, MFP_CAPABLE);
+	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+	ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+	ieee80211_hw_set(hw, SPECTRUM_MGMT);
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
 
 	if (ath9k_ps_enable)
-		hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+		ieee80211_hw_set(hw, SUPPORTS_PS);
 
 	hw->wiphy->interface_modes =
 		BIT(NL80211_IFTYPE_STATION) |
@@ -744,7 +744,8 @@
 	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN |
-			    WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+			    WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+			    WIPHY_FLAG_HAS_CHANNEL_SWITCH;
 
 	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
 
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 564923c..dab1323 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1134,6 +1134,9 @@
 	priv->nvifs--;
 	priv->vif_slot &= ~(1 << avp->index);
 
+	if (priv->csa_vif == vif)
+		priv->csa_vif = NULL;
+
 	ath9k_htc_remove_station(priv, vif, NULL);
 
 	DEC_VIF(priv, vif->type);
@@ -1238,8 +1241,7 @@
 }
 
 #define SUPPORTED_FILTERS			\
-	(FIF_PROMISC_IN_BSS |			\
-	FIF_ALLMULTI |				\
+	(FIF_ALLMULTI |				\
 	FIF_CONTROL |				\
 	FIF_PSPOLL |				\
 	FIF_OTHER_BSS |				\
@@ -1842,6 +1844,19 @@
 	return 0;
 }
 
+static void ath9k_htc_channel_switch_beacon(struct ieee80211_hw *hw,
+					    struct ieee80211_vif *vif,
+					    struct cfg80211_chan_def *chandef)
+{
+	struct ath9k_htc_priv *priv = hw->priv;
+
+	/* mac80211 does not support CSA in multi-if cases (yet) */
+	if (WARN_ON(priv->csa_vif))
+		return;
+
+	priv->csa_vif = vif;
+}
+
 struct ieee80211_ops ath9k_htc_ops = {
 	.tx                 = ath9k_htc_tx,
 	.start              = ath9k_htc_start,
@@ -1868,6 +1883,7 @@
 	.set_bitrate_mask   = ath9k_htc_set_bitrate_mask,
 	.get_stats	    = ath9k_htc_get_stats,
 	.get_antenna	    = ath9k_htc_get_antenna,
+	.channel_switch_beacon	= ath9k_htc_channel_switch_beacon,
 
 #ifdef CONFIG_ATH9K_HTC_DEBUGFS
 	.get_et_sset_count  = ath9k_htc_get_et_sset_count,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index a0f58e2..cc9648f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -872,14 +872,7 @@
 	if (priv->rxfilter & FIF_PROBE_REQ)
 		rfilt |= ATH9K_RX_FILTER_PROBEREQ;
 
-	/*
-	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
-	 * mode interface or when in monitor mode. AP mode does not need this
-	 * since it receives all in-BSS frames anyway.
-	 */
-	if (((ah->opmode != NL80211_IFTYPE_AP) &&
-	     (priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
-	    ah->is_monitoring)
+	if (ah->is_monitoring)
 		rfilt |= ATH9K_RX_FILTER_PROM;
 
 	if (priv->rxfilter & FIF_CONTROL)
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index c1d2d03..e8454db 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -1119,6 +1119,8 @@
 void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
 void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array,
 				 struct ath9k_channel *chan);
+void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
+				 struct ath9k_channel *chan, int bin);
 void ar5008_hw_init_rate_txpower(struct ath_hw *ah, int16_t *rate_array,
 				 struct ath9k_channel *chan, int ht40_delta);
 
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index f8d11ef..eff0e53 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -796,7 +796,7 @@
 	if (!ath9k_is_chanctx_enabled())
 		return;
 
-	hw->flags |= IEEE80211_HW_QUEUE_CONTROL;
+	ieee80211_hw_set(hw, QUEUE_CONTROL);
 	hw->queues = ATH9K_NUM_TX_QUEUES;
 	hw->offchannel_tx_hw_queue = hw->queues - 1;
 	hw->wiphy->interface_modes &= ~ BIT(NL80211_IFTYPE_WDS);
@@ -818,20 +818,20 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 
-	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
-		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
-		IEEE80211_HW_SIGNAL_DBM |
-		IEEE80211_HW_PS_NULLFUNC_STACK |
-		IEEE80211_HW_SPECTRUM_MGMT |
-		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
-		IEEE80211_HW_SUPPORTS_RC_TABLE |
-		IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(hw, SPECTRUM_MGMT);
+	ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+	ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
 
 	if (ath9k_ps_enable)
-		hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+		ieee80211_hw_set(hw, SUPPORTS_PS);
 
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
-		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+		ieee80211_hw_set(hw, AMPDU_AGGREGATION);
 
 		if (AR_SREV_9280_20_OR_LATER(ah))
 			hw->radiotap_mcs_details |=
@@ -839,7 +839,7 @@
 	}
 
 	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
-		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+		ieee80211_hw_set(hw, MFP_CAPABLE);
 
 	hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
 			       NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index b0badef..cfd45cb 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -216,11 +216,13 @@
 	ath_stop_ani(sc);
 	ath9k_hw_disable_interrupts(ah);
 
-	if (!ath_drain_all_txq(sc))
-		ret = false;
-
-	if (!ath_stoprecv(sc))
-		ret = false;
+	if (AR_SREV_9300_20_OR_LATER(ah)) {
+		ret &= ath_stoprecv(sc);
+		ret &= ath_drain_all_txq(sc);
+	} else {
+		ret &= ath_drain_all_txq(sc);
+		ret &= ath_stoprecv(sc);
+	}
 
 	return ret;
 }
@@ -1442,8 +1444,7 @@
 }
 
 #define SUPPORTED_FILTERS			\
-	(FIF_PROMISC_IN_BSS |			\
-	FIF_ALLMULTI |				\
+	(FIF_ALLMULTI |				\
 	FIF_CONTROL |				\
 	FIF_PSPOLL |				\
 	FIF_OTHER_BSS |				\
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 6fb40ef..6c75fb1 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -392,11 +392,6 @@
 	if (sc->cur_chan->rxfilter & FIF_PROBE_REQ)
 		rfilt |= ATH9K_RX_FILTER_PROBEREQ;
 
-	/*
-	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
-	 * mode interface or when in monitor mode. AP mode does not need this
-	 * since it receives all in-BSS frames anyway.
-	 */
 	if (sc->sc_ah->is_monitoring)
 		rfilt |= ATH9K_RX_FILTER_PROM;
 
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 47d5c2e..88045f9 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -286,7 +286,7 @@
 	}
 
 	if (SUPP(CARL9170FW_PSM) && SUPP(CARL9170FW_FIXED_5GHZ_PSM))
-		ar->hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+		ieee80211_hw_set(ar->hw, SUPPORTS_PS);
 
 	if (!SUPP(CARL9170FW_USB_INIT_FIRMWARE)) {
 		dev_err(&ar->udev->dev, "firmware does not provide "
@@ -310,8 +310,7 @@
 	if (SUPP(CARL9170FW_RX_FILTER)) {
 		ar->fw.rx_filter = true;
 		ar->rx_filter_caps = FIF_FCSFAIL | FIF_PLCPFAIL |
-			FIF_CONTROL | FIF_PSPOLL | FIF_OTHER_BSS |
-			FIF_PROMISC_IN_BSS;
+			FIF_CONTROL | FIF_PSPOLL | FIF_OTHER_BSS;
 	}
 
 	if (SUPP(CARL9170FW_HW_COUNTERS))
diff --git a/drivers/net/wireless/ath/carl9170/led.c b/drivers/net/wireless/ath/carl9170/led.c
index 78dadc7..2c74425 100644
--- a/drivers/net/wireless/ath/carl9170/led.c
+++ b/drivers/net/wireless/ath/carl9170/led.c
@@ -122,7 +122,7 @@
 }
 
 static int carl9170_led_register_led(struct ar9170 *ar, int i, char *name,
-				     char *trigger)
+				     const char *trigger)
 {
 	int err;
 
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index f1455a0..170c209 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1011,9 +1011,8 @@
 	if (multicast != ar->cur_mc_hash)
 		WARN_ON(carl9170_update_multicast(ar, multicast));
 
-	if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
-		ar->sniffer_enabled = !!(*new_flags &
-			(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));
+	if (changed_flags & FIF_OTHER_BSS) {
+		ar->sniffer_enabled = !!(*new_flags & FIF_OTHER_BSS);
 
 		WARN_ON(carl9170_set_operating_mode(ar));
 	}
@@ -1033,7 +1032,7 @@
 		if (!(*new_flags & FIF_PSPOLL))
 			rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
 
-		if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
+		if (!(*new_flags & FIF_OTHER_BSS)) {
 			rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
 			rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
 		}
@@ -1845,22 +1844,22 @@
 	/* firmware decides which modes we support */
 	hw->wiphy->interface_modes = 0;
 
-	hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
-		     IEEE80211_HW_MFP_CAPABLE |
-		     IEEE80211_HW_REPORTS_TX_ACK_STATUS |
-		     IEEE80211_HW_SUPPORTS_PS |
-		     IEEE80211_HW_PS_NULLFUNC_STACK |
-		     IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
-		     IEEE80211_HW_SUPPORTS_RC_TABLE |
-		     IEEE80211_HW_SIGNAL_DBM |
-		     IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+	ieee80211_hw_set(hw, MFP_CAPABLE);
+	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(hw, SUPPORTS_PS);
+	ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+	ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
+	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
 
 	if (!modparam_noht) {
 		/*
 		 * see the comment above for why we allow the user
 		 * to disable HT via a module parameter.
 		 */
-		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+		ieee80211_hw_set(hw, AMPDU_AGGREGATION);
 	}
 
 	hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index c9f9331..76842e6 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -651,6 +651,7 @@
 	unsigned int plen, void *payload, unsigned int outlen, void *out)
 {
 	int err = -ENOMEM;
+	unsigned long time_left;
 
 	if (!IS_ACCEPTING_CMD(ar))
 		return -EIO;
@@ -672,8 +673,8 @@
 	err = __carl9170_exec_cmd(ar, &ar->cmd, false);
 
 	if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) {
-		err = wait_for_completion_timeout(&ar->cmd_wait, HZ);
-		if (err == 0) {
+		time_left = wait_for_completion_timeout(&ar->cmd_wait, HZ);
+		if (time_left == 0) {
 			err = -ETIMEDOUT;
 			goto err_unbuf;
 		}
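
The fix keeps the two value domains apart: wait_for_completion_timeout()
returns the remaining jiffies as an unsigned long, where 0 means timeout,
so its result must not be funneled through an int that also carries
negative errno values. The corrected pattern in isolation (a sketch, not
the driver function):

	static int wait_cmd_done(struct completion *done)
	{
		unsigned long time_left =
			wait_for_completion_timeout(done, HZ);

		if (time_left == 0)
			return -ETIMEDOUT;	/* timer expired */
		return 0;			/* completed in time */
	}
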
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index c657ca2..656ce42 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -41,30 +41,31 @@
 
 /* percentage on ppb threshold to trigger detection */
 #define MIN_PPB_THRESH	50
-#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100)
+#define PPB_THRESH_RATE(PPB, RATE) ((PPB * RATE + 100 - RATE) / 100)
+#define PPB_THRESH(PPB) PPB_THRESH_RATE(PPB, MIN_PPB_THRESH)
 #define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
 /* percentage of pulse width tolerance */
 #define WIDTH_TOLERANCE 5
 #define WIDTH_LOWER(X) ((X*(100-WIDTH_TOLERANCE)+50)/100)
 #define WIDTH_UPPER(X) ((X*(100+WIDTH_TOLERANCE)+50)/100)
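
The rounding in these macros is worth spelling out: PRF2PRI() converts a
pulse repetition frequency to an interval with round-to-nearest, and the
new PPB_THRESH_RATE() reproduces the old PPB_THRESH() at a 50% rate while
never letting the threshold drop below one pulse. Worked arithmetic
(illustrative):

	/* PRF2PRI(700)             = (1000000 + 350) / 700 = 1429 us  */
	/* PPB_THRESH_RATE(18, 50)  = (18 * 50 + 50) / 100  = 9 pulses */
	/* PPB_THRESH_RATE(18, 29)  = (18 * 29 + 71) / 100  = 5 pulses */
	/* PPB_THRESH_RATE(1, RATE) = (RATE + 100 - RATE) / 100 = 1    */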
 
-#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB)	\
+#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP)	\
 {								\
 	ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX),		\
 	(PRF2PRI(PMAX) - PRI_TOLERANCE),			\
 	(PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF,	\
-	PPB_THRESH(PPB), PRI_TOLERANCE,				\
+	PPB_THRESH(PPB), PRI_TOLERANCE,	CHIRP			\
 }
 
 /* radar types as defined by ETSI EN-301-893 v1.5.1 */
 static const struct radar_detector_specs etsi_radar_ref_types_v15[] = {
-	ETSI_PATTERN(0,  0,  1,  700,  700, 1, 18),
-	ETSI_PATTERN(1,  0,  5,  200, 1000, 1, 10),
-	ETSI_PATTERN(2,  0, 15,  200, 1600, 1, 15),
-	ETSI_PATTERN(3,  0, 15, 2300, 4000, 1, 25),
-	ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20),
-	ETSI_PATTERN(5,  0,  2,  300,  400, 3, 10),
-	ETSI_PATTERN(6,  0,  2,  400, 1200, 3, 15),
+	ETSI_PATTERN(0,  0,  1,  700,  700, 1, 18, false),
+	ETSI_PATTERN(1,  0,  5,  200, 1000, 1, 10, false),
+	ETSI_PATTERN(2,  0, 15,  200, 1600, 1, 15, false),
+	ETSI_PATTERN(3,  0, 15, 2300, 4000, 1, 25, false),
+	ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20, false),
+	ETSI_PATTERN(5,  0,  2,  300,  400, 3, 10, false),
+	ETSI_PATTERN(6,  0,  2,  400, 1200, 3, 15, false),
 };
 
 static const struct radar_types etsi_radar_types_v15 = {
@@ -73,21 +74,30 @@
 	.radar_types		= etsi_radar_ref_types_v15,
 };
 
-#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB)	\
+#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP)	\
 {								\
 	ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX),		\
 	PMIN - PRI_TOLERANCE,					\
 	PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF,		\
-	PPB_THRESH(PPB), PRI_TOLERANCE,				\
+	PPB_THRESH(PPB), PRI_TOLERANCE,	CHIRP			\
 }
 
+/* radar types released on August 14, 2014
+ * Type 1 PRI values are randomly selected within the range of 518 and 3066.
+ * Dividing that range into 3 groups is good enough for both radar detection
+ * and avoidance of false detection, based on practical test results
+ * collected for more than a year.
+ */
 static const struct radar_detector_specs fcc_radar_ref_types[] = {
-	FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
-	FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
-	FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
-	FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
-	FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 1),
-	FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
+	FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18, false),
+	FCC_PATTERN(101, 0, 1, 518, 938, 1, 57, false),
+	FCC_PATTERN(102, 0, 1, 938, 2000, 1, 27, false),
+	FCC_PATTERN(103, 0, 1, 2000, 3066, 1, 18, false),
+	FCC_PATTERN(2, 0, 5, 150, 230, 1, 23, false),
+	FCC_PATTERN(3, 6, 10, 200, 500, 1, 16, false),
+	FCC_PATTERN(4, 11, 20, 200, 500, 1, 12, false),
+	FCC_PATTERN(5, 50, 100, 1000, 2000, 1, 1, true),
+	FCC_PATTERN(6, 0, 1, 333, 333, 1, 9, false),
 };
 
 static const struct radar_types fcc_radar_types = {
@@ -96,17 +106,23 @@
 	.radar_types		= fcc_radar_ref_types,
 };
 
-#define JP_PATTERN FCC_PATTERN
+#define JP_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, RATE, CHIRP)	\
+{								\
+	ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX),		\
+	PMIN - PRI_TOLERANCE,					\
+	PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF,		\
+	PPB_THRESH_RATE(PPB, RATE), PRI_TOLERANCE, CHIRP	\
+}
 static const struct radar_detector_specs jp_radar_ref_types[] = {
-	JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
-	JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18),
-	JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18),
-	JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18),
-	JP_PATTERN(4, 0, 5, 150, 230, 1, 23),
-	JP_PATTERN(5, 6, 10, 200, 500, 1, 16),
-	JP_PATTERN(6, 11, 20, 200, 500, 1, 12),
-	JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20),
-	JP_PATTERN(5, 0, 1, 333, 333, 1, 9),
+	JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18, 29, false),
+	JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18, 29, false),
+	JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18, 50, false),
+	JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18, 50, false),
+	JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false),
+	JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false),
+	JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false),
+	JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20, 50, false),
+	JP_PATTERN(5, 0, 1, 333, 333, 1, 9, 50, false),
 };
 
 static const struct radar_types jp_radar_types = {
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.h b/drivers/net/wireless/ath/dfs_pattern_detector.h
index dde2652..25a43d6 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.h
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.h
@@ -40,12 +40,14 @@
  * @freq: channel frequency in MHz
  * @width: pulse duration in us
  * @rssi: rssi of radar event
+ * @chirp: chirp detected in pulse
  */
 struct pulse_event {
 	u64 ts;
 	u16 freq;
 	u8 width;
 	u8 rssi;
+	bool chirp;
 };
 
 /**
@@ -59,6 +61,7 @@
  * @ppb: pulses per bursts for this type
  * @ppb_thresh: number of pulses required to trigger detection
  * @max_pri_tolerance: pulse time stamp tolerance on both sides [us]
+ * @chirp: chirp required for the radar pattern
  */
 struct radar_detector_specs {
 	u8 type_id;
@@ -70,6 +73,7 @@
 	u8 ppb;
 	u8 ppb_thresh;
 	u8 max_pri_tolerance;
+	bool chirp;
 };
 
 /**
diff --git a/drivers/net/wireless/ath/dfs_pri_detector.c b/drivers/net/wireless/ath/dfs_pri_detector.c
index 43b6081..1b5ad19 100644
--- a/drivers/net/wireless/ath/dfs_pri_detector.c
+++ b/drivers/net/wireless/ath/dfs_pri_detector.c
@@ -390,6 +390,10 @@
 	if ((ts - de->last_ts) < rs->max_pri_tolerance)
 		/* if delta to last pulse is too short, don't use this pulse */
 		return NULL;
+	/* radar detector spec requires a chirp, but none was detected */
+	if (rs->chirp && rs->chirp != event->chirp)
+		return NULL;
+
 	de->last_ts = ts;
 
 	max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 0783d2e..900e72a 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -944,12 +944,12 @@
 		WLAN_CIPHER_SUITE_CCMP,
 	};
 
-	wcn->hw->flags = IEEE80211_HW_SIGNAL_DBM |
-		IEEE80211_HW_HAS_RATE_CONTROL |
-		IEEE80211_HW_SUPPORTS_PS |
-		IEEE80211_HW_CONNECTION_MONITOR |
-		IEEE80211_HW_AMPDU_AGGREGATION |
-		IEEE80211_HW_TIMING_BEACON_ONLY;
+	ieee80211_hw_set(wcn->hw, TIMING_BEACON_ONLY);
+	ieee80211_hw_set(wcn->hw, AMPDU_AGGREGATION);
+	ieee80211_hw_set(wcn->hw, CONNECTION_MONITOR);
+	ieee80211_hw_set(wcn->hw, SUPPORTS_PS);
+	ieee80211_hw_set(wcn->hw, SIGNAL_DBM);
+	ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL);
 
 	wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 		BIT(NL80211_IFTYPE_AP) |
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index dbd8944..c9263e1 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -216,9 +216,7 @@
 		memcpy(&sta_params->bssid, vif->addr, ETH_ALEN);
 
 	sta_params->encrypt_type = priv_vif->encrypt_type;
-	sta_params->short_preamble_supported =
-		!(WCN36XX_FLAGS(wcn) &
-		  IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE);
+	sta_params->short_preamble_supported = true;
 
 	sta_params->rifs_mode = 0;
 	sta_params->rmf = 0;
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index caa717b..050506f 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -12,6 +12,7 @@
 wil6210-y += rx_reorder.o
 wil6210-y += ioctl.o
 wil6210-y += fw.o
+wil6210-y += pmc.o
 wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
 wil6210-y += wil_platform.o
 wil6210-y += ethtool.o
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index b971726..c79cfe0 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -289,6 +289,26 @@
 	}
 
 	wil_dbg_misc(wil, "Start scan_request 0x%p\n", request);
+	wil_dbg_misc(wil, "SSID count: %d\n", request->n_ssids);
+
+	for (i = 0; i < request->n_ssids; i++) {
+		wil_dbg_misc(wil, "SSID[%d]\n", i);
+		print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
+				     request->ssids[i].ssid,
+				     request->ssids[i].ssid_len);
+	}
+
+	if (request->n_ssids)
+		rc = wmi_set_ssid(wil, request->ssids[0].ssid_len,
+				  request->ssids[0].ssid);
+	else
+		rc = wmi_set_ssid(wil, 0, NULL);
+
+	if (rc) {
+		wil_err(wil, "set SSID for scan request failed: %d\n", rc);
+		return rc;
+	}
+
 	wil->scan_request = request;
 	mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO);
 
@@ -402,11 +422,8 @@
 	rsn_eid = sme->ie ?
 			cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
 			NULL;
-
-	if (sme->privacy && !rsn_eid) {
-		wil_err(wil, "Missing RSN IE for secure connection\n");
-		return -EINVAL;
-	}
+	if (sme->privacy && !rsn_eid)
+		wil_info(wil, "WSC connection\n");
 
 	bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
 			       sme->ssid, sme->ssid_len,
@@ -425,10 +442,17 @@
 	wil->privacy = sme->privacy;
 
 	if (wil->privacy) {
-		/* For secure assoc, send WMI_DELETE_CIPHER_KEY_CMD */
-		rc = wmi_del_cipher_key(wil, 0, bss->bssid);
+		/* For secure assoc, remove old keys */
+		rc = wmi_del_cipher_key(wil, 0, bss->bssid,
+					WMI_KEY_USE_PAIRWISE);
 		if (rc) {
-			wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD failed\n");
+			wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) failed\n");
+			goto out;
+		}
+		rc = wmi_del_cipher_key(wil, 0, bss->bssid,
+					WMI_KEY_USE_RX_GROUP);
+		if (rc) {
+			wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n");
 			goto out;
 		}
 	}
@@ -458,11 +482,18 @@
 		goto out;
 	}
 	if (wil->privacy) {
-		conn.dot11_auth_mode = WMI_AUTH11_SHARED;
-		conn.auth_mode = WMI_AUTH_WPA2_PSK;
-		conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
-		conn.pairwise_crypto_len = 16;
-	} else {
+		if (rsn_eid) { /* regular secure connection */
+			conn.dot11_auth_mode = WMI_AUTH11_SHARED;
+			conn.auth_mode = WMI_AUTH_WPA2_PSK;
+			conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
+			conn.pairwise_crypto_len = 16;
+			conn.group_crypto_type = WMI_CRYPT_AES_GCMP;
+			conn.group_crypto_len = 16;
+		} else { /* WSC */
+			conn.dot11_auth_mode = WMI_AUTH11_WSC;
+			conn.auth_mode = WMI_AUTH_NONE;
+		}
+	} else { /* insecure connection */
 		conn.dot11_auth_mode = WMI_AUTH11_OPEN;
 		conn.auth_mode = WMI_AUTH_NONE;
 	}
@@ -507,6 +538,8 @@
 	int rc;
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 
+	wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code);
+
 	rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0);
 
 	return rc;
@@ -561,6 +594,39 @@
 	return 0;
 }
 
+static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
+					       bool pairwise)
+{
+	struct wireless_dev *wdev = wil->wdev;
+	enum wmi_key_usage rc;
+	static const char * const key_usage_str[] = {
+		[WMI_KEY_USE_PAIRWISE]	= "WMI_KEY_USE_PAIRWISE",
+		[WMI_KEY_USE_RX_GROUP]	= "WMI_KEY_USE_RX_GROUP",
+		[WMI_KEY_USE_TX_GROUP]	= "WMI_KEY_USE_TX_GROUP",
+	};
+
+	if (pairwise) {
+		rc = WMI_KEY_USE_PAIRWISE;
+	} else {
+		switch (wdev->iftype) {
+		case NL80211_IFTYPE_STATION:
+			rc = WMI_KEY_USE_RX_GROUP;
+			break;
+		case NL80211_IFTYPE_AP:
+			rc = WMI_KEY_USE_TX_GROUP;
+			break;
+		default:
+			/* TODO: Rx GTK or Tx GTK? */
+			wil_err(wil, "Can't determine GTK type\n");
+			rc = WMI_KEY_USE_RX_GROUP;
+			break;
+		}
+	}
+	wil_dbg_misc(wil, "%s() -> %s\n", __func__, key_usage_str[rc]);
+
+	return rc;
+}
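
Summarizing the mapping this helper implements:

	/* pairwise key               -> WMI_KEY_USE_PAIRWISE           */
	/* group key, STATION iftype  -> WMI_KEY_USE_RX_GROUP           */
	/* group key, AP iftype       -> WMI_KEY_USE_TX_GROUP           */
	/* group key, other iftypes   -> WMI_KEY_USE_RX_GROUP + error   */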
+
 static int wil_cfg80211_add_key(struct wiphy *wiphy,
 				struct net_device *ndev,
 				u8 key_index, bool pairwise,
@@ -568,13 +634,13 @@
 				struct key_params *params)
 {
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+	enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
 
-	/* group key is not used */
-	if (!pairwise)
-		return 0;
+	wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
+		     pairwise ? "PTK" : "GTK");
 
-	return wmi_add_cipher_key(wil, key_index, mac_addr,
-				  params->key_len, params->key);
+	return wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
+				  params->key, key_usage);
 }
 
 static int wil_cfg80211_del_key(struct wiphy *wiphy,
@@ -583,12 +649,12 @@
 				const u8 *mac_addr)
 {
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+	enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
 
-	/* group key is not used */
-	if (!pairwise)
-		return 0;
+	wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
+		     pairwise ? "PTK" : "GTK");
 
-	return wmi_del_cipher_key(wil, key_index, mac_addr);
+	return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage);
 }
 
 /* Need to be present or wiphy_new() will WARN */
@@ -661,11 +727,6 @@
 	if (bcon->probe_resp_len <= hlen)
 		return 0;
 
-	if (!bcon->proberesp_ies) {
-		bcon->proberesp_ies = f->u.probe_resp.variable;
-		bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
-		rc = 1;
-	}
 	if (!bcon->assocresp_ies) {
 		bcon->assocresp_ies = f->u.probe_resp.variable;
 		bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
@@ -680,9 +741,19 @@
 				      struct cfg80211_beacon_data *bcon)
 {
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+	struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
+	size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+	const u8 *pr_ies = NULL;
+	size_t pr_ies_len = 0;
 	int rc;
 
 	wil_dbg_misc(wil, "%s()\n", __func__);
+	wil_print_bcon_data(bcon);
+
+	if (bcon->probe_resp_len > hlen) {
+		pr_ies = f->u.probe_resp.variable;
+		pr_ies_len = bcon->probe_resp_len - hlen;
+	}
 
 	if (wil_fix_bcon(wil, bcon)) {
 		wil_dbg_misc(wil, "Fixed bcon\n");
@@ -695,9 +766,7 @@
 	 * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
 	 * bcon->beacon_ies);
 	 */
-	rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP,
-			bcon->proberesp_ies_len,
-			bcon->proberesp_ies);
+	rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
 	if (rc) {
 		wil_err(wil, "set_ie(PROBE_RESP) failed\n");
 		return rc;
@@ -725,6 +794,11 @@
 	struct cfg80211_beacon_data *bcon = &info->beacon;
 	struct cfg80211_crypto_settings *crypto = &info->crypto;
 	u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+	struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
+	size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+	const u8 *pr_ies = NULL;
+	size_t pr_ies_len = 0;
+	u8 hidden_ssid;
 
 	wil_dbg_misc(wil, "%s()\n", __func__);
 
@@ -737,6 +811,8 @@
 		     channel->center_freq, info->privacy ? "secure" : "open");
 	wil_dbg_misc(wil, "Privacy: %d auth_type %d\n",
 		     info->privacy, info->auth_type);
+	wil_dbg_misc(wil, "Hidden SSID mode: %d\n",
+		     info->hidden_ssid);
 	wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
 		     info->dtim_period);
 	print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
@@ -744,6 +820,11 @@
 	wil_print_bcon_data(bcon);
 	wil_print_crypto(wil, crypto);
 
+	if (bcon->probe_resp_len > hlen) {
+		pr_ies = f->u.probe_resp.variable;
+		pr_ies_len = bcon->probe_resp_len - hlen;
+	}
+
 	if (wil_fix_bcon(wil, bcon)) {
 		wil_dbg_misc(wil, "Fixed bcon\n");
 		wil_print_bcon_data(bcon);
@@ -771,17 +852,34 @@
 	 * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
 	 * bcon->beacon_ies);
 	 */
-	wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
-		   bcon->proberesp_ies);
+	wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
 	wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
 		   bcon->assocresp_ies);
 
 	wil->privacy = info->privacy;
 
+	switch (info->hidden_ssid) {
+	case NL80211_HIDDEN_SSID_NOT_IN_USE:
+		hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
+		break;
+
+	case NL80211_HIDDEN_SSID_ZERO_LEN:
+		hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY;
+		break;
+
+	case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
+		hidden_ssid = WMI_HIDDEN_SSID_CLEAR;
+		break;
+
+	default:
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+
 	netif_carrier_on(ndev);
 
 	rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
-			   channel->hw_value);
+			   channel->hw_value, hidden_ssid);
 	if (rc)
 		goto err_pcp_start;
 
@@ -814,13 +912,9 @@
 	wmi_pcp_stop(wil);
 
 	__wil_down(wil);
-	__wil_up(wil);
 
 	mutex_unlock(&wil->mutex);
 
-	/* some functions above might fail (e.g. __wil_up). Nevertheless, we
-	 * return success because AP has stopped
-	 */
 	return 0;
 }
 
@@ -830,6 +924,9 @@
 {
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 
+	wil_dbg_misc(wil, "%s(%pM, reason=%d)\n", __func__, params->mac,
+		     params->reason_code);
+
 	mutex_lock(&wil->mutex);
 	wil6210_disconnect(wil, params->mac, params->reason_code, false);
 	mutex_unlock(&wil->mutex);
@@ -967,8 +1064,7 @@
 
 static void wil_wiphy_init(struct wiphy *wiphy)
 {
-	/* TODO: set real value */
-	wiphy->max_scan_ssids = 10;
+	wiphy->max_scan_ssids = 1;
 	wiphy->max_scan_ie_len = WMI_MAX_IE_LEN;
 	wiphy->max_num_pmkids = 0 /* TODO: */;
 	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index bbc22d8..75219a1b 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -24,6 +24,7 @@
 #include "wil6210.h"
 #include "wmi.h"
 #include "txrx.h"
+#include "pmc.h"
 
 /* Nasty hack. Better have per device instances */
 static u32 mem_addr;
@@ -123,15 +124,17 @@
 
 			if (cid < WIL6210_MAX_CID)
 				seq_printf(s,
-					   "\n%pM CID %d TID %d BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
+					   "\n%pM CID %d TID %d 1x%s BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
 					   wil->sta[cid].addr, cid, tid,
+					   txdata->dot1x_open ? "+" : "-",
 					   txdata->agg_wsize,
 					   txdata->agg_timeout,
 					   txdata->agg_amsdu ? "+" : "-",
 					   used, avail, sidle);
 			else
 				seq_printf(s,
-					   "\nBroadcast [%3d|%3d] idle %s\n",
+					   "\nBroadcast 1x%s [%3d|%3d] idle %s\n",
+					   txdata->dot1x_open ? "+" : "-",
 					   used, avail, sidle);
 
 			wil_print_vring(s, wil, name, vring, '_', 'H');
@@ -702,6 +705,89 @@
 	.open  = simple_open,
 };
 
+/* pmc control, write:
+ * - "alloc <num descriptors> <descriptor_size>" to allocate PMC
+ * - "free" to release memory allocated for PMC
+ */
+static ssize_t wil_write_pmccfg(struct file *file, const char __user *buf,
+				size_t len, loff_t *ppos)
+{
+	struct wil6210_priv *wil = file->private_data;
+	int rc;
+	char *kbuf = kmalloc(len + 1, GFP_KERNEL);
+	char cmd[9];
+	int num_descs, desc_size;
+
+	if (!kbuf)
+		return -ENOMEM;
+
+	rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
+	if (rc != len) {
+		kfree(kbuf);
+		return rc >= 0 ? -EIO : rc;
+	}
+
+	kbuf[len] = '\0';
+	rc = sscanf(kbuf, "%8s %d %d", cmd, &num_descs, &desc_size);
+	kfree(kbuf);
+
+	if (rc < 0)
+		return rc;
+
+	if (rc < 1) {
+		wil_err(wil, "pmccfg: no params given\n");
+		return -EINVAL;
+	}
+
+	if (0 == strcmp(cmd, "alloc")) {
+		if (rc != 3) {
+			wil_err(wil, "pmccfg: alloc requires 2 params\n");
+			return -EINVAL;
+		}
+		wil_pmc_alloc(wil, num_descs, desc_size);
+	} else if (0 == strcmp(cmd, "free")) {
+		if (rc != 1) {
+			wil_err(wil, "pmccfg: free does not have any params\n");
+			return -EINVAL;
+		}
+		wil_pmc_free(wil, true);
+	} else {
+		wil_err(wil, "pmccfg: Unrecognized command \"%s\"\n", cmd);
+		return -EINVAL;
+	}
+
+	return len;
+}
+
+static ssize_t wil_read_pmccfg(struct file *file, char __user *user_buf,
+			       size_t count, loff_t *ppos)
+{
+	struct wil6210_priv *wil = file->private_data;
+	char text[256];
+	char help[] = "pmc control, write:\n"
+	" - \"alloc <num descriptors> <descriptor_size>\" to allocate pmc\n"
+	" - \"free\" to free memory allocated for pmc\n";
+
+	sprintf(text, "Last command status: %d\n\n%s",
+		wil_pmc_last_cmd_status(wil),
+		help);
+
+	return simple_read_from_buffer(user_buf, count, ppos, text,
+				       strlen(text) + 1);
+}
+
+static const struct file_operations fops_pmccfg = {
+	.read = wil_read_pmccfg,
+	.write = wil_write_pmccfg,
+	.open  = simple_open,
+};
+
+static const struct file_operations fops_pmcdata = {
+	.open		= simple_open,
+	.read		= wil_pmc_read,
+	.llseek		= wil_pmc_llseek,
+};
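
Together, pmccfg and pmcdata give a simple capture workflow from
userspace. A hypothetical session (the debugfs path is illustrative; the
actual directory depends on where the driver registers its debugfs root):

	/* assuming debugfs is mounted at /sys/kernel/debug:
	 *   echo "alloc 256 2048" > .../pmccfg    allocate 256 x 2048 B
	 *   cat .../pmccfg                        show last command status
	 *   dd if=.../pmcdata bs=2048 > pmc.bin   dump the captured data
	 *   echo "free" > .../pmccfg              release the memory
	 */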
+
 /*---tx_mgmt---*/
 /* Write mgmt frame to this file to send it */
 static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
@@ -1111,8 +1197,7 @@
 			status = "connected";
 			break;
 		}
-		seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
-			   (p->data_port_open ? " data_port_open" : ""));
+		seq_printf(s, "[%d] %pM %s\n", i, p->addr, status);
 
 		if (p->status == wil_sta_connected) {
 			rc = wil_cid_fill_sinfo(wil, i, &sinfo);
@@ -1275,7 +1360,7 @@
 __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
 {
 	struct wil6210_priv *wil = s->private;
-	int i, tid;
+	int i, tid, mcs;
 
 	for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
 		struct wil_sta_info *p = &wil->sta[i];
@@ -1292,8 +1377,7 @@
 			status = "connected";
 			break;
 		}
-		seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
-			   (p->data_port_open ? " data_port_open" : ""));
+		seq_printf(s, "[%d] %pM %s\n", i, p->addr, status);
 
 		if (p->status == wil_sta_connected) {
 			spin_lock_bh(&p->tid_rx_lock);
@@ -1306,6 +1390,12 @@
 				}
 			}
 			spin_unlock_bh(&p->tid_rx_lock);
+			seq_puts(s, "Rx/MCS:");
+			for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs);
+			     mcs++)
+				seq_printf(s, " %lld",
+					   p->stats.rx_per_mcs[mcs]);
+			seq_puts(s, "\n");
 		}
 	}
 
@@ -1363,6 +1453,8 @@
 	{"tx_mgmt",		  S_IWUSR,	&fops_txmgmt},
 	{"wmi_send",		  S_IWUSR,	&fops_wmi},
 	{"back",	S_IRUGO | S_IWUSR,	&fops_back},
+	{"pmccfg",	S_IRUGO | S_IWUSR,	&fops_pmccfg},
+	{"pmcdata",	S_IRUGO,		&fops_pmcdata},
 	{"temp",	S_IRUGO,		&fops_temp},
 	{"freq",	S_IRUGO,		&fops_freq},
 	{"link",	S_IRUGO,		&fops_link},
@@ -1440,6 +1532,8 @@
 	if (IS_ERR_OR_NULL(dbg))
 		return -ENODEV;
 
+	wil_pmc_init(wil);
+
 	wil6210_debugfs_init_files(wil, dbg);
 	wil6210_debugfs_init_isr(wil, dbg);
 	wil6210_debugfs_init_blobs(wil, dbg);
@@ -1459,4 +1553,9 @@
 {
 	debugfs_remove_recursive(wil->debug);
 	wil->debug = NULL;
+
+	/* free pmc memory without sending command to fw, as it will
+	 * be reset on the way down anyway
+	 */
+	wil_pmc_free(wil, false);
 }
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index c2a2384..b9febab 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -25,6 +25,10 @@
 #define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000
 #define WAIT_FOR_DISCONNECT_INTERVAL_MS 10
 
+bool debug_fw; /* = false; */
+module_param(debug_fw, bool, S_IRUGO);
+MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
+
 bool no_fw_recovery;
 module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
@@ -96,6 +100,8 @@
 MODULE_PARM_DESC(rx_ring_order, " Rx ring order; size = 1 << order");
 module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, S_IRUGO);
 MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order");
+module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, S_IRUGO);
+MODULE_PARM_DESC(bcast_ring_order, " Bcast ring order; size = 1 << order");
 
 #define RST_DELAY (20) /* msec, for loop in @wil_target_reset */
 #define RST_COUNT (1 + 1000/RST_DELAY) /* round up to be above 1 sec total */
@@ -146,7 +152,6 @@
 	wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
 		     sta->status);
 
-	sta->data_port_open = false;
 	if (sta->status != wil_sta_unused) {
 		if (!from_event)
 			wmi_disconnect_sta(wil, sta->addr, reason_code);
@@ -224,7 +229,7 @@
 		if (test_bit(wil_status_fwconnected, wil->status)) {
 			clear_bit(wil_status_fwconnected, wil->status);
 			cfg80211_disconnected(ndev, reason_code,
-					      NULL, 0, GFP_KERNEL);
+					      NULL, 0, false, GFP_KERNEL);
 		} else if (test_bit(wil_status_fwconnecting, wil->status)) {
 			cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
 						WLAN_STATUS_UNSPECIFIED_FAILURE,
@@ -373,9 +378,10 @@
 	if (ri < 0)
 		return ri;
 
+	wil->bcast_vring = ri;
 	rc = wil_vring_init_bcast(wil, ri, 1 << bcast_ring_order);
-	if (rc == 0)
-		wil->bcast_vring = ri;
+	if (rc)
+		wil->bcast_vring = -1;
 
 	return rc;
 }
@@ -547,7 +553,7 @@
 static int wil_target_reset(struct wil6210_priv *wil)
 {
 	int delay = 0;
-	u32 x;
+	u32 x, x1 = 0;
 
 	wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
 
@@ -602,12 +608,16 @@
 	do {
 		msleep(RST_DELAY);
 		x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
+		if (x1 != x) {
+			wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x);
+			x1 = x;
+		}
 		if (delay++ > RST_COUNT) {
 			wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
 				x);
 			return -ETIME;
 		}
-	} while (!(x & BIT_BL_READY));
+	} while (x != BIT_BL_READY);
 
 	C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
 
@@ -686,6 +696,17 @@
 	WARN_ON(!mutex_is_locked(&wil->mutex));
 	WARN_ON(test_bit(wil_status_napi_en, wil->status));
 
+	if (debug_fw) {
+		static const u8 mac[ETH_ALEN] = {
+			0x00, 0xde, 0xad, 0x12, 0x34, 0x56,
+		};
+		struct net_device *ndev = wil_to_ndev(wil);
+
+		ether_addr_copy(ndev->perm_addr, mac);
+		ether_addr_copy(ndev->dev_addr, ndev->perm_addr);
+		return 0;
+	}
+
 	cancel_work_sync(&wil->disconnect_worker);
 	wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
 	wil_bcast_fini(wil);
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index f2f7ea2..8ef18ac 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -24,6 +24,11 @@
 
 	wil_dbg_misc(wil, "%s()\n", __func__);
 
+	if (debug_fw) {
+		wil_err(wil, "%s() while in debug_fw mode\n", __func__);
+		return -EINVAL;
+	}
+
 	return wil_up(wil);
 }
 
@@ -127,7 +132,7 @@
 	dev->tx_queue_len = WIL_TX_Q_LEN_DEFAULT;
 }
 
-void *wil_if_alloc(struct device *dev, void __iomem *csr)
+void *wil_if_alloc(struct device *dev)
 {
 	struct net_device *ndev;
 	struct wireless_dev *wdev;
@@ -142,7 +147,6 @@
 	}
 
 	wil = wdev_to_wil(wdev);
-	wil->csr = csr;
 	wil->wdev = wdev;
 
 	wil_dbg_misc(wil, "%s()\n", __func__);
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 1099861..aa3ecc6 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -27,10 +27,6 @@
 		 " Use MSI interrupt: "
 		 "0 - don't, 1 - (default) - single, or 3");
 
-static bool debug_fw; /* = false; */
-module_param(debug_fw, bool, S_IRUGO);
-MODULE_PARM_DESC(debug_fw, " load driver if FW not ready. For FW debug");
-
 static
 void wil_set_capabilities(struct wil6210_priv *wil)
 {
@@ -133,8 +129,6 @@
 	mutex_lock(&wil->mutex);
 	rc = wil_reset(wil, false);
 	mutex_unlock(&wil->mutex);
-	if (debug_fw)
-		rc = 0;
 	if (rc)
 		goto release_irq;
 
@@ -169,7 +163,6 @@
 {
 	struct wil6210_priv *wil;
 	struct device *dev = &pdev->dev;
-	void __iomem *csr;
 	int rc;
 
 	/* check HW */
@@ -184,9 +177,28 @@
 		return -ENODEV;
 	}
 
+	wil = wil_if_alloc(dev);
+	if (IS_ERR(wil)) {
+		rc = (int)PTR_ERR(wil);
+		dev_err(dev, "wil_if_alloc failed: %d\n", rc);
+		return rc;
+	}
+	wil->pdev = pdev;
+	pci_set_drvdata(pdev, wil);
+	/* rollback to if_free */
+
+	wil->platform_handle =
+			wil_platform_init(&pdev->dev, &wil->platform_ops);
+	if (!wil->platform_handle) {
+		rc = -ENODEV;
+		wil_err(wil, "wil_platform_init failed\n");
+		goto if_free;
+	}
+	/* rollback to err_plat */
+
 	rc = pci_enable_device(pdev);
 	if (rc) {
-		dev_err(&pdev->dev,
+		wil_err(wil,
 			"pci_enable_device failed, retry with MSI only\n");
 		/* Workaround for platforms that can't allocate IRQ:
 		 * retry with MSI only
@@ -194,47 +206,37 @@
 		pdev->msi_enabled = 1;
 		rc = pci_enable_device(pdev);
 	}
-	if (rc)
-		return -ENODEV;
+	if (rc) {
+		wil_err(wil,
+			"pci_enable_device failed, even with MSI only\n");
+		goto err_plat;
+	}
 	/* rollback to err_disable_pdev */
 
 	rc = pci_request_region(pdev, 0, WIL_NAME);
 	if (rc) {
-		dev_err(&pdev->dev, "pci_request_region failed\n");
+		wil_err(wil, "pci_request_region failed\n");
 		goto err_disable_pdev;
 	}
 	/* rollback to err_release_reg */
 
-	csr = pci_ioremap_bar(pdev, 0);
-	if (!csr) {
-		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
+	wil->csr = pci_ioremap_bar(pdev, 0);
+	if (!wil->csr) {
+		wil_err(wil, "pci_ioremap_bar failed\n");
 		rc = -ENODEV;
 		goto err_release_reg;
 	}
 	/* rollback to err_iounmap */
-	dev_info(&pdev->dev, "CSR at %pR -> 0x%p\n", &pdev->resource[0], csr);
+	wil_info(wil, "CSR at %pR -> 0x%p\n", &pdev->resource[0], wil->csr);
 
-	wil = wil_if_alloc(dev, csr);
-	if (IS_ERR(wil)) {
-		rc = (int)PTR_ERR(wil);
-		dev_err(dev, "wil_if_alloc failed: %d\n", rc);
-		goto err_iounmap;
-	}
-	/* rollback to if_free */
-
-	pci_set_drvdata(pdev, wil);
-	wil->pdev = pdev;
 	wil_set_capabilities(wil);
 	wil6210_clear_irq(wil);
 
-	wil->platform_handle =
-			wil_platform_init(&pdev->dev, &wil->platform_ops);
-
 	/* FW should raise IRQ when ready */
 	rc = wil_if_pcie_enable(wil);
 	if (rc) {
 		wil_err(wil, "Enable device failed\n");
-		goto if_free;
+		goto err_iounmap;
 	}
 	/* rollback to bus_disable */
 
@@ -249,18 +251,19 @@
 
 	return 0;
 
- bus_disable:
+bus_disable:
 	wil_if_pcie_disable(wil);
- if_free:
+err_iounmap:
+	pci_iounmap(pdev, wil->csr);
+err_release_reg:
+	pci_release_region(pdev, 0);
+err_disable_pdev:
+	pci_disable_device(pdev);
+err_plat:
 	if (wil->platform_ops.uninit)
 		wil->platform_ops.uninit(wil->platform_handle);
+if_free:
 	wil_if_free(wil);
- err_iounmap:
-	pci_iounmap(pdev, csr);
- err_release_reg:
-	pci_release_region(pdev, 0);
- err_disable_pdev:
-	pci_disable_device(pdev);
 
 	return rc;
 }
@@ -275,12 +278,12 @@
 	wil6210_debugfs_remove(wil);
 	wil_if_remove(wil);
 	wil_if_pcie_disable(wil);
-	if (wil->platform_ops.uninit)
-		wil->platform_ops.uninit(wil->platform_handle);
-	wil_if_free(wil);
 	pci_iounmap(pdev, csr);
 	pci_release_region(pdev, 0);
 	pci_disable_device(pdev);
+	if (wil->platform_ops.uninit)
+		wil->platform_ops.uninit(wil->platform_handle);
+	wil_if_free(wil);
 }
 
 static const struct pci_device_id wil6210_pcie_ids[] = {
@@ -297,7 +300,27 @@
 	.name		= WIL_NAME,
 };
 
-module_pci_driver(wil6210_driver);
+static int __init wil6210_driver_init(void)
+{
+	int rc;
+
+	rc = wil_platform_modinit();
+	if (rc)
+		return rc;
+
+	rc = pci_register_driver(&wil6210_driver);
+	if (rc)
+		wil_platform_modexit();
+	return rc;
+}
+module_init(wil6210_driver_init);
+
+static void __exit wil6210_driver_exit(void)
+{
+	pci_unregister_driver(&wil6210_driver);
+	wil_platform_modexit();
+}
+module_exit(wil6210_driver_exit);
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Qualcomm Atheros <wil6210@qca.qualcomm.com>");
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
new file mode 100644
index 0000000..8a8cdc6
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include "wmi.h"
+#include "wil6210.h"
+#include "txrx.h"
+#include "pmc.h"
+
+struct desc_alloc_info {
+	dma_addr_t pa;
+	void	  *va;
+};
+
+static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
+{
+	return !!pmc->pring_va;
+}
+
+void wil_pmc_init(struct wil6210_priv *wil)
+{
+	memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
+	mutex_init(&wil->pmc.lock);
+}
+
+/**
+ * Allocate the physical ring (p-ring) and the required
+ * number of descriptors of the requested size.
+ * Initialize the descriptors as required by pmc dma.
+ * Each dword of a descriptor's buffer is initialized to hold
+ * that dword's serial number in the lsw and the reserved value
+ * PCM_DATA_INVALID_DW_VAL in the msw.
+ */
+void wil_pmc_alloc(struct wil6210_priv *wil,
+		   int num_descriptors,
+		   int descriptor_size)
+{
+	u32 i;
+	struct pmc_ctx *pmc = &wil->pmc;
+	struct device *dev = wil_to_dev(wil);
+	struct wmi_pmc_cmd pmc_cmd = {0};
+
+	mutex_lock(&pmc->lock);
+
+	if (wil_is_pmc_allocated(pmc)) {
+		/* sanity check */
+		wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__);
+		goto no_release_err;
+	}
+
+	pmc->num_descriptors = num_descriptors;
+	pmc->descriptor_size = descriptor_size;
+
+	wil_dbg_misc(wil, "%s: %d descriptors x %d bytes each\n",
+		     __func__, num_descriptors, descriptor_size);
+
+	/* allocate descriptors info list in pmc context*/
+	pmc->descriptors = kcalloc(num_descriptors,
+				  sizeof(struct desc_alloc_info),
+				  GFP_KERNEL);
+	if (!pmc->descriptors) {
+		wil_err(wil, "%s: ERROR allocating pmc descriptors\n", __func__);
+		goto no_release_err;
+	}
+
+	wil_dbg_misc(wil,
+		     "%s: allocated descriptors info list %p\n",
+		     __func__, pmc->descriptors);
+
+	/* Allocate pring buffer and descriptors.
+	 * vring->va should be aligned on its size rounded up to a power of 2.
+	 * This is guaranteed by dma_alloc_coherent()
+	 */
+	pmc->pring_va = dma_alloc_coherent(dev,
+			sizeof(struct vring_tx_desc) * num_descriptors,
+			&pmc->pring_pa,
+			GFP_KERNEL);
+
+	wil_dbg_misc(wil,
+		     "%s: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
+		     __func__,
+		     pmc->pring_va, &pmc->pring_pa,
+		     sizeof(struct vring_tx_desc),
+		     num_descriptors,
+		     sizeof(struct vring_tx_desc) * num_descriptors);
+
+	if (!pmc->pring_va) {
+		wil_err(wil, "%s: ERROR allocating pmc pring\n", __func__);
+		goto release_pmc_skb_list;
+	}
+
+	/* initially, all descriptors are SW owned
+	 * For Tx, Rx, and PMC the ownership bit is at the same location,
+	 * so we can use any of the three descriptor layouts
+	 */
+	for (i = 0; i < num_descriptors; i++) {
+		struct vring_tx_desc *_d = &pmc->pring_va[i];
+		struct vring_tx_desc dd, *d = &dd;
+		int j = 0;
+
+		pmc->descriptors[i].va = dma_alloc_coherent(dev,
+			descriptor_size,
+			&pmc->descriptors[i].pa,
+			GFP_KERNEL);
+
+		if (unlikely(!pmc->descriptors[i].va)) {
+			wil_err(wil,
+				"%s: ERROR allocating pmc descriptor %d\n",
+				__func__, i);
+			goto release_pmc_skbs;
+		}
+
+		for (j = 0; j < descriptor_size / sizeof(u32); j++) {
+			u32 *p = (u32 *)pmc->descriptors[i].va + j;
+			*p = PCM_DATA_INVALID_DW_VAL | j;
+		}
+
+		/* configure dma descriptor */
+		d->dma.addr.addr_low =
+			cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
+		d->dma.addr.addr_high =
+			cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
+		d->dma.status = 0; /* 0 = HW_OWNED */
+		d->dma.length = cpu_to_le16(descriptor_size);
+		d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
+		*_d = *d;
+	}
+
+	wil_dbg_misc(wil, "%s: allocated successfully\n", __func__);
+
+	pmc_cmd.op = WMI_PMC_ALLOCATE;
+	pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
+	pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);
+
+	wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with ALLOCATE op\n", __func__);
+	pmc->last_cmd_status = wmi_send(wil,
+					WMI_PMC_CMDID,
+					&pmc_cmd,
+					sizeof(pmc_cmd));
+	if (pmc->last_cmd_status) {
+		wil_err(wil,
+			"%s: WMI_PMC_CMD with ALLOCATE op failed, status %d\n",
+			__func__, pmc->last_cmd_status);
+		goto release_pmc_skbs;
+	}
+
+	mutex_unlock(&pmc->lock);
+
+	return;
+
+release_pmc_skbs:
+	wil_err(wil, "%s: exit on error: Releasing descriptors...\n", __func__);
+	for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
+		dma_free_coherent(dev,
+				  descriptor_size,
+				  pmc->descriptors[i].va,
+				  pmc->descriptors[i].pa);
+
+		pmc->descriptors[i].va = NULL;
+	}
+	wil_err(wil, "%s: exit on error: Releasing pring...\n", __func__);
+
+	dma_free_coherent(dev,
+			  sizeof(struct vring_tx_desc) * num_descriptors,
+			  pmc->pring_va,
+			  pmc->pring_pa);
+
+	pmc->pring_va = NULL;
+
+release_pmc_skb_list:
+	wil_err(wil, "%s: exit on error: Releasing descriptors info list...\n",
+		__func__);
+	kfree(pmc->descriptors);
+	pmc->descriptors = NULL;
+
+no_release_err:
+	pmc->last_cmd_status = -ENOMEM;
+	mutex_unlock(&pmc->lock);
+}
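
The fill pattern written by the allocation loop makes post-mortem
inspection easy: any dword still carrying the 0xB0BA tag in its msw was
never overwritten by the hardware. A hypothetical checker (the helper is
ours, not the driver's; valid while the dword index fits in the lsw,
i.e. descriptor_size <= 256 KiB):

	static bool pmc_dword_written_by_hw(u32 dw)
	{
		/* dword j starts life as PCM_DATA_INVALID_DW_VAL | j,
		 * e.g. dword 0 -> 0xB0BA0000, dword 5 -> 0xB0BA0005
		 */
		return (dw & 0xffff0000) != PCM_DATA_INVALID_DW_VAL;
	}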
+
+/**
+ * Traverse the p-ring and release all buffers.
+ * At the end release the p-ring memory
+ */
+void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
+{
+	struct pmc_ctx *pmc = &wil->pmc;
+	struct device *dev = wil_to_dev(wil);
+	struct wmi_pmc_cmd pmc_cmd = {0};
+
+	mutex_lock(&pmc->lock);
+
+	pmc->last_cmd_status = 0;
+
+	if (!wil_is_pmc_allocated(pmc)) {
+		wil_dbg_misc(wil, "%s: Error, can't free - not allocated\n",
+			     __func__);
+		pmc->last_cmd_status = -EPERM;
+		mutex_unlock(&pmc->lock);
+		return;
+	}
+
+	if (send_pmc_cmd) {
+		wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with RELEASE op\n",
+			     __func__);
+		pmc_cmd.op = WMI_PMC_RELEASE;
+		pmc->last_cmd_status =
+				wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd,
+					 sizeof(pmc_cmd));
+		if (pmc->last_cmd_status) {
+			wil_err(wil,
+				"%s: WMI_PMC_CMD with RELEASE op failed, status %d\n",
+				__func__, pmc->last_cmd_status);
+			/* There's nothing we can do with this error.
+			 * Normally, it should never occur.
+			 * Continue freeing all memory allocated for pmc.
+			 */
+		}
+	}
+
+	if (pmc->pring_va) {
+		size_t buf_size = sizeof(struct vring_tx_desc) *
+				  pmc->num_descriptors;
+
+		wil_dbg_misc(wil, "%s: free pring va %p\n",
+			     __func__, pmc->pring_va);
+		dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);
+
+		pmc->pring_va = NULL;
+	} else {
+		pmc->last_cmd_status = -ENOENT;
+	}
+
+	if (pmc->descriptors) {
+		int i;
+
+		for (i = 0;
+		     i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
+			dma_free_coherent(dev,
+					  pmc->descriptor_size,
+					  pmc->descriptors[i].va,
+					  pmc->descriptors[i].pa);
+			pmc->descriptors[i].va = NULL;
+		}
+		wil_dbg_misc(wil, "%s: free descriptor info %d/%d\n",
+			     __func__, i, pmc->num_descriptors);
+		wil_dbg_misc(wil,
+			     "%s: free pmc descriptors info list %p\n",
+			     __func__, pmc->descriptors);
+		kfree(pmc->descriptors);
+		pmc->descriptors = NULL;
+	} else {
+		pmc->last_cmd_status = -ENOENT;
+	}
+
+	mutex_unlock(&pmc->lock);
+}
+
+/**
+ * Status of the last operation requested via debugfs: alloc/free/read.
+ * Returns 0 for success, or a negative errno otherwise
+ */
+int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
+{
+	wil_dbg_misc(wil, "%s: status %d\n", __func__,
+		     wil->pmc.last_cmd_status);
+
+	return wil->pmc.last_cmd_status;
+}
+
+/**
+ * Read from the requested position up to the end of the current
+ * descriptor; length depends on the descriptor size set at alloc time.
+ */
+ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
+		     loff_t *f_pos)
+{
+	struct wil6210_priv *wil = filp->private_data;
+	struct pmc_ctx *pmc = &wil->pmc;
+	size_t retval = 0;
+	unsigned long long idx;
+	loff_t offset;
+	size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+
+	mutex_lock(&pmc->lock);
+
+	if (!wil_is_pmc_allocated(pmc)) {
+		wil_err(wil, "%s: error, pmc is not allocated!\n", __func__);
+		pmc->last_cmd_status = -EPERM;
+		mutex_unlock(&pmc->lock);
+		return -EPERM;
+	}
+
+	wil_dbg_misc(wil,
+		     "%s: size %u, pos %lld\n",
+		     __func__, (unsigned)count, *f_pos);
+
+	pmc->last_cmd_status = 0;
+
+	idx = *f_pos;
+	do_div(idx, pmc->descriptor_size);
+	offset = *f_pos - (idx * pmc->descriptor_size);
+
+	if (*f_pos >= pmc_size) {
+		wil_dbg_misc(wil, "%s: reached end of pmc buf: %lld >= %u\n",
+			     __func__, *f_pos, (unsigned)pmc_size);
+		pmc->last_cmd_status = -ERANGE;
+		goto out;
+	}
+
+	wil_dbg_misc(wil,
+		     "%s: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
+		     __func__, *f_pos, idx, offset, count);
+
+	/* if no errors, return the copied byte count */
+	retval = simple_read_from_buffer(buf,
+					 count,
+					 &offset,
+					 pmc->descriptors[idx].va,
+					 pmc->descriptor_size);
+	*f_pos += retval;
+out:
+	mutex_unlock(&pmc->lock);
+
+	return retval;
+}
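
The position math splits the flat file offset into a descriptor index and
an intra-descriptor offset, so a single read never crosses a descriptor
boundary. A worked example (illustrative numbers):

	/* descriptor_size = 2048, *f_pos = 5000:
	 *   idx    = 5000 / 2048     = 2
	 *   offset = 5000 - 2 * 2048 = 904
	 * the read starts 904 bytes into descriptor 2 and returns at
	 * most 2048 - 904 = 1144 bytes, whatever count was requested
	 */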
+
+loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
+{
+	loff_t newpos;
+	struct wil6210_priv *wil = filp->private_data;
+	struct pmc_ctx *pmc = &wil->pmc;
+	size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+
+	switch (whence) {
+	case 0: /* SEEK_SET */
+		newpos = off;
+		break;
+
+	case 1: /* SEEK_CUR */
+		newpos = filp->f_pos + off;
+		break;
+
+	case 2: /* SEEK_END */
+		newpos = pmc_size;
+		break;
+
+	default: /* can't happen */
+		return -EINVAL;
+	}
+
+	if (newpos < 0)
+		return -EINVAL;
+	if (newpos > pmc_size)
+		newpos = pmc_size;
+
+	filp->f_pos = newpos;
+
+	return newpos;
+}
diff --git a/drivers/net/wireless/ath/wil6210/pmc.h b/drivers/net/wireless/ath/wil6210/pmc.h
new file mode 100644
index 0000000..bebc8d5
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/pmc.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/types.h>
+
+#define PCM_DATA_INVALID_DW_VAL (0xB0BA0000)
+
+void wil_pmc_init(struct wil6210_priv *wil);
+void wil_pmc_alloc(struct wil6210_priv *wil,
+		   int num_descriptors, int descriptor_size);
+void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd);
+int wil_pmc_last_cmd_status(struct wil6210_priv *wil);
+ssize_t wil_pmc_read(struct file *, char __user *, size_t, loff_t *);
+loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index e8bd512..aa20af8 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -236,7 +236,7 @@
 		return -ENOMEM;
 	}
 
-	d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
+	d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
 	wil_desc_addr_set(&d->dma.addr, pa);
 	/* ip_length don't care */
 	/* b11 don't care */
@@ -427,6 +427,8 @@
 	cid = wil_rxdesc_cid(d);
 	stats = &wil->sta[cid].stats;
 	stats->last_mcs_rx = wil_rxdesc_mcs(d);
+	if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
+		stats->rx_per_mcs[stats->last_mcs_rx]++;
 
 	/* use radiotap header only if required */
 	if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
@@ -724,6 +726,8 @@
 
 	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
 
+	if (!wil->privacy)
+		txdata->dot1x_open = true;
 	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
 		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
 	if (rc)
@@ -738,11 +742,13 @@
 	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
 
 	txdata->enabled = 1;
-	if (wil->sta[cid].data_port_open && (agg_wsize >= 0))
+	if (txdata->dot1x_open && (agg_wsize >= 0))
 		wil_addba_tx_request(wil, id, agg_wsize);
 
 	return 0;
  out_free:
+	txdata->dot1x_open = false;
+	txdata->enabled = 0;
 	wil_vring_free(wil, vring, 1);
  out:
 
@@ -792,6 +798,8 @@
 
 	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
 
+	if (!wil->privacy)
+		txdata->dot1x_open = true;
 	rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
 		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
 	if (rc)
@@ -809,6 +817,8 @@
 
 	return 0;
  out_free:
+	txdata->enabled = 0;
+	txdata->dot1x_open = false;
 	wil_vring_free(wil, vring, 1);
  out:
 
@@ -828,6 +838,7 @@
 	wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);
 
 	spin_lock_bh(&txdata->lock);
+	txdata->dot1x_open = false;
 	txdata->enabled = 0; /* no Tx can be in progress or start anew */
 	spin_unlock_bh(&txdata->lock);
 	/* make sure NAPI won't touch this vring */
@@ -848,12 +859,11 @@
 	if (cid < 0)
 		return NULL;
 
-	if (!wil->sta[cid].data_port_open &&
-	    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
-		return NULL;
-
 	/* TODO: fix for multiple TID */
 	for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
+		if (!wil->vring_tx_data[i].dot1x_open &&
+		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+			continue;
 		if (wil->vring2cid_tid[i][0] == cid) {
 			struct vring *v = &wil->vring_tx[i];
 
@@ -883,7 +893,7 @@
 
 	/* In the STA mode, it is expected to have only 1 VRING
 	 * for the AP we connected to.
-	 * find 1-st vring and see whether it is eligible for data
+	 * find 1-st vring eligible for this skb and use it.
 	 */
 	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
 		v = &wil->vring_tx[i];
@@ -894,9 +904,9 @@
 		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
 			continue;
 
-		if (!wil->sta[cid].data_port_open &&
+		if (!wil->vring_tx_data[i].dot1x_open &&
 		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
-			break;
+			continue;
 
 		wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
 
@@ -918,7 +928,6 @@
  *    in all cases override dest address to unicast peer's address
  * Use old strategy when new is not supported yet:
  *  - for PBSS
- *  - for secure link
  */
 static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
 					 struct sk_buff *skb)
@@ -931,6 +940,9 @@
 	v = &wil->vring_tx[i];
 	if (!v->va)
 		return NULL;
+	if (!wil->vring_tx_data[i].dot1x_open &&
+	    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+		return NULL;
 
 	return v;
 }
@@ -963,7 +975,8 @@
 		cid = wil->vring2cid_tid[i][0];
 		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
 			continue;
-		if (!wil->sta[cid].data_port_open)
+		if (!wil->vring_tx_data[i].dot1x_open &&
+		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
 			continue;
 
 		/* don't Tx back to source when re-routing Rx->Tx at the AP */
@@ -989,7 +1002,8 @@
 		cid = wil->vring2cid_tid[i][0];
 		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
 			continue;
-		if (!wil->sta[cid].data_port_open)
+		if (!wil->vring_tx_data[i].dot1x_open &&
+		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
 			continue;
 
 		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
@@ -1016,9 +1030,6 @@
 	if (wdev->iftype != NL80211_IFTYPE_AP)
 		return wil_find_tx_bcast_2(wil, skb);
 
-	if (wil->privacy)
-		return wil_find_tx_bcast_2(wil, skb);
-
 	return wil_find_tx_bcast_1(wil, skb);
 }
 
@@ -1144,13 +1155,8 @@
 	wil_tx_desc_map(d, pa, len, vring_index);
 	if (unlikely(mcast)) {
 		d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
-		if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) {
-			/* set MCS 1 */
+		if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
 			d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
-			/* packet mode 2 */
-			d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS) |
-				       (2 << MAC_CFG_DESC_TX_1_PKT_MODE_POS);
-		}
 	}
 	/* Process TCP/UDP checksum offloading */
 	if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
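
The recurring condition in the hunks above (a ring carries only EAPOL frames until 802.1X completes on it) could be captured in one predicate; a sketch of the idea, not a helper this patch adds:

static bool wil_vring_may_tx_skb(struct wil6210_priv *wil, int ring,
                                 struct sk_buff *skb)
{
        /* before the 802.1X handshake completes, only EAPOL may pass */
        return wil->vring_tx_data[ring].dot1x_open ||
               skb->protocol == cpu_to_be16(ETH_P_PAE);
}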
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index d90c8aa..0c46384 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -384,19 +384,27 @@
  *  [word 7] length
  */
 
-#define RX_DMA_D0_CMD_DMA_IT     BIT(10)
+#define RX_DMA_D0_CMD_DMA_EOP	BIT(8)
+#define RX_DMA_D0_CMD_DMA_RT	BIT(9)  /* always 1 */
+#define RX_DMA_D0_CMD_DMA_IT	BIT(10) /* interrupt */
 
-/* Error field, offload bits */
-#define RX_DMA_ERROR_L3_ERR   BIT(4)
-#define RX_DMA_ERROR_L4_ERR   BIT(5)
+/* Error field */
+#define RX_DMA_ERROR_FCS	BIT(0)
+#define RX_DMA_ERROR_MIC	BIT(1)
+#define RX_DMA_ERROR_KEY	BIT(2) /* Key missing */
+#define RX_DMA_ERROR_REPLAY	BIT(3)
+#define RX_DMA_ERROR_L3_ERR	BIT(4)
+#define RX_DMA_ERROR_L4_ERR	BIT(5)
 
 /* Status field */
-#define RX_DMA_STATUS_DU         BIT(0)
-#define RX_DMA_STATUS_ERROR      BIT(2)
-
+#define RX_DMA_STATUS_DU	BIT(0)
+#define RX_DMA_STATUS_EOP	BIT(1)
+#define RX_DMA_STATUS_ERROR	BIT(2)
+#define RX_DMA_STATUS_MI	BIT(3) /* MAC Interrupt is asserted */
 #define RX_DMA_STATUS_L3I	BIT(4)
 #define RX_DMA_STATUS_L4I	BIT(5)
 #define RX_DMA_STATUS_PHY_INFO	BIT(6)
+#define RX_DMA_STATUS_FFM	BIT(7) /* EtherType Flex Filter Match */
 
 struct vring_rx_dma {
 	u32 d0;
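
With the expanded bit definitions, a completed RX descriptor can be classified roughly as follows; a sketch assuming the descriptor's 8-bit status and error fields that this header describes:

static bool wil_rx_desc_ok(u8 status, u8 error)
{
        if (!(status & RX_DMA_STATUS_DU))
                return false;           /* HW has not finished this descriptor */
        if (!(status & RX_DMA_STATUS_ERROR))
                return true;
        if (error & RX_DMA_ERROR_MIC)
                pr_debug("rx: MIC failure\n");
        else if (error & (RX_DMA_ERROR_KEY | RX_DMA_ERROR_REPLAY))
                pr_debug("rx: missing key or replayed frame\n");
        else if (error & (RX_DMA_ERROR_L3_ERR | RX_DMA_ERROR_L4_ERR))
                pr_debug("rx: L3/L4 checksum error\n");
        return false;
}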
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 4310972..275355d 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -21,6 +21,7 @@
 #include <linux/wireless.h>
 #include <net/cfg80211.h>
 #include <linux/timex.h>
+#include <linux/types.h>
 #include "wil_platform.h"
 
 extern bool no_fw_recovery;
@@ -29,10 +30,11 @@
 extern int agg_wsize;
 extern u32 vring_idle_trsh;
 extern bool rx_align_2;
+extern bool debug_fw;
 
 #define WIL_NAME "wil6210"
 #define WIL_FW_NAME "wil6210.fw" /* code */
-#define WIL_FW2_NAME "wil6210.board" /* board & radio parameters */
+#define WIL_FW2_NAME "wil6210.brd" /* board & radio parameters */
 
 #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
 
@@ -279,7 +281,7 @@
 };
 
 /* array size should be in sync with actual definition in the wmi.c */
-extern const struct fw_map fw_mapping[7];
+extern const struct fw_map fw_mapping[8];
 
 /**
  * mk_cidxtid - construct @cidxtid field
@@ -396,6 +398,7 @@
  * Additional data for Tx Vring
  */
 struct vring_tx_data {
+	bool dot1x_open;
 	int enabled;
 	cycles_t idle, last_idle, begin;
 	u8 agg_wsize; /* agreed aggregation window, 0 - no agg */
@@ -461,6 +464,7 @@
 };
 
 #define WIL_STA_TID_NUM (16)
+#define WIL_MCS_MAX (12) /* Maximum MCS supported */
 
 struct wil_net_stats {
 	unsigned long	rx_packets;
@@ -470,6 +474,7 @@
 	unsigned long	tx_errors;
 	unsigned long	rx_dropped;
 	u16 last_mcs_rx;
+	u64 rx_per_mcs[WIL_MCS_MAX + 1];
 };
 
 /**
@@ -484,7 +489,6 @@
 	u8 addr[ETH_ALEN];
 	enum wil_sta_status status;
 	struct wil_net_stats stats;
-	bool data_port_open; /* can send any data, not only EAPOL */
 	/* Rx BACK */
 	struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM];
 	spinlock_t tid_rx_lock; /* guarding tid_rx array */
@@ -526,6 +530,17 @@
 	u8 cid;
 };
 
+struct pmc_ctx {
+	/* alloc, free, and read operations must own the lock */
+	struct mutex		lock;
+	struct vring_tx_desc	*pring_va;
+	dma_addr_t		pring_pa;
+	struct desc_alloc_info  *descriptors;
+	int			last_cmd_status;
+	int			num_descriptors;
+	int			descriptor_size;
+};
+
 struct wil6210_priv {
 	struct pci_dev *pdev;
 	int n_msi;
@@ -610,6 +625,8 @@
 
 	void *platform_handle;
 	struct wil_platform_ops platform_ops;
+
+	struct pmc_ctx pmc;
 };
 
 #define wil_to_wiphy(i) (i->wdev->wiphy)
@@ -669,7 +686,7 @@
 void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
 			size_t count);
 
-void *wil_if_alloc(struct device *dev, void __iomem *csr);
+void *wil_if_alloc(struct device *dev);
 void wil_if_free(struct wil6210_priv *wil);
 int wil_if_add(struct wil6210_priv *wil);
 void wil_if_remove(struct wil6210_priv *wil);
@@ -701,9 +718,10 @@
 int wmi_set_channel(struct wil6210_priv *wil, int channel);
 int wmi_get_channel(struct wil6210_priv *wil, int *channel);
 int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
-		       const void *mac_addr);
+		       const void *mac_addr, int key_usage);
 int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
-		       const void *mac_addr, int key_len, const void *key);
+		       const void *mac_addr, int key_len, const void *key,
+		       int key_usage);
 int wmi_echo(struct wil6210_priv *wil);
 int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
 int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
@@ -746,7 +764,8 @@
 void wil_wdev_free(struct wil6210_priv *wil);
 
 int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
-int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan);
+int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
+		  u8 chan, u8 hidden_ssid);
 int wmi_pcp_stop(struct wil6210_priv *wil);
 void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
 			u16 reason_code, bool from_event);
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.c b/drivers/net/wireless/ath/wil6210/wil_platform.c
index 976a071..de15f14 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.c
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.c
@@ -17,6 +17,15 @@
 #include "linux/device.h"
 #include "wil_platform.h"
 
+int __init wil_platform_modinit(void)
+{
+	return 0;
+}
+
+void wil_platform_modexit(void)
+{
+}
+
 /**
  * wil_platform_init() - wil6210 platform module init
  *
@@ -26,10 +35,11 @@
  */
 void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops)
 {
-	void *handle = NULL;
+	void *handle = ops; /* to return some non-NULL for 'void' impl. */
 
 	if (!ops) {
-		dev_err(dev, "Invalid parameter. Cannot init platform module\n");
+		dev_err(dev,
+			"Invalid parameter. Cannot init platform module\n");
 		return NULL;
 	}
 
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index 158c73b..d7fa19b 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -31,4 +31,7 @@
 
 void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops);
 
+int __init wil_platform_modinit(void);
+void wil_platform_modexit(void);
+
 #endif /* __WIL_PLATFORM_H__ */
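
The new modinit/modexit pair gives the platform glue module-wide setup hooks. A plausible call pattern from the bus module's init path; function and driver names here are illustrative, not shown in this patch:

static int __init wil6210_driver_init(void)
{
        int rc = wil_platform_modinit();

        if (rc)
                return rc;

        rc = pci_register_driver(&wil6210_driver); /* assumed existing pci_driver */
        if (rc)
                wil_platform_modexit();
        return rc;
}

static void __exit wil6210_driver_exit(void)
{
        pci_unregister_driver(&wil6210_driver);
        wil_platform_modexit();
}
module_init(wil6210_driver_init);
module_exit(wil6210_driver_exit);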
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 9fe2085..c759759 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -85,6 +85,7 @@
 	{0x880000, 0x88a000, 0x880000, "rgf"},     /* various RGF       40k */
 	{0x88a000, 0x88b000, 0x88a000, "AGC_tbl"}, /* AGC table          4k */
 	{0x88b000, 0x88c000, 0x88b000, "rgf_ext"}, /* Pcie_ext_rgf       4k */
+	{0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext"}, /* mac_ext_rgf  512b */
 	{0x8c0000, 0x949000, 0x8c0000, "upper"},   /* upper area       548k */
 	/*
 	 * 920000..930000 ucode code RAM
@@ -543,55 +544,22 @@
 	}
 }
 
-static void wil_addba_tx_cid(struct wil6210_priv *wil, u8 cid, u16 wsize)
+static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len)
 {
-	struct vring_tx_data *t;
-	int i;
+	struct wmi_vring_en_event *evt = d;
+	u8 vri = evt->vring_index;
 
-	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
-		if (cid != wil->vring2cid_tid[i][0])
-			continue;
-		t = &wil->vring_tx_data[i];
-		if (!t->enabled)
-			continue;
+	wil_dbg_wmi(wil, "Enable vring %d\n", vri);
 
-		wil_addba_tx_request(wil, i, wsize);
-	}
-}
-
-static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
-{
-	struct wmi_data_port_open_event *evt = d;
-	u8 cid = evt->cid;
-
-	wil_dbg_wmi(wil, "Link UP for CID %d\n", cid);
-
-	if (cid >= ARRAY_SIZE(wil->sta)) {
-		wil_err(wil, "Link UP for invalid CID %d\n", cid);
+	if (vri >= ARRAY_SIZE(wil->vring_tx)) {
+		wil_err(wil, "Enable for invalid vring %d\n", vri);
 		return;
 	}
-
-	wil->sta[cid].data_port_open = true;
+	wil->vring_tx_data[vri].dot1x_open = true;
+	if (vri == wil->bcast_vring) /* no BA for bcast */
+		return;
 	if (agg_wsize >= 0)
-		wil_addba_tx_cid(wil, cid, agg_wsize);
-}
-
-static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
-{
-	struct net_device *ndev = wil_to_ndev(wil);
-	struct wmi_wbe_link_down_event *evt = d;
-	u8 cid = evt->cid;
-
-	wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n",
-		    cid, le32_to_cpu(evt->reason));
-
-	if (cid >= ARRAY_SIZE(wil->sta)) {
-		wil_err(wil, "Link DOWN for invalid CID %d\n", cid);
-		return;
-	}
-
-	wil->sta[cid].data_port_open = false;
-	netif_carrier_off(ndev);
+		wil_addba_tx_request(wil, vri, agg_wsize);
 }
 
 static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
@@ -695,11 +663,10 @@
 	{WMI_CONNECT_EVENTID,		wmi_evt_connect},
 	{WMI_DISCONNECT_EVENTID,	wmi_evt_disconnect},
 	{WMI_EAPOL_RX_EVENTID,		wmi_evt_eapol_rx},
-	{WMI_DATA_PORT_OPEN_EVENTID,	wmi_evt_linkup},
-	{WMI_WBE_LINKDOWN_EVENTID,	wmi_evt_linkdown},
 	{WMI_BA_STATUS_EVENTID,		wmi_evt_ba_status},
 	{WMI_RCP_ADDBA_REQ_EVENTID,	wmi_evt_addba_rx_req},
 	{WMI_DELBA_EVENTID,		wmi_evt_delba},
+	{WMI_VRING_EN_EVENTID,		wmi_evt_vring_en},
 };
 
 /*
@@ -844,7 +811,7 @@
 	};
 
 	return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
-			 WMI_ECHO_RSP_EVENTID, NULL, 0, 20);
+			WMI_ECHO_RSP_EVENTID, NULL, 0, 50);
 }
 
 int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
@@ -858,7 +825,8 @@
 	return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
 }
 
-int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
+int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
+		  u8 chan, u8 hidden_ssid)
 {
 	int rc;
 
@@ -868,6 +836,7 @@
 		.disable_sec_offload = 1,
 		.channel = chan - 1,
 		.pcp_max_assoc_sta = max_assoc_sta,
+		.hidden_ssid = hidden_ssid,
 	};
 	struct {
 		struct wil6210_mbox_hdr_wmi wmi;
@@ -985,7 +954,7 @@
 }
 
 int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
-		       const void *mac_addr)
+		       const void *mac_addr, int key_usage)
 {
 	struct wmi_delete_cipher_key_cmd cmd = {
 		.key_index = key_index,
@@ -998,11 +967,12 @@
 }
 
 int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
-		       const void *mac_addr, int key_len, const void *key)
+		       const void *mac_addr, int key_len, const void *key,
+		       int key_usage)
 {
 	struct wmi_add_cipher_key_cmd cmd = {
 		.key_index = key_index,
-		.key_usage = WMI_KEY_USE_PAIRWISE,
+		.key_usage = key_usage,
 		.key_len = key_len,
 	};
 
@@ -1238,7 +1208,8 @@
 		    cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-");
 
 	rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_CMDID, &cmd, sizeof(cmd),
-		      WMI_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply), 100);
+		      WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply),
+		      100);
 	if (rc)
 		return rc;
 
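With the extra key_usage argument, callers can install group keys as well as pairwise ones. A hedged sketch of the two call shapes (the enum values appear in the wmi.h hunk below):

/* pairwise key for a connected peer */
rc = wmi_add_cipher_key(wil, key_index, peer_mac, key_len, key,
                        WMI_KEY_USE_PAIRWISE);

/* group (multicast) RX key; the TX side uses WMI_KEY_USE_TX_GROUP */
rc = wmi_add_cipher_key(wil, key_index, NULL, key_len, key,
                        WMI_KEY_USE_RX_GROUP);
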
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index b290553..6e90e78 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
  * Copyright (c) 2006-2012 Wilocity .
  *
  * Permission to use, copy, modify, and/or distribute this software for any
@@ -253,8 +253,8 @@
  */
 enum wmi_key_usage {
 	WMI_KEY_USE_PAIRWISE	= 0,
-	WMI_KEY_USE_GROUP	= 1,
-	WMI_KEY_USE_TX		= 2,  /* default Tx Key - Static WEP only */
+	WMI_KEY_USE_RX_GROUP	= 1,
+	WMI_KEY_USE_TX_GROUP	= 2,
 };
 
 struct wmi_add_cipher_key_cmd {
@@ -495,10 +495,18 @@
 /*
  * WMI_PCP_START_CMDID
  */
+
+enum wmi_hidden_ssid {
+	WMI_HIDDEN_SSID_DISABLED	= 0,
+	WMI_HIDDEN_SSID_SEND_EMPTY	= 1,
+	WMI_HIDDEN_SSID_CLEAR		= 2,
+};
+
 struct wmi_pcp_start_cmd {
 	__le16 bcon_interval;
 	u8 pcp_max_assoc_sta;
-	u8 reserved0[9];
+	u8 hidden_ssid;
+	u8 reserved0[8];
 	u8 network_type;
 	u8 channel;
 	u8 disable_sec_offload;
@@ -836,6 +844,21 @@
 } __packed;
 
 /*
+ * WMI_PMC_CMDID
+ */
+enum wmi_pmc_op_e {
+	WMI_PMC_ALLOCATE = 0,
+	WMI_PMC_RELEASE = 1,
+};
+
+struct wmi_pmc_cmd {
+	u8 op;		/* enum wmi_pmc_op_e */
+	u8 reserved;
+	__le16 ring_size;
+	__le64 mem_base;
+} __packed;
+
+/*
  * WMI Events
  */
 
@@ -870,7 +893,7 @@
 	WMI_VRING_CFG_DONE_EVENTID		= 0x1821,
 	WMI_BA_STATUS_EVENTID			= 0x1823,
 	WMI_RCP_ADDBA_REQ_EVENTID		= 0x1824,
-	WMI_ADDBA_RESP_SENT_EVENTID		= 0x1825,
+	WMI_RCP_ADDBA_RESP_SENT_EVENTID		= 0x1825,
 	WMI_DELBA_EVENTID			= 0x1826,
 	WMI_GET_SSID_EVENTID			= 0x1828,
 	WMI_GET_PCP_CHANNEL_EVENTID		= 0x182a,
@@ -882,7 +905,7 @@
 	WMI_WRITE_MAC_TXQ_EVENTID		= 0x1833,
 	WMI_WRITE_MAC_XQ_FIELD_EVENTID		= 0x1834,
 
-	WMI_BEAFORMING_MGMT_DONE_EVENTID	= 0x1836,
+	WMI_BEAMFORMING_MGMT_DONE_EVENTID	= 0x1836,
 	WMI_BF_TXSS_MGMT_DONE_EVENTID		= 0x1837,
 	WMI_BF_RXSS_MGMT_DONE_EVENTID		= 0x1839,
 	WMI_RS_MGMT_DONE_EVENTID		= 0x1852,
@@ -894,11 +917,12 @@
 
 	/* Performance monitoring events */
 	WMI_DATA_PORT_OPEN_EVENTID		= 0x1860,
-	WMI_WBE_LINKDOWN_EVENTID		= 0x1861,
+	WMI_WBE_LINK_DOWN_EVENTID		= 0x1861,
 
 	WMI_BF_CTRL_DONE_EVENTID		= 0x1862,
 	WMI_NOTIFY_REQ_DONE_EVENTID		= 0x1863,
 	WMI_GET_STATUS_DONE_EVENTID		= 0x1864,
+	WMI_VRING_EN_EVENTID			= 0x1865,
 
 	WMI_UNIT_TEST_EVENTID			= 0x1900,
 	WMI_FLASH_READ_DONE_EVENTID		= 0x1902,
@@ -1147,7 +1171,7 @@
 } __packed;
 
 /*
- * WMI_ADDBA_RESP_SENT_EVENTID
+ * WMI_RCP_ADDBA_RESP_SENT_EVENTID
  */
 struct wmi_rcp_addba_resp_sent_event {
 	u8 cidxtid;
@@ -1179,7 +1203,7 @@
 } __packed;
 
 /*
- * WMI_WBE_LINKDOWN_EVENTID
+ * WMI_WBE_LINK_DOWN_EVENTID
  */
 enum wmi_wbe_link_down_event_reason {
 	WMI_WBE_REASON_USER_REQUEST	= 0,
@@ -1202,6 +1226,14 @@
 } __packed;
 
 /*
+ * WMI_VRING_EN_EVENTID
+ */
+struct wmi_vring_en_event {
+	u8 vring_index;
+	u8 reserved[3];
+} __packed;
+
+/*
  * WMI_GET_PCP_CHANNEL_EVENTID
  */
 struct wmi_get_pcp_channel_event {
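
A sketch of how the new PMC command would be populated, assuming the ring parameters live in struct pmc_ctx and that WMI_PMC_CMDID is defined alongside the other command IDs; wmi_send() is the driver's existing fire-and-forget sender:

struct wmi_pmc_cmd cmd = {
        .op        = WMI_PMC_ALLOCATE,
        .ring_size = cpu_to_le16(pmc->num_descriptors),
        .mem_base  = cpu_to_le64(pmc->pring_pa),  /* DMA address of the ring */
};

rc = wmi_send(wil, WMI_PMC_CMDID, &cmd, sizeof(cmd));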
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index b2f9521..2849070 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3131,8 +3131,6 @@
 		ctl |= B43_MACCTL_KEEP_BAD;
 	if (wl->filter_flags & FIF_PLCPFAIL)
 		ctl |= B43_MACCTL_KEEP_BADPLCP;
-	if (wl->filter_flags & FIF_PROMISC_IN_BSS)
-		ctl |= B43_MACCTL_PROMISC;
 	if (wl->filter_flags & FIF_BCN_PRBRESP_PROMISC)
 		ctl |= B43_MACCTL_BEACPROMISC;
 
@@ -4310,16 +4308,14 @@
 		goto out_unlock;
 	}
 
-	*fflags &= FIF_PROMISC_IN_BSS |
-		  FIF_ALLMULTI |
+	*fflags &= FIF_ALLMULTI |
 		  FIF_FCSFAIL |
 		  FIF_PLCPFAIL |
 		  FIF_CONTROL |
 		  FIF_OTHER_BSS |
 		  FIF_BCN_PRBRESP_PROMISC;
 
-	changed &= FIF_PROMISC_IN_BSS |
-		   FIF_ALLMULTI |
+	changed &= FIF_ALLMULTI |
 		   FIF_FCSFAIL |
 		   FIF_PLCPFAIL |
 		   FIF_CONTROL |
@@ -5365,6 +5361,10 @@
 		*have_5ghz_phy = true;
 		return;
 	case 0x4321: /* BCM4306 */
+		/* There are 14e4:4321 PCI devs with 2.4 GHz BCM4321 (N-PHY) */
+		if (dev->phy.type != B43_PHYTYPE_G)
+			break;
+		/* fall through */
 	case 0x4313: /* BCM4311 */
 	case 0x431a: /* BCM4318 */
 	case 0x432a: /* BCM4321 */
@@ -5609,8 +5609,8 @@
 	wl = hw_to_b43_wl(hw);
 
 	/* fill hw info */
-	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
-		    IEEE80211_HW_SIGNAL_DBM;
+	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+	ieee80211_hw_set(hw, SIGNAL_DBM);
 
 	hw->wiphy->interface_modes =
 		BIT(NL80211_IFTYPE_AP) |
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index c77b7f5..afc1fb3 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2055,8 +2055,6 @@
 		ctl |= B43legacy_MACCTL_KEEP_BAD;
 	if (wl->filter_flags & FIF_PLCPFAIL)
 		ctl |= B43legacy_MACCTL_KEEP_BADPLCP;
-	if (wl->filter_flags & FIF_PROMISC_IN_BSS)
-		ctl |= B43legacy_MACCTL_PROMISC;
 	if (wl->filter_flags & FIF_BCN_PRBRESP_PROMISC)
 		ctl |= B43legacy_MACCTL_BEACPROMISC;
 
@@ -2922,16 +2920,14 @@
 	}
 
 	spin_lock_irqsave(&wl->irq_lock, flags);
-	*fflags &= FIF_PROMISC_IN_BSS |
-		  FIF_ALLMULTI |
+	*fflags &= FIF_ALLMULTI |
 		  FIF_FCSFAIL |
 		  FIF_PLCPFAIL |
 		  FIF_CONTROL |
 		  FIF_OTHER_BSS |
 		  FIF_BCN_PRBRESP_PROMISC;
 
-	changed &= FIF_PROMISC_IN_BSS |
-		   FIF_ALLMULTI |
+	changed &= FIF_ALLMULTI |
 		   FIF_FCSFAIL |
 		   FIF_PLCPFAIL |
 		   FIF_CONTROL |
@@ -3836,8 +3832,9 @@
 	}
 
 	/* fill hw info */
-	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
-		    IEEE80211_HW_SIGNAL_DBM;
+	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+
 	hw->wiphy->interface_modes =
 		BIT(NL80211_IFTYPE_AP) |
 		BIT(NL80211_IFTYPE_STATION) |
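
Both b43 drivers above switch from a flags bitmask assignment to ieee80211_hw_set(): mac80211 now keeps hardware capabilities in a bitmap, and the accessor roughly expands as below (simplified sketch of the mac80211 macro, not part of this patch):

#define ieee80211_hw_set(hw, flg) \
        _ieee80211_hw_set(hw, IEEE80211_HW_##flg)  /* sets one bit in the bitmap */

/* readers use the matching test macro instead of '& IEEE80211_HW_...' */
bool dbm = ieee80211_hw_check(hw, SIGNAL_DBM);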
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 9b508bd..410a664 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -33,6 +33,7 @@
 #include <linux/suspend.h>
 #include <linux/errno.h>
 #include <linux/module.h>
+#include <linux/acpi.h>
 #include <net/cfg80211.h>
 
 #include <defs.h>
@@ -987,6 +988,7 @@
 
 static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
 {
+	sdiodev->state = BRCMF_SDIOD_DOWN;
 	if (sdiodev->bus) {
 		brcmf_sdio_remove(sdiodev->bus);
 		sdiodev->bus = NULL;
@@ -1011,6 +1013,14 @@
 	return 0;
 }
 
+static void brcmf_sdiod_host_fixup(struct mmc_host *host)
+{
+	/* runtime-pm powers off the device */
+	pm_runtime_forbid(host->parent);
+	/* avoid removal detection upon resume */
+	host->caps |= MMC_CAP_NONREMOVABLE;
+}
+
 static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
 {
 	struct sdio_func *func;
@@ -1076,7 +1086,7 @@
 		ret = -ENODEV;
 		goto out;
 	}
-	pm_runtime_forbid(host->parent);
+	brcmf_sdiod_host_fixup(host);
 out:
 	if (ret)
 		brcmf_sdiod_remove(sdiodev);
@@ -1108,12 +1118,25 @@
 static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
 
 
+static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
+						  int val)
+{
+#if IS_ENABLED(CONFIG_ACPI)
+	struct acpi_device *adev;
+
+	adev = ACPI_COMPANION(dev);
+	if (adev)
+		adev->flags.power_manageable = val;
+#endif
+}
+
 static int brcmf_ops_sdio_probe(struct sdio_func *func,
 				const struct sdio_device_id *id)
 {
 	int err;
 	struct brcmf_sdio_dev *sdiodev;
 	struct brcmf_bus *bus_if;
+	struct device *dev;
 
 	brcmf_dbg(SDIO, "Enter\n");
 	brcmf_dbg(SDIO, "Class=%x\n", func->class);
@@ -1121,6 +1144,10 @@
 	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
 	brcmf_dbg(SDIO, "Function#: %d\n", func->num);
 
+	dev = &func->dev;
+	/* prohibit ACPI power management for this device */
+	brcmf_sdiod_acpi_set_power_manageable(dev, 0);
+
 	/* Consume func num 1 but dont do anything with it. */
 	if (func->num == 1)
 		return 0;
@@ -1246,15 +1273,15 @@
 	brcmf_sdiod_freezer_on(sdiodev);
 	brcmf_sdio_wd_timer(sdiodev->bus, 0);
 
+	sdio_flags = MMC_PM_KEEP_POWER;
 	if (sdiodev->wowl_enabled) {
-		sdio_flags = MMC_PM_KEEP_POWER;
 		if (sdiodev->pdata->oob_irq_supported)
 			enable_irq_wake(sdiodev->pdata->oob_irq_nr);
 		else
-			sdio_flags = MMC_PM_WAKE_SDIO_IRQ;
-		if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
-			brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
+			sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
 	}
+	if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
+		brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
 	return 0;
 }
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index 8a15ebb..d86d1f1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -52,8 +52,6 @@
 #define BRCMF_PNO_SCAN_COMPLETE		1
 #define BRCMF_PNO_SCAN_INCOMPLETE	0
 
-#define BRCMF_IFACE_MAX_CNT		3
-
 #define WPA_OUI				"\x00\x50\xF2"	/* WPA OUI */
 #define WPA_OUI_TYPE			1
 #define RSN_OUI				"\x00\x0F\xAC"	/* RSN OUI */
@@ -129,13 +127,47 @@
 	RATETAB_ENT(BRCM_RATE_54M, 0),
 };
 
-#define wl_a_rates		(__wl_rates + 4)
-#define wl_a_rates_size	8
 #define wl_g_rates		(__wl_rates + 0)
-#define wl_g_rates_size	12
+#define wl_g_rates_size		ARRAY_SIZE(__wl_rates)
+#define wl_a_rates		(__wl_rates + 4)
+#define wl_a_rates_size		(wl_g_rates_size - 4)
+
+#define CHAN2G(_channel, _freq) {				\
+	.band			= IEEE80211_BAND_2GHZ,		\
+	.center_freq		= (_freq),			\
+	.hw_value		= (_channel),			\
+	.flags			= IEEE80211_CHAN_DISABLED,	\
+	.max_antenna_gain	= 0,				\
+	.max_power		= 30,				\
+}
+
+#define CHAN5G(_channel) {					\
+	.band			= IEEE80211_BAND_5GHZ,		\
+	.center_freq		= 5000 + (5 * (_channel)),	\
+	.hw_value		= (_channel),			\
+	.flags			= IEEE80211_CHAN_DISABLED,	\
+	.max_antenna_gain	= 0,				\
+	.max_power		= 30,				\
+}
+
+static struct ieee80211_channel __wl_2ghz_channels[] = {
+	CHAN2G(1, 2412), CHAN2G(2, 2417), CHAN2G(3, 2422), CHAN2G(4, 2427),
+	CHAN2G(5, 2432), CHAN2G(6, 2437), CHAN2G(7, 2442), CHAN2G(8, 2447),
+	CHAN2G(9, 2452), CHAN2G(10, 2457), CHAN2G(11, 2462), CHAN2G(12, 2467),
+	CHAN2G(13, 2472), CHAN2G(14, 2484)
+};
+
+static struct ieee80211_channel __wl_5ghz_channels[] = {
+	CHAN5G(34), CHAN5G(36), CHAN5G(38), CHAN5G(40), CHAN5G(42),
+	CHAN5G(44), CHAN5G(46), CHAN5G(48), CHAN5G(52), CHAN5G(56),
+	CHAN5G(60), CHAN5G(64), CHAN5G(100), CHAN5G(104), CHAN5G(108),
+	CHAN5G(112), CHAN5G(116), CHAN5G(120), CHAN5G(124), CHAN5G(128),
+	CHAN5G(132), CHAN5G(136), CHAN5G(140), CHAN5G(144), CHAN5G(149),
+	CHAN5G(153), CHAN5G(157), CHAN5G(161), CHAN5G(165)
+};
 
 /* Band templates duplicated per wiphy. The channel info
- * is filled in after querying the device.
+ * above is added to the band during setup.
  */
 static const struct ieee80211_supported_band __wl_band_2ghz = {
 	.band = IEEE80211_BAND_2GHZ,
@@ -143,7 +175,7 @@
 	.n_bitrates = wl_g_rates_size,
 };
 
-static const struct ieee80211_supported_band __wl_band_5ghz_a = {
+static const struct ieee80211_supported_band __wl_band_5ghz = {
 	.band = IEEE80211_BAND_5GHZ,
 	.bitrates = wl_a_rates,
 	.n_bitrates = wl_a_rates_size,
@@ -1262,7 +1294,7 @@
 		}
 		clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
 		cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0,
-				      GFP_KERNEL);
+				      true, GFP_KERNEL);
 
 	}
 	clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
@@ -1928,7 +1960,7 @@
 
 	clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
 	clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
-	cfg80211_disconnected(ndev, reason_code, NULL, 0, GFP_KERNEL);
+	cfg80211_disconnected(ndev, reason_code, NULL, 0, true, GFP_KERNEL);
 
 	memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
 	scbval.val = cpu_to_le32(reason_code);
@@ -2364,27 +2396,80 @@
 		brcmf_err("set wsec error (%d)\n", err);
 }
 
+static void brcmf_convert_sta_flags(u32 fw_sta_flags, struct station_info *si)
+{
+	struct nl80211_sta_flag_update *sfu;
+
+	brcmf_dbg(TRACE, "flags %08x\n", fw_sta_flags);
+	si->filled |= BIT(NL80211_STA_INFO_STA_FLAGS);
+	sfu = &si->sta_flags;
+	sfu->mask = BIT(NL80211_STA_FLAG_WME) |
+		    BIT(NL80211_STA_FLAG_AUTHENTICATED) |
+		    BIT(NL80211_STA_FLAG_ASSOCIATED) |
+		    BIT(NL80211_STA_FLAG_AUTHORIZED);
+	if (fw_sta_flags & BRCMF_STA_WME)
+		sfu->set |= BIT(NL80211_STA_FLAG_WME);
+	if (fw_sta_flags & BRCMF_STA_AUTHE)
+		sfu->set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
+	if (fw_sta_flags & BRCMF_STA_ASSOC)
+		sfu->set |= BIT(NL80211_STA_FLAG_ASSOCIATED);
+	if (fw_sta_flags & BRCMF_STA_AUTHO)
+		sfu->set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
+}
+
+static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si)
+{
+	struct {
+		__le32 len;
+		struct brcmf_bss_info_le bss_le;
+	} *buf;
+	u16 capability;
+	int err;
+
+	buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	buf->len = cpu_to_le32(WL_BSS_INFO_MAX);
+	err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO, buf,
+				     WL_BSS_INFO_MAX);
+	if (err) {
+		brcmf_err("Failed to get bss info (%d)\n", err);
+		return;
+	}
+	si->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
+	si->bss_param.beacon_interval = le16_to_cpu(buf->bss_le.beacon_period);
+	si->bss_param.dtim_period = buf->bss_le.dtim_period;
+	capability = le16_to_cpu(buf->bss_le.capability);
+	if (capability & IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT)
+		si->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT;
+	if (capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+		si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
+	if (capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
+		si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
+}
+
 static s32
 brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
 			   const u8 *mac, struct station_info *sinfo)
 {
 	struct brcmf_if *ifp = netdev_priv(ndev);
-	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
-	struct brcmf_scb_val_le scb_val;
-	int rssi;
-	s32 rate;
 	s32 err = 0;
-	u8 *bssid = profile->bssid;
 	struct brcmf_sta_info_le sta_info_le;
-	u32 beacon_period;
-	u32 dtim_period;
+	u32 sta_flags;
+	u32 is_tdls_peer;
 
 	brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
 	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
-	if (brcmf_is_apmode(ifp->vif)) {
-		memcpy(&sta_info_le, mac, ETH_ALEN);
+	memset(&sta_info_le, 0, sizeof(sta_info_le));
+	memcpy(&sta_info_le, mac, ETH_ALEN);
+	err = brcmf_fil_iovar_data_get(ifp, "tdls_sta_info",
+				       &sta_info_le,
+				       sizeof(sta_info_le));
+	is_tdls_peer = !err;
+	if (err) {
 		err = brcmf_fil_iovar_data_get(ifp, "sta_info",
 					       &sta_info_le,
 					       sizeof(sta_info_le));
@@ -2392,73 +2477,48 @@
 			brcmf_err("GET STA INFO failed, %d\n", err);
 			goto done;
 		}
-		sinfo->filled = BIT(NL80211_STA_INFO_INACTIVE_TIME);
-		sinfo->inactive_time = le32_to_cpu(sta_info_le.idle) * 1000;
-		if (le32_to_cpu(sta_info_le.flags) & BRCMF_STA_ASSOC) {
-			sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME);
-			sinfo->connected_time = le32_to_cpu(sta_info_le.in);
-		}
-		brcmf_dbg(TRACE, "STA idle time : %d ms, connected time :%d sec\n",
-			  sinfo->inactive_time, sinfo->connected_time);
-	} else if (ifp->vif->wdev.iftype == NL80211_IFTYPE_STATION) {
-		if (memcmp(mac, bssid, ETH_ALEN)) {
-			brcmf_err("Wrong Mac address cfg_mac-%pM wl_bssid-%pM\n",
-				  mac, bssid);
-			err = -ENOENT;
-			goto done;
-		}
-		/* Report the current tx rate */
-		err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_RATE, &rate);
-		if (err) {
-			brcmf_err("Could not get rate (%d)\n", err);
-			goto done;
-		} else {
+	}
+	brcmf_dbg(TRACE, "version %d\n", le16_to_cpu(sta_info_le.ver));
+	sinfo->filled = BIT(NL80211_STA_INFO_INACTIVE_TIME);
+	sinfo->inactive_time = le32_to_cpu(sta_info_le.idle) * 1000;
+	sta_flags = le32_to_cpu(sta_info_le.flags);
+	brcmf_convert_sta_flags(sta_flags, sinfo);
+	sinfo->sta_flags.mask |= BIT(NL80211_STA_FLAG_TDLS_PEER);
+	if (is_tdls_peer)
+		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
+	else
+		sinfo->sta_flags.set &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
+	if (sta_flags & BRCMF_STA_ASSOC) {
+		sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME);
+		sinfo->connected_time = le32_to_cpu(sta_info_le.in);
+		brcmf_fill_bss_param(ifp, sinfo);
+	}
+	if (sta_flags & BRCMF_STA_SCBSTATS) {
+		sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED);
+		sinfo->tx_failed = le32_to_cpu(sta_info_le.tx_failures);
+		sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+		sinfo->tx_packets = le32_to_cpu(sta_info_le.tx_pkts);
+		sinfo->tx_packets += le32_to_cpu(sta_info_le.tx_mcast_pkts);
+		sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+		sinfo->rx_packets = le32_to_cpu(sta_info_le.rx_ucast_pkts);
+		sinfo->rx_packets += le32_to_cpu(sta_info_le.rx_mcast_pkts);
+		if (sinfo->tx_packets) {
 			sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
-			sinfo->txrate.legacy = rate * 5;
-			brcmf_dbg(CONN, "Rate %d Mbps\n", rate / 2);
+			sinfo->txrate.legacy = le32_to_cpu(sta_info_le.tx_rate);
+			sinfo->txrate.legacy /= 100;
 		}
-
-		if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
-			     &ifp->vif->sme_state)) {
-			memset(&scb_val, 0, sizeof(scb_val));
-			err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI,
-						     &scb_val, sizeof(scb_val));
-			if (err) {
-				brcmf_err("Could not get rssi (%d)\n", err);
-				goto done;
-			} else {
-				rssi = le32_to_cpu(scb_val.val);
-				sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
-				sinfo->signal = rssi;
-				brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
-			}
-			err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_BCNPRD,
-						    &beacon_period);
-			if (err) {
-				brcmf_err("Could not get beacon period (%d)\n",
-					  err);
-				goto done;
-			} else {
-				sinfo->bss_param.beacon_interval =
-					beacon_period;
-				brcmf_dbg(CONN, "Beacon peroid %d\n",
-					  beacon_period);
-			}
-			err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_DTIMPRD,
-						    &dtim_period);
-			if (err) {
-				brcmf_err("Could not get DTIM period (%d)\n",
-					  err);
-				goto done;
-			} else {
-				sinfo->bss_param.dtim_period = dtim_period;
-				brcmf_dbg(CONN, "DTIM peroid %d\n",
-					  dtim_period);
-			}
-			sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
+		if (sinfo->rx_packets) {
+			sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
+			sinfo->rxrate.legacy = le32_to_cpu(sta_info_le.rx_rate);
+			sinfo->rxrate.legacy /= 100;
 		}
-	} else
-		err = -EPERM;
+		if (le16_to_cpu(sta_info_le.ver) >= 4) {
+			sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES);
+			sinfo->tx_bytes = le64_to_cpu(sta_info_le.tx_tot_bytes);
+			sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES);
+			sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes);
+		}
+	}
 done:
 	brcmf_dbg(TRACE, "Exit\n");
 	return err;
@@ -5253,40 +5313,6 @@
 	return err;
 }
 
-/* Filter the list of channels received from firmware counting only
- * the 20MHz channels. The wiphy band data only needs those which get
- * flagged to indicate if they can take part in higher bandwidth.
- */
-static void brcmf_count_20mhz_channels(struct brcmf_cfg80211_info *cfg,
-				       struct brcmf_chanspec_list *chlist,
-				       u32 chcnt[])
-{
-	u32 total = le32_to_cpu(chlist->count);
-	struct brcmu_chan ch;
-	int i;
-
-	for (i = 0; i < total; i++) {
-		ch.chspec = (u16)le32_to_cpu(chlist->element[i]);
-		cfg->d11inf.decchspec(&ch);
-
-		/* Firmware gives a ordered list. We skip non-20MHz
-		 * channels is 2G. For 5G we can abort upon reaching
-		 * a non-20MHz channel in the list.
-		 */
-		if (ch.bw != BRCMU_CHAN_BW_20) {
-			if (ch.band == BRCMU_CHAN_BAND_5G)
-				break;
-			else
-				continue;
-		}
-
-		if (ch.band == BRCMU_CHAN_BAND_2G)
-			chcnt[0] += 1;
-		else if (ch.band == BRCMU_CHAN_BAND_5G)
-			chcnt[1] += 1;
-	}
-}
-
 static void brcmf_update_bw40_channel_flag(struct ieee80211_channel *channel,
 					   struct brcmu_chan *ch)
 {
@@ -5322,7 +5348,6 @@
 	u32 i, j;
 	u32 total;
 	u32 chaninfo;
-	u32 chcnt[2] = { 0, 0 };
 	u32 index;
 
 	pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
@@ -5339,42 +5364,15 @@
 		goto fail_pbuf;
 	}
 
-	brcmf_count_20mhz_channels(cfg, list, chcnt);
 	wiphy = cfg_to_wiphy(cfg);
-	if (chcnt[0]) {
-		band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz),
-			       GFP_KERNEL);
-		if (band == NULL) {
-			err = -ENOMEM;
-			goto fail_pbuf;
-		}
-		band->channels = kcalloc(chcnt[0], sizeof(*channel),
-					 GFP_KERNEL);
-		if (band->channels == NULL) {
-			kfree(band);
-			err = -ENOMEM;
-			goto fail_pbuf;
-		}
-		band->n_channels = 0;
-		wiphy->bands[IEEE80211_BAND_2GHZ] = band;
-	}
-	if (chcnt[1]) {
-		band = kmemdup(&__wl_band_5ghz_a, sizeof(__wl_band_5ghz_a),
-			       GFP_KERNEL);
-		if (band == NULL) {
-			err = -ENOMEM;
-			goto fail_band2g;
-		}
-		band->channels = kcalloc(chcnt[1], sizeof(*channel),
-					 GFP_KERNEL);
-		if (band->channels == NULL) {
-			kfree(band);
-			err = -ENOMEM;
-			goto fail_band2g;
-		}
-		band->n_channels = 0;
-		wiphy->bands[IEEE80211_BAND_5GHZ] = band;
-	}
+	band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	if (band)
+		for (i = 0; i < band->n_channels; i++)
+			band->channels[i].flags = IEEE80211_CHAN_DISABLED;
+	band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	if (band)
+		for (i = 0; i < band->n_channels; i++)
+			band->channels[i].flags = IEEE80211_CHAN_DISABLED;
 
 	total = le32_to_cpu(list->count);
 	for (i = 0; i < total; i++) {
@@ -5389,6 +5387,8 @@
 			brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
 			continue;
 		}
+		if (!band)
+			continue;
 		if (!(bw_cap[band->band] & WLC_BW_40MHZ_BIT) &&
 		    ch.bw == BRCMU_CHAN_BW_40)
 			continue;
@@ -5416,9 +5416,9 @@
 		} else if (ch.bw == BRCMU_CHAN_BW_40) {
 			brcmf_update_bw40_channel_flag(&channel[index], &ch);
 		} else {
-			/* disable other bandwidths for now as mentioned
-			 * order assure they are enabled for subsequent
-			 * chanspecs.
+			/* enable the channel and disable other bandwidths
+			 * for now; the ordered list assures they are enabled
+			 * for subsequent chanspecs.
 			 */
 			channel[index].flags = IEEE80211_CHAN_NO_HT40 |
 					       IEEE80211_CHAN_NO_80MHZ;
@@ -5437,16 +5437,8 @@
 						IEEE80211_CHAN_NO_IR;
 			}
 		}
-		if (index == band->n_channels)
-			band->n_channels++;
 	}
-	kfree(pbuf);
-	return 0;
 
-fail_band2g:
-	kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
-	kfree(wiphy->bands[IEEE80211_BAND_2GHZ]);
-	wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
 fail_pbuf:
 	kfree(pbuf);
 	return err;
@@ -5674,53 +5666,6 @@
 	return 0;
 }
 
-static const struct ieee80211_iface_limit brcmf_iface_limits_mbss[] = {
-	{
-		.max = 1,
-		.types = BIT(NL80211_IFTYPE_STATION) |
-			 BIT(NL80211_IFTYPE_ADHOC)
-	},
-	{
-		.max = 4,
-		.types = BIT(NL80211_IFTYPE_AP)
-	},
-	{
-		.max = 1,
-		.types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
-			 BIT(NL80211_IFTYPE_P2P_GO)
-	},
-	{
-		.max = 1,
-		.types = BIT(NL80211_IFTYPE_P2P_DEVICE)
-	}
-};
-
-static const struct ieee80211_iface_limit brcmf_iface_limits_sbss[] = {
-	{
-		.max = 2,
-		.types = BIT(NL80211_IFTYPE_STATION) |
-			 BIT(NL80211_IFTYPE_ADHOC) |
-			 BIT(NL80211_IFTYPE_AP)
-	},
-	{
-		.max = 1,
-		.types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
-			 BIT(NL80211_IFTYPE_P2P_GO)
-	},
-	{
-		.max = 1,
-		.types = BIT(NL80211_IFTYPE_P2P_DEVICE)
-	}
-};
-static struct ieee80211_iface_combination brcmf_iface_combos[] = {
-	{
-		 .max_interfaces = BRCMF_IFACE_MAX_CNT,
-		 .num_different_channels = 1,
-		 .n_limits = ARRAY_SIZE(brcmf_iface_limits_sbss),
-		 .limits = brcmf_iface_limits_sbss,
-	}
-};
-
 static const struct ieee80211_txrx_stypes
 brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
 	[NL80211_IFTYPE_STATION] = {
@@ -5750,6 +5695,67 @@
 	}
 };
 
+static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
+{
+	struct ieee80211_iface_combination *combo = NULL;
+	struct ieee80211_iface_limit *limits = NULL;
+	int i = 0, max_iface_cnt;
+
+	combo = kzalloc(sizeof(*combo), GFP_KERNEL);
+	if (!combo)
+		goto err;
+
+	limits = kzalloc(sizeof(*limits) * 4, GFP_KERNEL);
+	if (!limits)
+		goto err;
+
+	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+				 BIT(NL80211_IFTYPE_ADHOC) |
+				 BIT(NL80211_IFTYPE_AP);
+
+	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN))
+		combo->num_different_channels = 2;
+	else
+		combo->num_different_channels = 1;
+
+	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) {
+		limits[i].max = 1;
+		limits[i++].types = BIT(NL80211_IFTYPE_STATION);
+		limits[i].max = 4;
+		limits[i++].types = BIT(NL80211_IFTYPE_AP);
+		max_iface_cnt = 5;
+	} else {
+		limits[i].max = 2;
+		limits[i++].types = BIT(NL80211_IFTYPE_STATION) |
+				    BIT(NL80211_IFTYPE_AP);
+		max_iface_cnt = 2;
+	}
+
+	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_P2P)) {
+		wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
+					  BIT(NL80211_IFTYPE_P2P_GO) |
+					  BIT(NL80211_IFTYPE_P2P_DEVICE);
+		limits[i].max = 1;
+		limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+				    BIT(NL80211_IFTYPE_P2P_GO);
+		limits[i].max = 1;
+		limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
+		max_iface_cnt += 2;
+	}
+	combo->max_interfaces = max_iface_cnt;
+	combo->limits = limits;
+	combo->n_limits = i;
+
+	wiphy->iface_combinations = combo;
+	wiphy->n_iface_combinations = 1;
+	return 0;
+
+err:
+	kfree(limits);
+	kfree(combo);
+	return -ENOMEM;
+}
+
 static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
 {
 	/* scheduled scan settings */
@@ -5779,28 +5785,19 @@
 
 static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
 {
-	struct ieee80211_iface_combination ifc_combo;
+	struct ieee80211_supported_band *band;
+	__le32 bandlist[3];
+	u32 n_bands;
+	int err, i;
+
 	wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
 	wiphy->max_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
 	wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
-	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
-				 BIT(NL80211_IFTYPE_ADHOC) |
-				 BIT(NL80211_IFTYPE_AP) |
-				 BIT(NL80211_IFTYPE_P2P_CLIENT) |
-				 BIT(NL80211_IFTYPE_P2P_GO) |
-				 BIT(NL80211_IFTYPE_P2P_DEVICE);
-	/* need VSDB firmware feature for concurrent channels */
-	ifc_combo = brcmf_iface_combos[0];
-	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN))
-		ifc_combo.num_different_channels = 2;
-	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) {
-		ifc_combo.n_limits = ARRAY_SIZE(brcmf_iface_limits_mbss),
-		ifc_combo.limits = brcmf_iface_limits_mbss;
-	}
-	wiphy->iface_combinations = kmemdup(&ifc_combo,
-					    sizeof(ifc_combo),
-					    GFP_KERNEL);
-	wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos);
+
+	err = brcmf_setup_ifmodes(wiphy, ifp);
+	if (err)
+		return err;
+
 	wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
 	wiphy->cipher_suites = __wl_cipher_suites;
 	wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
@@ -5812,7 +5809,8 @@
 		wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
 	wiphy->mgmt_stypes = brcmf_txrx_stypes;
 	wiphy->max_remain_on_channel_duration = 5000;
-	brcmf_wiphy_pno_params(wiphy);
+	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO))
+		brcmf_wiphy_pno_params(wiphy);
 
 	/* vendor commands/events support */
 	wiphy->vendor_commands = brcmf_vendor_cmds;
@@ -5821,7 +5819,52 @@
 	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL))
 		brcmf_wiphy_wowl_params(wiphy);
 
-	return brcmf_setup_wiphybands(wiphy);
+	err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BANDLIST, &bandlist,
+				     sizeof(bandlist));
+	if (err) {
+		brcmf_err("could not obtain band info: err=%d\n", err);
+		return err;
+	}
+	/* first entry in bandlist is number of bands */
+	n_bands = le32_to_cpu(bandlist[0]);
+	for (i = 1; i <= n_bands && i < ARRAY_SIZE(bandlist); i++) {
+		if (bandlist[i] == cpu_to_le32(WLC_BAND_2G)) {
+			band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz),
+				       GFP_KERNEL);
+			if (!band)
+				return -ENOMEM;
+
+			band->channels = kmemdup(&__wl_2ghz_channels,
+						 sizeof(__wl_2ghz_channels),
+						 GFP_KERNEL);
+			if (!band->channels) {
+				kfree(band);
+				return -ENOMEM;
+			}
+
+			band->n_channels = ARRAY_SIZE(__wl_2ghz_channels);
+			wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+		}
+		if (bandlist[i] == cpu_to_le32(WLC_BAND_5G)) {
+			band = kmemdup(&__wl_band_5ghz, sizeof(__wl_band_5ghz),
+				       GFP_KERNEL);
+			if (!band)
+				return -ENOMEM;
+
+			band->channels = kmemdup(&__wl_5ghz_channels,
+						 sizeof(__wl_5ghz_channels),
+						 GFP_KERNEL);
+			if (!band->channels) {
+				kfree(band);
+				return -ENOMEM;
+			}
+
+			band->n_channels = ARRAY_SIZE(__wl_5ghz_channels);
+			wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+		}
+	}
+	err = brcmf_setup_wiphybands(wiphy);
+	return err;
 }
 
 static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
@@ -6007,11 +6050,20 @@
 	memset(&ccreq, 0, sizeof(ccreq));
 	ccreq.rev = cpu_to_le32(-1);
 	memcpy(ccreq.ccode, req->alpha2, sizeof(req->alpha2));
-	brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq));
+	if (brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq))) {
+		brcmf_err("firmware rejected country setting\n");
+		return;
+	}
+	brcmf_setup_wiphybands(wiphy);
 }
 
 static void brcmf_free_wiphy(struct wiphy *wiphy)
 {
+	if (!wiphy)
+		return;
+
+	if (wiphy->iface_combinations)
+		kfree(wiphy->iface_combinations->limits);
 	kfree(wiphy->iface_combinations);
 	if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
 		kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
@@ -6047,6 +6099,7 @@
 		brcmf_err("Could not allocate wiphy device\n");
 		return NULL;
 	}
+	memcpy(wiphy->perm_addr, drvr->mac, ETH_ALEN);
 	set_wiphy_dev(wiphy, busdev);
 
 	cfg = wiphy_priv(wiphy);
@@ -6154,10 +6207,8 @@
 	if (!cfg)
 		return;
 
-	WARN_ON(!list_empty(&cfg->vif_list));
-	wiphy_unregister(cfg->wiphy);
 	brcmf_btcoex_detach(cfg);
-	brcmf_p2p_detach(&cfg->p2p);
+	wiphy_unregister(cfg->wiphy);
 	wl_deinit_priv(cfg);
 	brcmf_free_wiphy(cfg->wiphy);
 }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
index ab2fac8..288f831 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -649,6 +649,7 @@
 	case BRCM_CC_43567_CHIP_ID:
 	case BRCM_CC_43569_CHIP_ID:
 	case BRCM_CC_43570_CHIP_ID:
+	case BRCM_CC_4358_CHIP_ID:
 	case BRCM_CC_43602_CHIP_ID:
 		return 0x180000;
 	default:
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
index 77656c7..7b0e521 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
@@ -22,17 +22,6 @@
 #include "core.h"
 #include "commonring.h"
 
-
-/* dma flushing needs implementation for mips and arm platforms. Should
- * be put in util. Note, this is not real flushing. It is virtual non
- * cached memory. Only write buffers should have to be drained. Though
- * this may be different depending on platform......
- * SEE ALSO msgbuf.c
- */
-#define brcmf_dma_flush(addr, len)
-#define brcmf_dma_invalidate_cache(addr, len)
-
-
 void brcmf_commonring_register_cb(struct brcmf_commonring *commonring,
 				  int (*cr_ring_bell)(void *ctx),
 				  int (*cr_update_rptr)(void *ctx),
@@ -206,14 +195,9 @@
 	address = commonring->buf_addr;
 	address += (commonring->f_ptr * commonring->item_len);
 	if (commonring->f_ptr > commonring->w_ptr) {
-		brcmf_dma_flush(address,
-				(commonring->depth - commonring->f_ptr) *
-				commonring->item_len);
 		address = commonring->buf_addr;
 		commonring->f_ptr = 0;
 	}
-	brcmf_dma_flush(address, (commonring->w_ptr - commonring->f_ptr) *
-			commonring->item_len);
 
 	commonring->f_ptr = commonring->w_ptr;
 
@@ -239,8 +223,6 @@
 void *brcmf_commonring_get_read_ptr(struct brcmf_commonring *commonring,
 				    u16 *n_items)
 {
-	void *ret_addr;
-
 	if (commonring->cr_update_wptr)
 		commonring->cr_update_wptr(commonring->cr_ctx);
 
@@ -251,21 +233,18 @@
 	if (*n_items == 0)
 		return NULL;
 
-	ret_addr = commonring->buf_addr +
-		   (commonring->r_ptr * commonring->item_len);
-
-	commonring->r_ptr += *n_items;
-	if (commonring->r_ptr == commonring->depth)
-		commonring->r_ptr = 0;
-
-	brcmf_dma_invalidate_cache(ret_addr, *n_items * commonring->item_len);
-
-	return ret_addr;
+	return commonring->buf_addr +
+	       (commonring->r_ptr * commonring->item_len);
 }
 
 
-int brcmf_commonring_read_complete(struct brcmf_commonring *commonring)
+int brcmf_commonring_read_complete(struct brcmf_commonring *commonring,
+				   u16 n_items)
 {
+	commonring->r_ptr += n_items;
+	if (commonring->r_ptr == commonring->depth)
+		commonring->r_ptr = 0;
+
 	if (commonring->cr_write_rptr)
 		return commonring->cr_write_rptr(commonring->cr_ctx);
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/commonring.h b/drivers/net/wireless/brcm80211/brcmfmac/commonring.h
index 3d40401..b850336 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/commonring.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/commonring.h
@@ -62,7 +62,8 @@
 				   u16 n_items);
 void *brcmf_commonring_get_read_ptr(struct brcmf_commonring *commonring,
 				    u16 *n_items);
-int brcmf_commonring_read_complete(struct brcmf_commonring *commonring);
+int brcmf_commonring_read_complete(struct brcmf_commonring *commonring,
+				   u16 n_items);
 
 #define brcmf_commonring_n_items(commonring) (commonring->depth)
 #define brcmf_commonring_len_item(commonring) (commonring->item_len)
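
The API change above moves the read-pointer advance out of get_read_ptr() and into read_complete(), so the items stay valid while the caller processes them. A consumer sketch; process_items() stands in for the real handler:

u16 n_items;
void *buf = brcmf_commonring_get_read_ptr(ring, &n_items);

if (buf) {
        process_items(buf, n_items);                    /* caller-supplied */
        brcmf_commonring_read_complete(ring, n_items);  /* r_ptr moves here */
}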
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.c b/drivers/net/wireless/brcm80211/brcmfmac/core.c
index f8f47dc..fe9d3fb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.c
@@ -867,8 +867,6 @@
 		}
 		/* unregister will take care of freeing it */
 		unregister_netdev(ifp->ndev);
-	} else {
-		kfree(ifp);
 	}
 }
 
@@ -1100,6 +1098,8 @@
 
 	/* stop firmware event handling */
 	brcmf_fweh_detach(drvr);
+	if (drvr->config)
+		brcmf_p2p_detach(&drvr->config->p2p);
 
 	brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/brcm80211/brcmfmac/debug.c
index 9b473d5..2d6d005 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/debug.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/debug.c
@@ -41,15 +41,6 @@
 	root_folder = NULL;
 }
 
-static int brcmf_debugfs_chipinfo_read(struct seq_file *seq, void *data)
-{
-	struct brcmf_bus *bus = dev_get_drvdata(seq->private);
-
-	seq_printf(seq, "chip: %x(%u) rev %u\n",
-		   bus->chip, bus->chip, bus->chiprev);
-	return 0;
-}
-
 int brcmf_debugfs_attach(struct brcmf_pub *drvr)
 {
 	struct device *dev = drvr->bus_if->dev;
@@ -58,7 +49,6 @@
 		return -ENODEV;
 
 	drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder);
-	brcmf_debugfs_add_entry(drvr, "chipinfo", brcmf_debugfs_chipinfo_read);
 
 	return PTR_ERR_OR_ZERO(drvr->dbgfs_dir);
 }
@@ -74,44 +64,12 @@
 	return drvr->dbgfs_dir;
 }
 
-struct brcmf_debugfs_entry {
-	int (*read)(struct seq_file *seq, void *data);
-	struct brcmf_pub *drvr;
-};
-
-static int brcmf_debugfs_entry_open(struct inode *inode, struct file *f)
-{
-	struct brcmf_debugfs_entry *entry = inode->i_private;
-
-	return single_open(f, entry->read, entry->drvr->bus_if->dev);
-}
-
-static const struct file_operations brcmf_debugfs_def_ops = {
-	.owner = THIS_MODULE,
-	.open = brcmf_debugfs_entry_open,
-	.release = single_release,
-	.read = seq_read,
-	.llseek = seq_lseek
-};
-
 int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
 			    int (*read_fn)(struct seq_file *seq, void *data))
 {
-	struct dentry *dentry =  drvr->dbgfs_dir;
-	struct brcmf_debugfs_entry *entry;
+	struct dentry *e;
 
-	if (IS_ERR_OR_NULL(dentry))
-		return -ENOENT;
-
-	entry = devm_kzalloc(drvr->bus_if->dev, sizeof(*entry), GFP_KERNEL);
-	if (!entry)
-		return -ENOMEM;
-
-	entry->read = read_fn;
-	entry->drvr = drvr;
-
-	dentry = debugfs_create_file(fn, S_IRUGO, dentry, entry,
-				     &brcmf_debugfs_def_ops);
-
-	return PTR_ERR_OR_ZERO(dentry);
+	e = debugfs_create_devm_seqfile(drvr->bus_if->dev, fn,
+					drvr->dbgfs_dir, read_fn);
+	return PTR_ERR_OR_ZERO(e);
 }
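
After this cleanup a debugfs entry is just a seq_file read callback; the device pointer arrives in seq->private, exactly as the deleted chipinfo handler used it. A sketch with an illustrative entry name:

static int my_chipinfo_read(struct seq_file *seq, void *data)
{
        struct brcmf_bus *bus = dev_get_drvdata(seq->private);

        seq_printf(seq, "chip: %x rev %u\n", bus->chip, bus->chiprev);
        return 0;
}

/* registration: */
brcmf_debugfs_add_entry(drvr, "chipinfo", my_chipinfo_read);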
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
index 7748a1c..1e94e94 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
@@ -124,10 +124,12 @@
 	struct brcmf_if *ifp = drvr->iflist[0];
 
 	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
+	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn");
 	if (drvr->bus_if->wowl_supported)
 		brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
 	if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID)
 		brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
+	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_P2P, "p2p");
 
 	/* set chip related quirks */
 	switch (drvr->bus_if->chip) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/brcm80211/brcmfmac/feature.h
index f5832e0..6b381f7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.h
@@ -19,12 +19,18 @@
 /*
  * Features:
  *
+ * MBSS: multiple BSSID support (eg. guest network in AP mode).
  * MCHAN: multi-channel for concurrent P2P.
+ * PNO: preferred network offload.
+ * WOWL: Wake-On-WLAN.
+ * P2P: peer-to-peer.
  */
 #define BRCMF_FEAT_LIST \
 	BRCMF_FEAT_DEF(MBSS) \
 	BRCMF_FEAT_DEF(MCHAN) \
-	BRCMF_FEAT_DEF(WOWL)
+	BRCMF_FEAT_DEF(PNO) \
+	BRCMF_FEAT_DEF(WOWL) \
+	BRCMF_FEAT_DEF(P2P)
 /*
  * Quirks:
  *
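
BRCMF_FEAT_LIST is an X-macro: each user defines BRCMF_FEAT_DEF before expanding the list. Roughly how the feature enum is generated elsewhere in feature.h (a sketch, not part of this hunk):

#define BRCMF_FEAT_DEF(name) \
        BRCMF_FEAT_ ## name,
enum brcmf_feat_id {
        BRCMF_FEAT_LIST
        BRCMF_FEAT_LAST
};
#undef BRCMF_FEAT_DEF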
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
index 9cb9915..743f16b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -23,6 +23,10 @@
 #include "debug.h"
 #include "firmware.h"
 
+#define BRCMF_FW_MAX_NVRAM_SIZE			64000
+#define BRCMF_FW_NVRAM_DEVPATH_LEN		19	/* devpath0=pcie/1/4/ */
+#define BRCMF_FW_NVRAM_PCIEDEV_LEN		10	/* pcie/1/4/ + \0 */
+
 char brcmf_firmware_path[BRCMF_FW_PATH_LEN];
 module_param_string(firmware_path, brcmf_firmware_path,
 		    BRCMF_FW_PATH_LEN, 0440);
@@ -39,25 +43,35 @@
  * struct nvram_parser - internal info for parser.
  *
  * @state: current parser state.
- * @fwnv: input buffer being parsed.
+ * @data: input buffer being parsed.
  * @nvram: output buffer with parse result.
 * @nvram_len: length of parse result.
  * @line: current line.
  * @column: current column in line.
  * @pos: byte offset in input buffer.
  * @entry: start position of key,value entry.
+ * @multi_dev_v1: detect pcie multi device v1 (compressed).
+ * @multi_dev_v2: detect pcie multi device v2.
  */
 struct nvram_parser {
 	enum nvram_parser_state state;
-	const struct firmware *fwnv;
+	const u8 *data;
 	u8 *nvram;
 	u32 nvram_len;
 	u32 line;
 	u32 column;
 	u32 pos;
 	u32 entry;
+	bool multi_dev_v1;
+	bool multi_dev_v2;
 };
 
+/**
+ * is_nvram_char() - check if char is a valid one for NVRAM entry
+ *
+ * It accepts all printable ASCII chars except for '#' which opens a comment.
+ * Please note that ' ' (space), while accepted, is not a valid key name char.
+ */
 static bool is_nvram_char(char c)
 {
 	/* comment marker excluded */
@@ -65,7 +79,7 @@
 		return false;
 
 	/* key and value may have any other readable character */
-	return (c > 0x20 && c < 0x7f);
+	return (c >= 0x20 && c < 0x7f);
 }
 
 static bool is_whitespace(char c)
@@ -77,7 +91,7 @@
 {
 	char c;
 
-	c = nvp->fwnv->data[nvp->pos];
+	c = nvp->data[nvp->pos];
 	if (c == '\n')
 		return COMMENT;
 	if (is_whitespace(c))
@@ -101,14 +115,18 @@
 	enum nvram_parser_state st = nvp->state;
 	char c;
 
-	c = nvp->fwnv->data[nvp->pos];
+	c = nvp->data[nvp->pos];
 	if (c == '=') {
 		/* ignore RAW1 by treating as comment */
-		if (strncmp(&nvp->fwnv->data[nvp->entry], "RAW1", 4) == 0)
+		if (strncmp(&nvp->data[nvp->entry], "RAW1", 4) == 0)
 			st = COMMENT;
 		else
 			st = VALUE;
-	} else if (!is_nvram_char(c)) {
+		if (strncmp(&nvp->data[nvp->entry], "devpath", 7) == 0)
+			nvp->multi_dev_v1 = true;
+		if (strncmp(&nvp->data[nvp->entry], "pcie/", 5) == 0)
+			nvp->multi_dev_v2 = true;
+	} else if (!is_nvram_char(c) || c == ' ') {
 		brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
 			  nvp->line, nvp->column);
 		return COMMENT;
@@ -127,12 +145,14 @@
 	char *ekv;
 	u32 cplen;
 
-	c = nvp->fwnv->data[nvp->pos];
+	c = nvp->data[nvp->pos];
 	if (!is_nvram_char(c)) {
 		/* key,value pair complete */
-		ekv = (u8 *)&nvp->fwnv->data[nvp->pos];
-		skv = (u8 *)&nvp->fwnv->data[nvp->entry];
+		ekv = (u8 *)&nvp->data[nvp->pos];
+		skv = (u8 *)&nvp->data[nvp->entry];
 		cplen = ekv - skv;
+		if (nvp->nvram_len + cplen + 1 >= BRCMF_FW_MAX_NVRAM_SIZE)
+			return END;
 		/* copy to output buffer */
 		memcpy(&nvp->nvram[nvp->nvram_len], skv, cplen);
 		nvp->nvram_len += cplen;
@@ -148,17 +168,20 @@
 static enum nvram_parser_state
 brcmf_nvram_handle_comment(struct nvram_parser *nvp)
 {
-	char *eol, *sol;
+	char *eoc, *sol;
 
-	sol = (char *)&nvp->fwnv->data[nvp->pos];
-	eol = strchr(sol, '\n');
-	if (eol == NULL)
-		return END;
+	sol = (char *)&nvp->data[nvp->pos];
+	eoc = strchr(sol, '\n');
+	if (!eoc) {
+		eoc = strchr(sol, '\0');
+		if (!eoc)
+			return END;
+	}
 
 	/* eat all moving to next line */
 	nvp->line++;
 	nvp->column = 1;
-	nvp->pos += (eol - sol) + 1;
+	nvp->pos += (eoc - sol) + 1;
 	return IDLE;
 }
 
@@ -178,12 +201,20 @@
 };
 
 static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
-				   const struct firmware *nv)
+				   const u8 *data, size_t data_len)
 {
+	size_t size;
+
 	memset(nvp, 0, sizeof(*nvp));
-	nvp->fwnv = nv;
+	nvp->data = data;
+	/* Limit size to MAX_NVRAM_SIZE, some files contain a lot of comments */
+	if (data_len > BRCMF_FW_MAX_NVRAM_SIZE)
+		size = BRCMF_FW_MAX_NVRAM_SIZE;
+	else
+		size = data_len;
 	/* Alloc for extra 0 byte + roundup by 4 + length field */
-	nvp->nvram = kzalloc(nv->size + 1 + 3 + sizeof(u32), GFP_KERNEL);
+	size += 1 + 3 + sizeof(u32);
+	nvp->nvram = kzalloc(size, GFP_KERNEL);
 	if (!nvp->nvram)
 		return -ENOMEM;
 
@@ -192,26 +223,171 @@
 	return 0;
 }
 
+/* brcmf_fw_strip_multi_v1(): Some nvram files contain settings for multiple
+ * devices. Strip the data down to a single device; domain_nr/bus_nr
+ * determine which entries are returned. v1 is the format where nvram is
+ * stored compressed and a "devpath" entry maps the device path to the
+ * index that prefixes valid entries.
+ */
+static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr,
+				    u16 bus_nr)
+{
+	/* Device path with a leading '=' key-value separator */
+	char pci_path[] = "=pci/?/?";
+	size_t pci_len;
+	char pcie_path[] = "=pcie/?/?";
+	size_t pcie_len;
+
+	u32 i, j;
+	bool found;
+	u8 *nvram;
+	u8 id;
+
+	nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL);
+	if (!nvram)
+		goto fail;
+
+	/* min length: devpath0=pcie/1/4/ + 0:x=y */
+	if (nvp->nvram_len < BRCMF_FW_NVRAM_DEVPATH_LEN + 6)
+		goto fail;
+
+	/* First search for the devpathX entry and check whether it is the
+	 * configuration for domain_nr/bus_nr. Search the complete nvp.
+	 */
+	snprintf(pci_path, sizeof(pci_path), "=pci/%d/%d", domain_nr,
+		 bus_nr);
+	pci_len = strlen(pci_path);
+	snprintf(pcie_path, sizeof(pcie_path), "=pcie/%d/%d", domain_nr,
+		 bus_nr);
+	pcie_len = strlen(pcie_path);
+	found = false;
+	i = 0;
+	while (i < nvp->nvram_len - BRCMF_FW_NVRAM_DEVPATH_LEN) {
+		/* Format: devpathX=pcie/Y/Z/
+		 * Y = domain_nr, Z = bus_nr, X = virtual ID
+		 */
+		if (strncmp(&nvp->nvram[i], "devpath", 7) == 0 &&
+		    (!strncmp(&nvp->nvram[i + 8], pci_path, pci_len) ||
+		     !strncmp(&nvp->nvram[i + 8], pcie_path, pcie_len))) {
+			id = nvp->nvram[i + 7] - '0';
+			found = true;
+			break;
+		}
+		while (nvp->nvram[i] != 0)
+			i++;
+		i++;
+	}
+	if (!found)
+		goto fail;
+
+	/* Now copy all valid entries, release old nvram and assign new one */
+	i = 0;
+	j = 0;
+	while (i < nvp->nvram_len) {
+		if ((nvp->nvram[i] - '0' == id) && (nvp->nvram[i + 1] == ':')) {
+			i += 2;
+			while (nvp->nvram[i] != 0) {
+				nvram[j] = nvp->nvram[i];
+				i++;
+				j++;
+			}
+			nvram[j] = 0;
+			j++;
+		}
+		while (nvp->nvram[i] != 0)
+			i++;
+		i++;
+	}
+	kfree(nvp->nvram);
+	nvp->nvram = nvram;
+	nvp->nvram_len = j;
+	return;
+
+fail:
+	kfree(nvram);
+	nvp->nvram_len = 0;
+}
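A concrete, hypothetical v1 file shows the compressed layout: a devpathX entry maps a virtual ID to a device path, and each valid entry is prefixed with that ID:

    devpath0=pcie/1/4/
    devpath1=pcie/1/7/
    0:ccode=US
    0:boardrev=0x1203
    1:ccode=DE

Stripping for domain_nr=1/bus_nr=4 matches devpath0, so the result keeps ccode=US and boardrev=0x1203 with the 0: prefixes removed and drops everything else.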
+
+/* brcmf_fw_strip_multi_v2(): Some nvram files contain settings for multiple
+ * devices. Strip the data down to a single device; domain_nr/bus_nr
+ * determine which entries are returned. v2 is the format where nvram is
+ * stored uncompressed and all relevant valid entries are identified by a
+ * pcie/domain_nr/bus_nr/ prefix.
+ */
+static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr,
+				    u16 bus_nr)
+{
+	char prefix[BRCMF_FW_NVRAM_PCIEDEV_LEN];
+	size_t len;
+	u32 i, j;
+	u8 *nvram;
+
+	nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL);
+	if (!nvram)
+		goto fail;
+
+	/* Copy all valid entries, release old nvram and assign new one.
+	 * Valid entries are of type pcie/X/Y/ where X = domain_nr and
+	 * Y = bus_nr.
+	 */
+	snprintf(prefix, sizeof(prefix), "pcie/%d/%d/", domain_nr, bus_nr);
+	len = strlen(prefix);
+	i = 0;
+	j = 0;
+	while (i < nvp->nvram_len - len) {
+		if (strncmp(&nvp->nvram[i], prefix, len) == 0) {
+			i += len;
+			while (nvp->nvram[i] != 0) {
+				nvram[j] = nvp->nvram[i];
+				i++;
+				j++;
+			}
+			nvram[j] = 0;
+			j++;
+		}
+		while (nvp->nvram[i] != 0)
+			i++;
+		i++;
+	}
+	kfree(nvp->nvram);
+	nvp->nvram = nvram;
+	nvp->nvram_len = j;
+	return;
+fail:
+	kfree(nvram);
+	nvp->nvram_len = 0;
+}
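In the uncompressed v2 layout each relevant entry carries the full prefix instead; a hypothetical file for the same device:

    pcie/1/4/ccode=US
    pcie/1/4/boardrev=0x1203
    pcie/1/7/ccode=DE

Stripping for domain_nr=1/bus_nr=4 keeps the first two entries, minus the pcie/1/4/ prefix, and drops the third.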
+
 /* brcmf_fw_nvram_strip(): Takes a buffer of "<var>=<value>\n" lines read
  * from a file and ending in a NUL. Removes carriage returns, empty lines,
  * comment lines, and converts newlines to NULs. Shortens the buffer as
  * needed and pads with NULs. The end of the buffer is completed with a
  * token identifying the length of the buffer.
  */
-static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length)
+static void *brcmf_fw_nvram_strip(const u8 *data, size_t data_len,
+				  u32 *new_length, u16 domain_nr, u16 bus_nr)
 {
 	struct nvram_parser nvp;
 	u32 pad;
 	u32 token;
 	__le32 token_le;
 
-	if (brcmf_init_nvram_parser(&nvp, nv) < 0)
+	if (brcmf_init_nvram_parser(&nvp, data, data_len) < 0)
 		return NULL;
 
-	while (nvp.pos < nv->size) {
+	while (nvp.pos < data_len) {
 		nvp.state = nv_parser_states[nvp.state](&nvp);
 		if (nvp.state == END)
 			break;
 	}
+	if (nvp.multi_dev_v1)
+		brcmf_fw_strip_multi_v1(&nvp, domain_nr, bus_nr);
+	else if (nvp.multi_dev_v2)
+		brcmf_fw_strip_multi_v2(&nvp, domain_nr, bus_nr);
+
+	if (nvp.nvram_len == 0) {
+		kfree(nvp.nvram);
+		return NULL;
+	}
+
 	pad = nvp.nvram_len;
 	*new_length = roundup(nvp.nvram_len + 1, 4);
 	while (pad != *new_length) {
@@ -239,6 +415,8 @@
 	u16 flags;
 	const struct firmware *code;
 	const char *nvram_name;
+	u16 domain_nr;
+	u16 bus_nr;
 	void (*done)(struct device *dev, const struct firmware *fw,
 		     void *nvram_image, u32 nvram_len);
 };
@@ -254,7 +432,8 @@
 		goto fail;
 
 	if (fw) {
-		nvram = brcmf_fw_nvram_strip(fw, &nvram_length);
+		nvram = brcmf_fw_nvram_strip(fw->data, fw->size, &nvram_length,
+					     fwctx->domain_nr, fwctx->bus_nr);
 		release_firmware(fw);
 		if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
 			goto fail;
@@ -309,11 +488,12 @@
 	kfree(fwctx);
 }
 
-int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
-			   const char *code, const char *nvram,
-			   void (*fw_cb)(struct device *dev,
-					 const struct firmware *fw,
-					 void *nvram_image, u32 nvram_len))
+int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
+				const char *code, const char *nvram,
+				void (*fw_cb)(struct device *dev,
+					      const struct firmware *fw,
+					      void *nvram_image, u32 nvram_len),
+				u16 domain_nr, u16 bus_nr)
 {
 	struct brcmf_fw *fwctx;
 
@@ -333,8 +513,21 @@
 	fwctx->done = fw_cb;
 	if (flags & BRCMF_FW_REQUEST_NVRAM)
 		fwctx->nvram_name = nvram;
+	fwctx->domain_nr = domain_nr;
+	fwctx->bus_nr = bus_nr;
 
 	return request_firmware_nowait(THIS_MODULE, true, code, dev,
 				       GFP_KERNEL, fwctx,
 				       brcmf_fw_request_code_done);
 }
+
+int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
+			   const char *code, const char *nvram,
+			   void (*fw_cb)(struct device *dev,
+					 const struct firmware *fw,
+					 void *nvram_image, u32 nvram_len))
+{
+	return brcmf_fw_get_firmwares_pcie(dev, flags, code, nvram, fw_cb, 0,
+					   0);
+}
+
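The original entry point survives as this wrapper passing domain 0/bus 0, so SDIO and USB callers are untouched while PCIe supplies its real location. A hedged usage sketch (the callback body is illustrative):

    static void fw_done(struct device *dev, const struct firmware *fw,
                        void *nvram_image, u32 nvram_len)
    {
            /* hand the code image and per-device NVRAM to the bus layer */
    }

    /* SDIO/USB keep the old API (implicitly domain 0, bus 0): */
    err = brcmf_fw_get_firmwares(dev, BRCMF_FW_REQUEST_NVRAM,
                                 fw_name, nvram_name, fw_done);

    /* PCIe passes its location so multi-device NVRAM can be stripped: */
    err = brcmf_fw_get_firmwares_pcie(dev, BRCMF_FW_REQUEST_NVRAM,
                                      fw_name, nvram_name, fw_done,
                                      domain_nr, bus_nr);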
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
index 4d34823..604dd48 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
@@ -32,6 +32,12 @@
  * fails it will not use the callback, but call device_release_driver()
  * instead which will call the driver .remove() callback.
  */
+int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
+				const char *code, const char *nvram,
+				void (*fw_cb)(struct device *dev,
+					      const struct firmware *fw,
+					      void *nvram_image, u32 nvram_len),
+				u16 domain_nr, u16 bus_nr);
 int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
 			   const char *code, const char *nvram,
 			   void (*fw_cb)(struct device *dev,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
index eb13253..5944063 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
@@ -249,8 +249,8 @@
 }
 
 
-void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
-			    struct sk_buff *skb)
+u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+			   struct sk_buff *skb)
 {
 	struct brcmf_flowring_ring *ring;
 
@@ -271,6 +271,7 @@
 		if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)
 			brcmf_flowring_block(flow, flowid, false);
 	}
+	return skb_queue_len(&ring->skblist);
 }
 
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/brcm80211/brcmfmac/flowring.h
index a34cd39..5551861 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.h
@@ -64,8 +64,8 @@
 void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid);
 void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid);
 u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid);
-void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
-			    struct sk_buff *skb);
+u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+			   struct sk_buff *skb);
 struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid);
 void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
 			     struct sk_buff *skb);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index 3749209..297911f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -32,7 +32,11 @@
 #define	BRCMF_BSS_INFO_VERSION	109 /* curr ver of brcmf_bss_info_le struct */
 #define BRCMF_BSS_RSSI_ON_CHANNEL	0x0002
 
-#define BRCMF_STA_ASSOC			0x10		/* Associated */
+#define BRCMF_STA_WME              0x00000002      /* WMM association */
+#define BRCMF_STA_AUTHE            0x00000008      /* Authenticated */
+#define BRCMF_STA_ASSOC            0x00000010      /* Associated */
+#define BRCMF_STA_AUTHO            0x00000020      /* Authorized */
+#define BRCMF_STA_SCBSTATS         0x00004000      /* Per STA debug stats */
 
 /* size of brcmf_scan_params not including variable length array */
 #define BRCMF_SCAN_PARAMS_FIXED_SIZE	64
@@ -113,6 +117,7 @@
 #define BRCMF_WOWL_MAXPATTERNSIZE	128
 
 #define BRCMF_COUNTRY_BUF_SZ		4
+#define BRCMF_ANT_MAX			4
 
 /* join preference types for join_pref iovar */
 enum brcmf_join_pref_types {
@@ -456,25 +461,61 @@
 };
 
 struct brcmf_sta_info_le {
-	__le16	ver;		/* version of this struct */
-	__le16	len;		/* length in bytes of this structure */
-	__le16	cap;		/* sta's advertised capabilities */
-	__le32	flags;		/* flags defined below */
-	__le32	idle;		/* time since data pkt rx'd from sta */
-	u8	ea[ETH_ALEN];		/* Station address */
-	__le32	count;			/* # rates in this set */
-	u8	rates[BRCMF_MAXRATES_IN_SET];	/* rates in 500kbps units */
+	__le16 ver;		/* version of this struct */
+	__le16 len;		/* length in bytes of this structure */
+	__le16 cap;		/* sta's advertised capabilities */
+	__le32 flags;		/* flags defined below */
+	__le32 idle;		/* time since data pkt rx'd from sta */
+	u8 ea[ETH_ALEN];		/* Station address */
+	__le32 count;			/* # rates in this set */
+	u8 rates[BRCMF_MAXRATES_IN_SET];	/* rates in 500kbps units */
 						/* w/hi bit set if basic */
-	__le32	in;		/* seconds elapsed since associated */
-	__le32	listen_interval_inms; /* Min Listen interval in ms for STA */
-	__le32	tx_pkts;	/* # of packets transmitted */
-	__le32	tx_failures;	/* # of packets failed */
-	__le32	rx_ucast_pkts;	/* # of unicast packets received */
-	__le32	rx_mcast_pkts;	/* # of multicast packets received */
-	__le32	tx_rate;	/* Rate of last successful tx frame */
-	__le32	rx_rate;	/* Rate of last successful rx frame */
-	__le32	rx_decrypt_succeeds;	/* # of packet decrypted successfully */
-	__le32	rx_decrypt_failures;	/* # of packet decrypted failed */
+	__le32 in;		/* seconds elapsed since associated */
+	__le32 listen_interval_inms; /* Min Listen interval in ms for STA */
+	__le32 tx_pkts;	/* # of packets transmitted */
+	__le32 tx_failures;	/* # of packets failed */
+	__le32 rx_ucast_pkts;	/* # of unicast packets received */
+	__le32 rx_mcast_pkts;	/* # of multicast packets received */
+	__le32 tx_rate;	/* Rate of last successful tx frame */
+	__le32 rx_rate;	/* Rate of last successful rx frame */
+	__le32 rx_decrypt_succeeds;	/* # of packet decrypted successfully */
+	__le32 rx_decrypt_failures;	/* # of packet decrypted failed */
+	__le32 tx_tot_pkts;    /* # of tx pkts (ucast + mcast) */
+	__le32 rx_tot_pkts;    /* # of data packets recvd (uni + mcast) */
+	__le32 tx_mcast_pkts;  /* # of mcast pkts txed */
+	__le64 tx_tot_bytes;   /* data bytes txed (ucast + mcast) */
+	__le64 rx_tot_bytes;   /* data bytes recvd (ucast + mcast) */
+	__le64 tx_ucast_bytes; /* data bytes txed (ucast) */
+	__le64 tx_mcast_bytes; /* # data bytes txed (mcast) */
+	__le64 rx_ucast_bytes; /* data bytes recvd (ucast) */
+	__le64 rx_mcast_bytes; /* data bytes recvd (mcast) */
+	s8 rssi[BRCMF_ANT_MAX];   /* per antenna rssi */
+	s8 nf[BRCMF_ANT_MAX];     /* per antenna noise floor */
+	__le16 aid;                    /* association ID */
+	__le16 ht_capabilities;        /* advertised ht caps */
+	__le16 vht_flags;              /* converted vht flags */
+	__le32 tx_pkts_retry_cnt;      /* # of frames where a retry was
+					 * exhausted.
+					 */
+	__le32 tx_pkts_retry_exhausted; /* # of user frames where a retry
+					 * was exhausted
+					 */
+	s8 rx_lastpkt_rssi[BRCMF_ANT_MAX]; /* Per antenna RSSI of last
+					    * received data frame.
+					    */
+	/* TX WLAN retry/failure statistics:
+	 * Separated for host requested frames and locally generated frames.
+	 * Include unicast frame only where the retries/failures can be counted.
+	 */
+	__le32 tx_pkts_total;          /* # user frames sent successfully */
+	__le32 tx_pkts_retries;        /* # user frames retries */
+	__le32 tx_pkts_fw_total;       /* # FW generated sent successfully */
+	__le32 tx_pkts_fw_retries;     /* # retries for FW generated frames */
+	__le32 tx_pkts_fw_retry_exhausted;     /* # FW generated where a retry
+						* was exhausted
+						*/
+	__le32 rx_pkts_retried;        /* # rx with retry bit set */
+	__le32 tx_rate_fallback;       /* lowest fallback TX rate */
 };
 
 struct brcmf_chanspec_list {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index f0dda0e..5017eaa 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -635,7 +635,7 @@
 	return 0;
 }
 
-static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
+static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
 					  u32 slot_id, struct sk_buff **pktout,
 					  bool remove_item)
 {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 65efb14..898c380 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -73,8 +73,10 @@
 #define BRCMF_MSGBUF_TX_FLUSH_CNT1		32
 #define BRCMF_MSGBUF_TX_FLUSH_CNT2		96
 
-#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS	64
+#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS	96
 #define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS	32
+#define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS		48
+
 
 struct msgbuf_common_hdr {
 	u8				msgtype;
@@ -278,16 +280,6 @@
 	struct brcmf_msgbuf_pktid *array;
 };
 
-
-/* dma flushing needs implementation for mips and arm platforms. Should
- * be put in util. Note, this is not real flushing. It is virtual non
- * cached memory. Only write buffers should have to be drained. Though
- * this may be different depending on platform......
- */
-#define brcmf_dma_flush(addr, len)
-#define brcmf_dma_invalidate_cache(addr, len)
-
-
 static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);
 
 
@@ -462,7 +454,6 @@
 		memcpy(msgbuf->ioctbuf, buf, buf_len);
 	else
 		memset(msgbuf->ioctbuf, 0, buf_len);
-	brcmf_dma_flush(ioctl_buf, buf_len);
 
 	err = brcmf_commonring_write_complete(commonring);
 	brcmf_commonring_unlock(commonring);
@@ -795,6 +786,8 @@
 	struct brcmf_flowring *flow = msgbuf->flow;
 	struct ethhdr *eh = (struct ethhdr *)(skb->data);
 	u32 flowid;
+	u32 queue_count;
+	bool force;
 
 	flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
 	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
@@ -802,8 +795,9 @@
 		if (flowid == BRCMF_FLOWRING_INVALID_ID)
 			return -ENOMEM;
 	}
-	brcmf_flowring_enqueue(flow, flowid, skb);
-	brcmf_msgbuf_schedule_txdata(msgbuf, flowid, false);
+	queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
+	force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
+	brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);
 
 	return 0;
 }
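Returning the queue depth from the enqueue lets this path force the TX worker on every 32nd packet instead of on each one, batching doorbell work while steady traffic keeps the queue non-empty. The modulo test in isolation (constant inlined for the sketch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TRICKLE_THRS 32 /* BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS */

    /* true on packet 32, 64, 96, ... */
    static bool force_txworker(uint32_t queue_count)
    {
            return (queue_count % TRICKLE_THRS) == 0;
    }

    int main(void)
    {
            printf("%d %d %d\n", force_txworker(31), force_txworker(32),
                   force_txworker(33));      /* prints: 0 1 0 */
            return 0;
    }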
@@ -1265,19 +1259,27 @@
 {
 	void *buf;
 	u16 count;
+	u16 processed;
 
 again:
 	buf = brcmf_commonring_get_read_ptr(commonring, &count);
 	if (buf == NULL)
 		return;
 
+	processed = 0;
 	while (count) {
 		brcmf_msgbuf_process_msgtype(msgbuf,
 					     buf + msgbuf->rx_dataoffset);
 		buf += brcmf_commonring_len_item(commonring);
+		processed++;
+		if (processed == BRCMF_MSGBUF_UPDATE_RX_PTR_THRS) {
+			brcmf_commonring_read_complete(commonring, processed);
+			processed = 0;
+		}
 		count--;
 	}
-	brcmf_commonring_read_complete(commonring);
+	if (processed)
+		brcmf_commonring_read_complete(commonring, processed);
 
 	if (commonring->r_ptr == 0)
 		goto again;
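Note that brcmf_commonring_read_complete() now takes a count, and slots are returned every 48 processed messages so the firmware can re-post RX buffers before the whole batch drains. The chunking pattern as a stand-alone sketch (ring_read_complete() is a stub for the real call):

    #include <stdio.h>

    #define RX_PTR_THRS 48  /* BRCMF_MSGBUF_UPDATE_RX_PTR_THRS */

    static void ring_read_complete(unsigned int n)
    {
            printf("returned %u slots\n", n);
    }

    static void consume_ring(unsigned int count)
    {
            unsigned int processed = 0;

            while (count--) {
                    /* ...process one message... */
                    if (++processed == RX_PTR_THRS) {
                            ring_read_complete(processed);  /* early refill */
                            processed = 0;
                    }
            }
            if (processed)
                    ring_read_complete(processed);
    }

    int main(void)
    {
            consume_ring(100);      /* prints 48, 48, 4 */
            return 0;
    }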
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.c b/drivers/net/wireless/brcm80211/brcmfmac/of.c
index c824570..03f35e0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/of.c
@@ -39,10 +39,16 @@
 	if (!sdiodev->pdata)
 		return;
 
+	if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
+		sdiodev->pdata->drive_strength = val;
+
+	/* make sure there are interrupts defined in the node */
+	if (!of_find_property(np, "interrupts", NULL))
+		return;
+
 	irq = irq_of_parse_and_map(np, 0);
 	if (!irq) {
 		brcmf_err("interrupt could not be mapped\n");
-		devm_kfree(dev, sdiodev->pdata);
 		return;
 	}
 	irqf = irqd_get_trigger_type(irq_get_irq_data(irq));
@@ -50,7 +56,4 @@
 	sdiodev->pdata->oob_irq_supported = true;
 	sdiodev->pdata->oob_irq_nr = irq;
 	sdiodev->pdata->oob_irq_flags = irqf;
-
-	if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
-		sdiodev->pdata->drive_strength = val;
 }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index 710fbe5..a9ba775 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
 #include <net/cfg80211.h>
 
 #include <brcmu_wifi.h>
@@ -1908,105 +1909,6 @@
 
 
 /**
- * brcmf_p2p_attach() - attach for P2P.
- *
- * @cfg: driver private data for cfg80211 interface.
- */
-s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg)
-{
-	struct brcmf_if *pri_ifp;
-	struct brcmf_if *p2p_ifp;
-	struct brcmf_cfg80211_vif *p2p_vif;
-	struct brcmf_p2p_info *p2p;
-	struct brcmf_pub *drvr;
-	s32 bssidx;
-	s32 err = 0;
-
-	p2p = &cfg->p2p;
-	p2p->cfg = cfg;
-
-	drvr = cfg->pub;
-
-	pri_ifp = drvr->iflist[0];
-	p2p_ifp = drvr->iflist[1];
-
-	p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif;
-
-	if (p2p_ifp) {
-		p2p_vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_P2P_DEVICE,
-					  false);
-		if (IS_ERR(p2p_vif)) {
-			brcmf_err("could not create discovery vif\n");
-			err = -ENOMEM;
-			goto exit;
-		}
-
-		p2p_vif->ifp = p2p_ifp;
-		p2p_ifp->vif = p2p_vif;
-		p2p_vif->wdev.netdev = p2p_ifp->ndev;
-		p2p_ifp->ndev->ieee80211_ptr = &p2p_vif->wdev;
-		SET_NETDEV_DEV(p2p_ifp->ndev, wiphy_dev(cfg->wiphy));
-
-		p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = p2p_vif;
-
-		brcmf_p2p_generate_bss_mac(p2p, NULL);
-		memcpy(p2p_ifp->mac_addr, p2p->dev_addr, ETH_ALEN);
-		brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr);
-
-		/* Initialize P2P Discovery in the firmware */
-		err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
-		if (err < 0) {
-			brcmf_err("set p2p_disc error\n");
-			brcmf_free_vif(p2p_vif);
-			goto exit;
-		}
-		/* obtain bsscfg index for P2P discovery */
-		err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx);
-		if (err < 0) {
-			brcmf_err("retrieving discover bsscfg index failed\n");
-			brcmf_free_vif(p2p_vif);
-			goto exit;
-		}
-		/* Verify that firmware uses same bssidx as driver !! */
-		if (p2p_ifp->bssidx != bssidx) {
-			brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n",
-				  bssidx, p2p_ifp->bssidx);
-			brcmf_free_vif(p2p_vif);
-			goto exit;
-		}
-
-		init_completion(&p2p->send_af_done);
-		INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);
-		init_completion(&p2p->afx_hdl.act_frm_scan);
-		init_completion(&p2p->wait_next_af);
-	}
-exit:
-	return err;
-}
-
-
-/**
- * brcmf_p2p_detach() - detach P2P.
- *
- * @p2p: P2P specific data.
- */
-void brcmf_p2p_detach(struct brcmf_p2p_info *p2p)
-{
-	struct brcmf_cfg80211_vif *vif;
-
-	vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
-	if (vif != NULL) {
-		brcmf_p2p_cancel_remain_on_channel(vif->ifp);
-		brcmf_p2p_deinit_discovery(p2p);
-		/* remove discovery interface */
-		brcmf_free_vif(vif);
-		p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
-	}
-	/* just set it all to zero */
-	memset(p2p, 0, sizeof(*p2p));
-}
-
-/**
  * brcmf_p2p_get_current_chanspec() - Get current operation channel.
  *
  * @p2p: P2P specific data.
@@ -2238,6 +2140,7 @@
 {
 	cfg80211_unregister_wdev(&vif->wdev);
 	p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
+	brcmf_remove_interface(vif->ifp->drvr, vif->ifp->bssidx);
 	brcmf_free_vif(vif);
 }
 
@@ -2364,6 +2267,8 @@
 		break;
 
 	case NL80211_IFTYPE_P2P_DEVICE:
+		brcmf_p2p_cancel_remain_on_channel(vif->ifp);
+		brcmf_p2p_deinit_discovery(p2p);
 		brcmf_p2p_delete_p2pdev(p2p, vif);
 		return 0;
 	default:
@@ -2425,3 +2330,103 @@
 	clear_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state);
 	mutex_unlock(&cfg->usr_sync);
 }
+
+/**
+ * brcmf_p2p_attach() - attach for P2P.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ */
+s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg)
+{
+	struct brcmf_if *pri_ifp;
+	struct brcmf_if *p2p_ifp;
+	struct brcmf_cfg80211_vif *p2p_vif;
+	struct brcmf_p2p_info *p2p;
+	struct brcmf_pub *drvr;
+	s32 bssidx;
+	s32 err = 0;
+
+	p2p = &cfg->p2p;
+	p2p->cfg = cfg;
+
+	drvr = cfg->pub;
+
+	pri_ifp = drvr->iflist[0];
+	p2p_ifp = drvr->iflist[1];
+
+	p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif;
+
+	if (p2p_ifp) {
+		p2p_vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_P2P_DEVICE,
+					  false);
+		if (IS_ERR(p2p_vif)) {
+			brcmf_err("could not create discovery vif\n");
+			err = -ENOMEM;
+			goto exit;
+		}
+
+		p2p_vif->ifp = p2p_ifp;
+		p2p_ifp->vif = p2p_vif;
+		p2p_vif->wdev.netdev = p2p_ifp->ndev;
+		p2p_ifp->ndev->ieee80211_ptr = &p2p_vif->wdev;
+		SET_NETDEV_DEV(p2p_ifp->ndev, wiphy_dev(cfg->wiphy));
+
+		p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = p2p_vif;
+
+		brcmf_p2p_generate_bss_mac(p2p, NULL);
+		memcpy(p2p_ifp->mac_addr, p2p->dev_addr, ETH_ALEN);
+		brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr);
+
+		/* Initialize P2P Discovery in the firmware */
+		err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
+		if (err < 0) {
+			brcmf_err("set p2p_disc error\n");
+			brcmf_free_vif(p2p_vif);
+			goto exit;
+		}
+		/* obtain bsscfg index for P2P discovery */
+		err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx);
+		if (err < 0) {
+			brcmf_err("retrieving discover bsscfg index failed\n");
+			brcmf_free_vif(p2p_vif);
+			goto exit;
+		}
+		/* Verify that firmware uses same bssidx as driver !! */
+		if (p2p_ifp->bssidx != bssidx) {
+			brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n",
+				  bssidx, p2p_ifp->bssidx);
+			brcmf_free_vif(p2p_vif);
+			goto exit;
+		}
+
+		init_completion(&p2p->send_af_done);
+		INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);
+		init_completion(&p2p->afx_hdl.act_frm_scan);
+		init_completion(&p2p->wait_next_af);
+	}
+exit:
+	return err;
+}
+
+/**
+ * brcmf_p2p_detach() - detach P2P.
+ *
+ * @p2p: P2P specific data.
+ */
+void brcmf_p2p_detach(struct brcmf_p2p_info *p2p)
+{
+	struct brcmf_cfg80211_vif *vif;
+
+	vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+	if (vif != NULL) {
+		brcmf_p2p_cancel_remain_on_channel(vif->ifp);
+		brcmf_p2p_deinit_discovery(p2p);
+		/* remove discovery interface */
+		rtnl_lock();
+		brcmf_p2p_delete_p2pdev(p2p, vif);
+		rtnl_unlock();
+	}
+	/* just set it all to zero */
+	memset(p2p, 0, sizeof(*p2p));
+}
+
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
index 1831ecd..3a98c43 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
@@ -51,6 +51,8 @@
 #define BRCMF_PCIE_4356_NVRAM_NAME		"brcm/brcmfmac4356-pcie.txt"
 #define BRCMF_PCIE_43570_FW_NAME		"brcm/brcmfmac43570-pcie.bin"
 #define BRCMF_PCIE_43570_NVRAM_NAME		"brcm/brcmfmac43570-pcie.txt"
+#define BRCMF_PCIE_4358_FW_NAME			"brcm/brcmfmac4358-pcie.bin"
+#define BRCMF_PCIE_4358_NVRAM_NAME		"brcm/brcmfmac4358-pcie.txt"
 
 #define BRCMF_PCIE_FW_UP_TIMEOUT		2000 /* msec */
 
@@ -110,10 +112,11 @@
 						 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
 						 BRCMF_PCIE_MB_INT_D2H3_DB1)
 
-#define BRCMF_PCIE_MIN_SHARED_VERSION		4
+#define BRCMF_PCIE_MIN_SHARED_VERSION		5
 #define BRCMF_PCIE_MAX_SHARED_VERSION		5
 #define BRCMF_PCIE_SHARED_VERSION_MASK		0x00FF
-#define BRCMF_PCIE_SHARED_TXPUSH_SUPPORT	0x4000
+#define BRCMF_PCIE_SHARED_DMA_INDEX		0x10000
+#define BRCMF_PCIE_SHARED_DMA_2B_IDX		0x100000
 
 #define BRCMF_PCIE_FLAGS_HTOD_SPLIT		0x4000
 #define BRCMF_PCIE_FLAGS_DTOH_SPLIT		0x8000
@@ -145,6 +148,10 @@
 #define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET	8
 #define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET	12
 #define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET	16
+#define BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET	20
+#define BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET	28
+#define BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET	36
+#define BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET	44
 #define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET	0
 #define BRCMF_SHARED_RING_MAX_SUB_QUEUES	52
 
@@ -189,6 +196,8 @@
 MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4358_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4358_NVRAM_NAME);
 
 
 struct brcmf_pcie_console {
@@ -244,6 +253,13 @@
 	bool mbdata_completed;
 	bool irq_allocated;
 	bool wowl_enabled;
+	u8 dma_idx_sz;
+	void *idxbuf;
+	u32 idxbuf_sz;
+	dma_addr_t idxbuf_dmahandle;
+	u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
+	void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+			  u16 value);
 };
 
 struct brcmf_pcie_ringbuf {
@@ -273,15 +289,6 @@
 };
 
 
-/* dma flushing needs implementation for mips and arm platforms. Should
- * be put in util. Note, this is not real flushing. It is virtual non
- * cached memory. Only write buffers should have to be drained. Though
- * this may be different depending on platform......
- */
-#define brcmf_dma_flush(addr, len)
-#define brcmf_dma_invalidate_cache(addr, len)
-
-
 static u32
 brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
 {
@@ -329,6 +336,25 @@
 }
 
 
+static u16
+brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
+{
+	u16 *address = devinfo->idxbuf + mem_offset;
+
+	return (*(address));
+}
+
+
+static void
+brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+		     u16 value)
+{
+	u16 *address = devinfo->idxbuf + mem_offset;
+
+	*(address) = value;
+}
+
+
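Ring index accesses now go through devinfo->read_ptr()/write_ptr(), selected once at setup depending on whether the indices live in TCM or in host memory. The dispatch shape as a stand-alone sketch (byte offsets simplified to array indices; names illustrative):

    #include <stdint.h>

    struct dev_info {
            uint16_t idx[8];        /* host-memory index area */
            uint16_t (*read_ptr)(struct dev_info *di, uint32_t off);
            void (*write_ptr)(struct dev_info *di, uint32_t off,
                              uint16_t val);
    };

    static uint16_t read_idx(struct dev_info *di, uint32_t off)
    {
            return di->idx[off];
    }

    static void write_idx(struct dev_info *di, uint32_t off, uint16_t val)
    {
            di->idx[off] = val;
    }

    /* chosen once at ring setup; all later ring code uses the pointers
     * and never cares which backend is active
     */
    static void select_hostmem_indices(struct dev_info *di)
    {
            di->read_ptr = read_idx;
            di->write_ptr = write_idx;
    }

    int main(void)
    {
            struct dev_info di = { { 0 } };

            select_hostmem_indices(&di);
            di.write_ptr(&di, 2, 7);
            return di.read_ptr(&di, 2) == 7 ? 0 : 1;
    }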
 static u32
 brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
 {
@@ -874,7 +900,7 @@
 	brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
 		  commonring->w_ptr, ring->id);
 
-	brcmf_pcie_write_tcm16(devinfo, ring->r_idx_addr, commonring->r_ptr);
+	devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
 
 	return 0;
 }
@@ -892,7 +918,7 @@
 	brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
 		  commonring->r_ptr, ring->id);
 
-	brcmf_pcie_write_tcm16(devinfo, ring->w_idx_addr, commonring->w_ptr);
+	devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
 
 	return 0;
 }
@@ -921,7 +947,7 @@
 	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
 		return -EIO;
 
-	commonring->r_ptr = brcmf_pcie_read_tcm16(devinfo, ring->r_idx_addr);
+	commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
 
 	brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
 		  commonring->w_ptr, ring->id);
@@ -939,7 +965,7 @@
 	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
 		return -EIO;
 
-	commonring->w_ptr = brcmf_pcie_read_tcm16(devinfo, ring->w_idx_addr);
+	commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
 
 	brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
 		  commonring->r_ptr, ring->id);
@@ -1044,6 +1070,13 @@
 	}
 	kfree(devinfo->shared.flowrings);
 	devinfo->shared.flowrings = NULL;
+	if (devinfo->idxbuf) {
+		dma_free_coherent(&devinfo->pdev->dev,
+				  devinfo->idxbuf_sz,
+				  devinfo->idxbuf,
+				  devinfo->idxbuf_dmahandle);
+		devinfo->idxbuf = NULL;
+	}
 }
 
 
@@ -1059,19 +1092,72 @@
 	u32 addr;
 	u32 ring_mem_ptr;
 	u32 i;
+	u64 address;
+	u32 bufsz;
 	u16 max_sub_queues;
+	u8 idx_offset;
 
 	ring_addr = devinfo->shared.ring_info_addr;
 	brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);
+	addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
+	max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
 
-	addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
-	d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
-	addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
-	d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
-	addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
-	h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
-	addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
-	h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+	if (devinfo->dma_idx_sz != 0) {
+		bufsz = (BRCMF_NROF_D2H_COMMON_MSGRINGS + max_sub_queues) *
+			devinfo->dma_idx_sz * 2;
+		devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
+						     &devinfo->idxbuf_dmahandle,
+						     GFP_KERNEL);
+		if (!devinfo->idxbuf)
+			devinfo->dma_idx_sz = 0;
+	}
+
+	if (devinfo->dma_idx_sz == 0) {
+		addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
+		d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+		addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
+		d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+		addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
+		h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+		addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
+		h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+		idx_offset = sizeof(u32);
+		devinfo->write_ptr = brcmf_pcie_write_tcm16;
+		devinfo->read_ptr = brcmf_pcie_read_tcm16;
+		brcmf_dbg(PCIE, "Using TCM indices\n");
+	} else {
+		memset(devinfo->idxbuf, 0, bufsz);
+		devinfo->idxbuf_sz = bufsz;
+		idx_offset = devinfo->dma_idx_sz;
+		devinfo->write_ptr = brcmf_pcie_write_idx;
+		devinfo->read_ptr = brcmf_pcie_read_idx;
+
+		h2d_w_idx_ptr = 0;
+		addr = ring_addr + BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET;
+		address = (u64)devinfo->idxbuf_dmahandle;
+		brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+		brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+
+		h2d_r_idx_ptr = h2d_w_idx_ptr + max_sub_queues * idx_offset;
+		addr = ring_addr + BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET;
+		address += max_sub_queues * idx_offset;
+		brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+		brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+
+		d2h_w_idx_ptr = h2d_r_idx_ptr + max_sub_queues * idx_offset;
+		addr = ring_addr + BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET;
+		address += max_sub_queues * idx_offset;
+		brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+		brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+
+		d2h_r_idx_ptr = d2h_w_idx_ptr +
+				BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
+		addr = ring_addr + BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET;
+		address += BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
+		brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+		brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+		brcmf_dbg(PCIE, "Using host memory indices\n");
+	}
 
 	addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
 	ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
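The host-memory index area is a single coherent allocation packing four arrays back to back; the host offsets handed to the rings and the DMA addresses written to the shared area advance in lock-step. A worked sketch of the layout arithmetic (2-byte indices, 5 submission queues and 3 D2H rings are illustrative values):

    #include <stdio.h>

    int main(void)
    {
            unsigned int idx_sz = 2;    /* BRCMF_PCIE_SHARED_DMA_2B_IDX */
            unsigned int max_sub = 5;   /* illustrative queue counts */
            unsigned int nrof_d2h = 3;

            unsigned int h2d_w = 0;
            unsigned int h2d_r = h2d_w + max_sub * idx_sz;
            unsigned int d2h_w = h2d_r + max_sub * idx_sz;
            unsigned int d2h_r = d2h_w + nrof_d2h * idx_sz;
            unsigned int bufsz = (max_sub + nrof_d2h) * idx_sz * 2;

            /* prints: 0 10 20 26 (bufsz 32) */
            printf("%u %u %u %u (bufsz %u)\n",
                   h2d_w, h2d_r, d2h_w, d2h_r, bufsz);
            return 0;
    }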
@@ -1085,8 +1171,8 @@
 		ring->id = i;
 		devinfo->shared.commonrings[i] = ring;
 
-		h2d_w_idx_ptr += sizeof(u32);
-		h2d_r_idx_ptr += sizeof(u32);
+		h2d_w_idx_ptr += idx_offset;
+		h2d_r_idx_ptr += idx_offset;
 		ring_mem_ptr += BRCMF_RING_MEM_SZ;
 	}
 
@@ -1100,13 +1186,11 @@
 		ring->id = i;
 		devinfo->shared.commonrings[i] = ring;
 
-		d2h_w_idx_ptr += sizeof(u32);
-		d2h_r_idx_ptr += sizeof(u32);
+		d2h_w_idx_ptr += idx_offset;
+		d2h_r_idx_ptr += idx_offset;
 		ring_mem_ptr += BRCMF_RING_MEM_SZ;
 	}
 
-	addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
-	max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
 	devinfo->shared.nrof_flowrings =
 			max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
 	rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
@@ -1130,15 +1214,15 @@
 					     ring);
 		ring->w_idx_addr = h2d_w_idx_ptr;
 		ring->r_idx_addr = h2d_r_idx_ptr;
-		h2d_w_idx_ptr += sizeof(u32);
-		h2d_r_idx_ptr += sizeof(u32);
+		h2d_w_idx_ptr += idx_offset;
+		h2d_r_idx_ptr += idx_offset;
 	}
 	devinfo->shared.flowrings = rings;
 
 	return 0;
 
 fail:
-	brcmf_err("Allocating commonring buffers failed\n");
+	brcmf_err("Allocating ring buffers failed\n");
 	brcmf_pcie_release_ringbuffers(devinfo);
 	return -ENOMEM;
 }
@@ -1171,7 +1255,6 @@
 		goto fail;
 
 	memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
-	brcmf_dma_flush(devinfo->shared.scratch, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
 
 	addr = devinfo->shared.tcm_base_address +
 	       BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
@@ -1189,7 +1272,6 @@
 		goto fail;
 
 	memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
-	brcmf_dma_flush(devinfo->shared.ringupd, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
 
 	addr = devinfo->shared.tcm_base_address +
 	       BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
@@ -1276,10 +1358,13 @@
 		brcmf_err("Unsupported PCIE version %d\n", version);
 		return -EINVAL;
 	}
-	if (shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT) {
-		brcmf_err("Unsupported legacy TX mode 0x%x\n",
-			  shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT);
-		return -EINVAL;
+
+	/* check whether the firmware supports DMA indices */
+	if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
+		if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
+			devinfo->dma_idx_sz = sizeof(u16);
+		else
+			devinfo->dma_idx_sz = sizeof(u32);
 	}
 
 	addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
@@ -1333,6 +1418,10 @@
 		fw_name = BRCMF_PCIE_43570_FW_NAME;
 		nvram_name = BRCMF_PCIE_43570_NVRAM_NAME;
 		break;
+	case BRCM_CC_4358_CHIP_ID:
+		fw_name = BRCMF_PCIE_4358_FW_NAME;
+		nvram_name = BRCMF_PCIE_4358_NVRAM_NAME;
+		break;
 	default:
 		brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
 		return -ENODEV;
@@ -1540,20 +1629,7 @@
 
 static int brcmf_pcie_buscoreprep(void *ctx)
 {
-	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
-	int err;
-
-	err = brcmf_pcie_get_resource(devinfo);
-	if (err == 0) {
-		/* Set CC watchdog to reset all the cores on the chip to bring
-		 * back dongle to a sane state.
-		 */
-		brcmf_pcie_buscore_write32(ctx, CORE_CC_REG(SI_ENUM_BASE,
-							    watchdog), 4);
-		msleep(100);
-	}
-
-	return err;
+	return brcmf_pcie_get_resource(ctx);
 }
 
 
@@ -1609,7 +1685,7 @@
 		bus->msgbuf->commonrings[i] =
 				&devinfo->shared.commonrings[i]->commonring;
 
-	flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(flowrings),
+	flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*flowrings),
 			    GFP_KERNEL);
 	if (!flowrings)
 		goto fail;
@@ -1641,8 +1717,13 @@
 	struct brcmf_pciedev_info *devinfo;
 	struct brcmf_pciedev *pcie_bus_dev;
 	struct brcmf_bus *bus;
+	u16 domain_nr;
+	u16 bus_nr;
 
-	brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
+	domain_nr = pci_domain_nr(pdev->bus) + 1;
+	bus_nr = pdev->bus->number;
+	brcmf_dbg(PCIE, "Enter %x:%x (%d/%d)\n", pdev->vendor, pdev->device,
+		  domain_nr, bus_nr);
 
 	ret = -ENOMEM;
 	devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
@@ -1691,10 +1772,10 @@
 	if (ret)
 		goto fail_bus;
 
-	ret = brcmf_fw_get_firmwares(bus->dev, BRCMF_FW_REQUEST_NVRAM |
-					       BRCMF_FW_REQ_NV_OPTIONAL,
-				     devinfo->fw_name, devinfo->nvram_name,
-				     brcmf_pcie_setup);
+	ret = brcmf_fw_get_firmwares_pcie(bus->dev, BRCMF_FW_REQUEST_NVRAM |
+						    BRCMF_FW_REQ_NV_OPTIONAL,
+					  devinfo->fw_name, devinfo->nvram_name,
+					  brcmf_pcie_setup, domain_nr, bus_nr);
 	if (ret == 0)
 		return 0;
 fail_bus:
@@ -1730,6 +1811,7 @@
 		brcmf_pcie_intr_disable(devinfo);
 
 	brcmf_detach(&pdev->dev);
+	brcmf_pcie_reset_device(devinfo);
 
 	kfree(bus->bus_priv.pcie);
 	kfree(bus->msgbuf->flowrings);
@@ -1850,9 +1932,11 @@
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
 	{ /* end: all zeroes */ }
 };
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index ab0c898..d36f5f3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -601,6 +601,8 @@
 #define BCM43241B0_NVRAM_NAME		"brcm/brcmfmac43241b0-sdio.txt"
 #define BCM43241B4_FIRMWARE_NAME	"brcm/brcmfmac43241b4-sdio.bin"
 #define BCM43241B4_NVRAM_NAME		"brcm/brcmfmac43241b4-sdio.txt"
+#define BCM43241B5_FIRMWARE_NAME	"brcm/brcmfmac43241b5-sdio.bin"
+#define BCM43241B5_NVRAM_NAME		"brcm/brcmfmac43241b5-sdio.txt"
 #define BCM4329_FIRMWARE_NAME		"brcm/brcmfmac4329-sdio.bin"
 #define BCM4329_NVRAM_NAME		"brcm/brcmfmac4329-sdio.txt"
 #define BCM4330_FIRMWARE_NAME		"brcm/brcmfmac4330-sdio.bin"
@@ -628,6 +630,8 @@
 MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
 MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
 MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B5_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B5_NVRAM_NAME);
 MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
 MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
 MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
@@ -667,7 +671,8 @@
 static const struct brcmf_firmware_names brcmf_fwname_data[] = {
 	{ BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
 	{ BRCM_CC_43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
-	{ BRCM_CC_43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
+	{ BRCM_CC_43241_CHIP_ID, 0x00000020, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
+	{ BRCM_CC_43241_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43241B5) },
 	{ BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
 	{ BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
 	{ BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
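Each row matches on BIT(chiprev), so 0x0000001F selects revs 0-4 (the B0 image), 0x00000020 exactly rev 5 (B4), and 0xFFFFFFC0 revs 6 and up (B5). A stand-alone sketch of that lookup (row contents reduced to a label):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    struct fw_row {
            uint32_t chipid;
            uint32_t revmsk;
            const char *name;
    };

    static const struct fw_row rows[] = {
            { 43241, 0x0000001F, "B0" },    /* revs 0..4  */
            { 43241, 0x00000020, "B4" },    /* rev 5 only */
            { 43241, 0xFFFFFFC0, "B5" },    /* revs 6..31 */
    };

    static const char *pick(uint32_t chipid, uint32_t chiprev)
    {
            for (unsigned int i = 0; i < sizeof(rows) / sizeof(rows[0]); i++)
                    if (rows[i].chipid == chipid &&
                        (rows[i].revmsk & BIT(chiprev)))
                            return rows[i].name;
            return "none";
    }

    int main(void)
    {
            /* prints: B0 B4 B5 */
            printf("%s %s %s\n", pick(43241, 4), pick(43241, 5),
                   pick(43241, 6));
            return 0;
    }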
@@ -2815,6 +2820,8 @@
 	struct brcmf_sdio *bus = sdiodev->bus;
 
 	brcmf_dbg(TRACE, "Enter: pkt: data %p len %d\n", pkt->data, pkt->len);
+	if (sdiodev->state != BRCMF_SDIOD_DATA)
+		return -EIO;
 
 	/* Add space for the header */
 	skb_push(pkt, bus->tx_hdrlen);
@@ -2943,6 +2950,8 @@
 	int ret;
 
 	brcmf_dbg(TRACE, "Enter\n");
+	if (sdiodev->state != BRCMF_SDIOD_DATA)
+		return -EIO;
 
 	/* Send from dpc */
 	bus->ctrl_frame_buf = msg;
@@ -3233,6 +3242,8 @@
 	struct brcmf_sdio *bus = sdiodev->bus;
 
 	brcmf_dbg(TRACE, "Enter\n");
+	if (sdiodev->state != BRCMF_SDIOD_DATA)
+		return -EIO;
 
 	/* Wait until control frame is available */
 	timeleft = brcmf_sdio_dcmd_resp_wait(bus, &bus->rxlen, &pending);
@@ -3550,10 +3561,6 @@
 		return;
 	}
 
-	if (bus->sdiodev->state != BRCMF_SDIOD_DATA) {
-		brcmf_err("bus is down. we have nothing to do\n");
-		return;
-	}
 	/* Count the interrupt call */
 	bus->sdcnt.intrcount++;
 	if (in_interrupt())
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 5df6aa7..daba86d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1270,8 +1270,13 @@
 	bus->chiprev = bus_pub->chiprev;
 
 	/* request firmware here */
-	brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
-			       brcmf_usb_probe_phase2);
+	ret = brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo),
+				     NULL, brcmf_usb_probe_phase2);
+	if (ret) {
+		brcmf_err("firmware request failed: %d\n", ret);
+		goto fail;
+	}
+
 	return 0;
 
 fail:
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 4813506..ab775a5 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -41,8 +41,7 @@
 #define BRCMS_FLUSH_TIMEOUT	500 /* msec */
 
 /* Flags we support */
-#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
-	FIF_ALLMULTI | \
+#define MAC_FILTERS (FIF_ALLMULTI | \
 	FIF_FCSFAIL | \
 	FIF_CONTROL | \
 	FIF_OTHER_BSS | \
@@ -743,8 +742,6 @@
 	changed_flags &= MAC_FILTERS;
 	*total_flags &= MAC_FILTERS;
 
-	if (changed_flags & FIF_PROMISC_IN_BSS)
-		brcms_dbg_info(core, "FIF_PROMISC_IN_BSS\n");
 	if (changed_flags & FIF_ALLMULTI)
 		brcms_dbg_info(core, "FIF_ALLMULTI\n");
 	if (changed_flags & FIF_FCSFAIL)
@@ -1063,10 +1060,9 @@
  */
 static int ieee_hw_init(struct ieee80211_hw *hw)
 {
-	hw->flags = IEEE80211_HW_SIGNAL_DBM
-	    /* | IEEE80211_HW_CONNECTION_MONITOR  What is this? */
-	    | IEEE80211_HW_REPORTS_TX_ACK_STATUS
-	    | IEEE80211_HW_AMPDU_AGGREGATION;
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
 
 	hw->extra_tx_headroom = brcms_c_get_header_len();
 	hw->queues = N_TX_QUEUES;
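These conversions track mac80211's switch of hw->flags from a u32 to a bitmap once the flag count outgrew 32 bits; ieee80211_hw_set() token-pastes the short flag name and sets the corresponding bit. Roughly, paraphrasing include/net/mac80211.h:

    static inline void _ieee80211_hw_set(struct ieee80211_hw *hw,
                                         enum ieee80211_hw_flags flg)
    {
            __set_bit(flg, hw->flags);
    }
    #define ieee80211_hw_set(hw, flg) _ieee80211_hw_set(hw, IEEE80211_HW_##flg)

Reading a flag back goes through ieee80211_hw_check(hw, FLG) in the same style.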
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 369527e..9728be0 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -3571,7 +3571,7 @@
 
 	wlc->filter_flags = filter_flags;
 
-	if (filter_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
+	if (filter_flags & FIF_OTHER_BSS)
 		promisc_bits |= MCTL_PROMISC;
 
 	if (filter_flags & FIF_BCN_PRBRESP_PROMISC)
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 4efdd51..7a6daa3 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -45,6 +45,7 @@
 #define BRCM_CC_43567_CHIP_ID		43567
 #define BRCM_CC_43569_CHIP_ID		43569
 #define BRCM_CC_43570_CHIP_ID		43570
+#define BRCM_CC_4358_CHIP_ID		0x4358
 #define BRCM_CC_43602_CHIP_ID		43602
 
 /* USB Device IDs */
@@ -59,9 +60,11 @@
 #define BRCM_PCIE_4356_DEVICE_ID	0x43ec
 #define BRCM_PCIE_43567_DEVICE_ID	0x43d3
 #define BRCM_PCIE_43570_DEVICE_ID	0x43d9
+#define BRCM_PCIE_4358_DEVICE_ID	0x43e9
 #define BRCM_PCIE_43602_DEVICE_ID	0x43ba
 #define BRCM_PCIE_43602_2G_DEVICE_ID	0x43bb
 #define BRCM_PCIE_43602_5G_DEVICE_ID	0x43bc
+#define BRCM_PCIE_43602_RAW_DEVICE_ID	43602
 
 /* brcmsmac IDs */
 #define BCM4313_D11N2G_ID	0x4727	/* 4313 802.11n 2.4G device */
diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c
index 3689dbb..0e51e27 100644
--- a/drivers/net/wireless/cw1200/main.c
+++ b/drivers/net/wireless/cw1200/main.c
@@ -278,14 +278,14 @@
 	else
 		priv->ba_tx_tid_mask = 0xff; /* Enable TX BLKACK for all TIDs */
 
-	hw->flags = IEEE80211_HW_SIGNAL_DBM |
-		    IEEE80211_HW_SUPPORTS_PS |
-		    IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
-		    IEEE80211_HW_REPORTS_TX_ACK_STATUS |
-		    IEEE80211_HW_CONNECTION_MONITOR |
-		    IEEE80211_HW_AMPDU_AGGREGATION |
-		    IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
-		    IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC;
+	ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
+	ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+	ieee80211_hw_set(hw, CONNECTION_MONITOR);
+	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, SUPPORTS_PS);
 
 	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 					  BIT(NL80211_IFTYPE_ADHOC) |
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index b0f65fa..b86500b 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -578,13 +578,11 @@
 {
 	struct cw1200_common *priv = dev->priv;
 	bool listening = !!(*total_flags &
-			    (FIF_PROMISC_IN_BSS |
-			     FIF_OTHER_BSS |
+			    (FIF_OTHER_BSS |
 			     FIF_BCN_PRBRESP_PROMISC |
 			     FIF_PROBE_REQ));
 
-	*total_flags &= FIF_PROMISC_IN_BSS |
-			FIF_OTHER_BSS |
+	*total_flags &= FIF_OTHER_BSS |
 			FIF_FCSFAIL |
 			FIF_BCN_PRBRESP_PROMISC |
 			FIF_PROBE_REQ;
@@ -592,14 +590,12 @@
 	down(&priv->scan.lock);
 	mutex_lock(&priv->conf_mutex);
 
-	priv->rx_filter.promiscuous = (*total_flags & FIF_PROMISC_IN_BSS)
-			? 1 : 0;
+	priv->rx_filter.promiscuous = 0;
 	priv->rx_filter.bssid = (*total_flags & (FIF_OTHER_BSS |
 			FIF_PROBE_REQ)) ? 1 : 0;
 	priv->rx_filter.fcs = (*total_flags & FIF_FCSFAIL) ? 1 : 0;
 	priv->disable_beacon_filter = !(*total_flags &
 					(FIF_BCN_PRBRESP_PROMISC |
-					 FIF_PROMISC_IN_BSS |
 					 FIF_PROBE_REQ));
 	if (priv->listening != listening) {
 		priv->listening = listening;
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index e566580..7f4cb69 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3048,7 +3048,7 @@
 	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
 		   *total_flags);
 
-	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+	CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
 	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
 	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
 
@@ -3074,7 +3074,7 @@
 	 * filters into the device.
 	 */
 	*total_flags &=
-	    FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+	    FIF_OTHER_BSS | FIF_ALLMULTI |
 	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
 }
 
@@ -3561,8 +3561,10 @@
 	hw->vif_data_size = sizeof(struct il_vif_priv);
 
 	/* Tell mac80211 our characteristics */
-	hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT |
-		    IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+	ieee80211_hw_set(hw, SUPPORTS_PS);
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, SPECTRUM_MGMT);
 
 	hw->wiphy->interface_modes =
 	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 976f65f..44fa422 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -5751,11 +5751,13 @@
 	hw->rate_control_algorithm = "iwl-4965-rs";
 
 	/* Tell mac80211 our characteristics */
-	hw->flags =
-	    IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
-	    IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
-	    IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
-	    IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+	ieee80211_hw_set(hw, SUPPORTS_PS);
+	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(hw, SPECTRUM_MGMT);
+	ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
 	if (il->cfg->sku & IL_SKU_N)
 		hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS |
 				       NL80211_FEATURE_STATIC_SMPS;
@@ -6166,7 +6168,7 @@
 	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
 		   *total_flags);
 
-	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+	CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
 	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
 	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
 	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
@@ -6192,7 +6194,7 @@
 	 * filters into the device.
 	 */
 	*total_flags &=
-	    FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+	    FIF_OTHER_BSS | FIF_ALLMULTI |
 	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
 }
 
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index f89f446..aba0957 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -21,6 +21,7 @@
 		Intel 7260 Wi-Fi Adapter
 		Intel 3160 Wi-Fi Adapter
 		Intel 7265 Wi-Fi Adapter
+		Intel 8260 Wi-Fi Adapter
 		Intel 3165 Wi-Fi Adapter
 
 
@@ -54,16 +55,17 @@
 	tristate "Intel Wireless WiFi DVM Firmware support"
 	default IWLWIFI
 	help
-	  This is the driver that supports the DVM firmware which is
-	  used by most existing devices (with the exception of 7260
-	  and 3160).
+	  This is the driver that supports the DVM firmware. The list
+	  of the devices that use this firmware is available here:
+	  https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
 
 config IWLMVM
 	tristate "Intel Wireless WiFi MVM Firmware support"
 	select WANT_DEV_COREDUMP
 	help
-	  This is the driver that supports the MVM firmware which is
-	  currently only available for 7260 and 3160 devices.
+	  This is the driver that supports the MVM firmware. The list
+	  of the devices that use this firmware is available here:
+	  https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
 
 # don't call it _MODULE -- will confuse Kconfig/fixdep/...
 config IWLWIFI_OPMODE_MODULAR
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 3d32f41..dbfc5b1 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -9,6 +9,7 @@
 iwlwifi-objs		+= pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
 iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
 iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o
+iwlwifi-objs		+= iwl-trans.o
 
 iwlwifi-objs += $(iwlwifi-m)
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 5abd62e..7acaa26 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -104,15 +104,16 @@
 	hw->rate_control_algorithm = "iwl-agn-rs";
 
 	/* Tell mac80211 our characteristics */
-	hw->flags = IEEE80211_HW_SIGNAL_DBM |
-		    IEEE80211_HW_AMPDU_AGGREGATION |
-		    IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
-		    IEEE80211_HW_SPECTRUM_MGMT |
-		    IEEE80211_HW_REPORTS_TX_ACK_STATUS |
-		    IEEE80211_HW_QUEUE_CONTROL |
-		    IEEE80211_HW_SUPPORTS_PS |
-		    IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
-		    IEEE80211_HW_WANT_MONITOR_VIF;
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+	ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
+	ieee80211_hw_set(hw, SPECTRUM_MGMT);
+	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(hw, QUEUE_CONTROL);
+	ieee80211_hw_set(hw, SUPPORTS_PS);
+	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
 
 	hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
 	hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
@@ -135,7 +136,7 @@
 	 */
 	if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
 	    !iwlwifi_mod_params.sw_crypto)
-		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+		ieee80211_hw_set(hw, MFP_CAPABLE);
 
 	hw->sta_data_size = sizeof(struct iwl_station_priv);
 	hw->vif_data_size = sizeof(struct iwl_vif_priv);
@@ -1061,7 +1062,7 @@
 	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
 			changed_flags, *total_flags);
 
-	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+	CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
 	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
 	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
 	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
@@ -1088,7 +1089,7 @@
 	 * since we currently do not support programming multicast
 	 * filters into the device.
 	 */
-	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI |
 			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
 }
 
@@ -1140,7 +1141,6 @@
 		return;
 
 	IWL_DEBUG_MAC80211(priv, "enter\n");
-	mutex_lock(&priv->mutex);
 
 	if (priv->lib->bt_params &&
 	    priv->lib->bt_params->advanced_bt_coexist) {
@@ -1149,13 +1149,12 @@
 		else if (event->u.rssi.data == RSSI_EVENT_HIGH)
 			priv->bt_enable_pspoll = false;
 
-		iwlagn_send_advance_bt_config(priv);
+		queue_work(priv->workqueue, &priv->bt_runtime_config);
 	} else {
 		IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled,"
 				"ignoring RSSI callback\n");
 	}
 
-	mutex_unlock(&priv->mutex);
 	IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
@@ -1343,9 +1342,9 @@
 	 * other interfaces are added, this is safe.
 	 */
 	if (vif->type == NL80211_IFTYPE_MONITOR)
-		priv->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
+		ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
 	else
-		priv->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
+		__clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, priv->hw->flags);
 
 	err = iwl_setup_interface(priv, ctx);
 	if (!err || reset)
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 74ad278..cc35f79 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -69,7 +69,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX	13
+#define IWL7260_UCODE_API_MAX	15
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK	12
@@ -124,6 +124,28 @@
 	.apmg_wake_up_wa = true,
 };
 
+static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
+	.ct_kill_entry = 118,
+	.ct_kill_exit = 96,
+	.ct_kill_duration = 5,
+	.dynamic_smps_entry = 114,
+	.dynamic_smps_exit = 110,
+	.tx_protection_entry = 114,
+	.tx_protection_exit = 108,
+	.tx_backoff = {
+		{.temperature = 112, .backoff = 300},
+		{.temperature = 113, .backoff = 800},
+		{.temperature = 114, .backoff = 1500},
+		{.temperature = 115, .backoff = 3000},
+		{.temperature = 116, .backoff = 5000},
+		{.temperature = 117, .backoff = 10000},
+	},
+	.support_ct_kill = true,
+	.support_dynamic_smps = true,
+	.support_tx_protection = true,
+	.support_tx_backoff = true,
+};
+
 static const struct iwl_ht_params iwl7000_ht_params = {
 	.stbc = true,
 	.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
@@ -166,6 +188,7 @@
 	.host_interrupt_operation_mode = true,
 	.lp_xtal_workaround = true,
 	.dccm_len = IWL7260_DCCM_LEN,
+	.thermal_params = &iwl7000_high_temp_tt_params,
 };
 
 const struct iwl_cfg iwl7260_2n_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index ce6321b..72040cd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -69,7 +69,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX	13
+#define IWL8000_UCODE_API_MAX	15
 
 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK	12
@@ -122,24 +122,49 @@
 	.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
 };
 
-#define IWL_DEVICE_8000						\
-	.ucode_api_max = IWL8000_UCODE_API_MAX,			\
-	.ucode_api_ok = IWL8000_UCODE_API_OK,			\
-	.ucode_api_min = IWL8000_UCODE_API_MIN,			\
-	.device_family = IWL_DEVICE_FAMILY_8000,		\
-	.max_inst_size = IWL60_RTC_INST_SIZE,			\
-	.max_data_size = IWL60_RTC_DATA_SIZE,			\
-	.base_params = &iwl8000_base_params,			\
-	.led_mode = IWL_LED_RF_STATE,				\
-	.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000,	\
-	.d0i3 = true,						\
-	.non_shared_ant = ANT_A,				\
-	.dccm_offset = IWL8260_DCCM_OFFSET,			\
-	.dccm_len = IWL8260_DCCM_LEN,				\
-	.dccm2_offset = IWL8260_DCCM2_OFFSET,			\
-	.dccm2_len = IWL8260_DCCM2_LEN,				\
-	.smem_offset = IWL8260_SMEM_OFFSET,			\
-	.smem_len = IWL8260_SMEM_LEN
+static const struct iwl_tt_params iwl8000_tt_params = {
+	.ct_kill_entry = 115,
+	.ct_kill_exit = 93,
+	.ct_kill_duration = 5,
+	.dynamic_smps_entry = 111,
+	.dynamic_smps_exit = 107,
+	.tx_protection_entry = 112,
+	.tx_protection_exit = 105,
+	.tx_backoff = {
+		{.temperature = 110, .backoff = 200},
+		{.temperature = 111, .backoff = 600},
+		{.temperature = 112, .backoff = 1200},
+		{.temperature = 113, .backoff = 2000},
+		{.temperature = 114, .backoff = 4000},
+	},
+	.support_ct_kill = true,
+	.support_dynamic_smps = true,
+	.support_tx_protection = true,
+	.support_tx_backoff = true,
+};
+
+#define IWL_DEVICE_8000							\
+	.ucode_api_max = IWL8000_UCODE_API_MAX,				\
+	.ucode_api_ok = IWL8000_UCODE_API_OK,				\
+	.ucode_api_min = IWL8000_UCODE_API_MIN,				\
+	.device_family = IWL_DEVICE_FAMILY_8000,			\
+	.max_inst_size = IWL60_RTC_INST_SIZE,				\
+	.max_data_size = IWL60_RTC_DATA_SIZE,				\
+	.base_params = &iwl8000_base_params,				\
+	.led_mode = IWL_LED_RF_STATE,					\
+	.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000,		\
+	.d0i3 = true,							\
+	.non_shared_ant = ANT_A,					\
+	.dccm_offset = IWL8260_DCCM_OFFSET,				\
+	.dccm_len = IWL8260_DCCM_LEN,					\
+	.dccm2_offset = IWL8260_DCCM2_OFFSET,				\
+	.dccm2_len = IWL8260_DCCM2_LEN,					\
+	.smem_offset = IWL8260_SMEM_OFFSET,				\
+	.smem_len = IWL8260_SMEM_LEN,					\
+	.default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,	\
+	.default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,	\
+	.thermal_params = &iwl8000_tt_params,				\
+	.apmg_not_supported = true
 
 const struct iwl_cfg iwl8260_2n_cfg = {
 	.name = "Intel(R) Dual Band Wireless N 8260",
@@ -177,8 +202,6 @@
 	.ht_params = &iwl8000_ht_params,
 	.nvm_ver = IWL8000_NVM_VERSION,
 	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
-	.default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,
-	.default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,
 	.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
 	.disable_dummy_notification = true,
 	.max_ht_ampdu_exponent  = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
@@ -192,8 +215,6 @@
 	.ht_params = &iwl8000_ht_params,
 	.nvm_ver = IWL8000_NVM_VERSION,
 	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
-	.default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,
-	.default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,
 	.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
 	.bt_shared_single_ant = true,
 	.disable_dummy_notification = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 3f33f75..08c14af 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -195,6 +195,49 @@
 };
 
 /*
+ * Tx-backoff threshold
+ * @temperature: The threshold in Celsius
+ * @backoff: The tx-backoff in uSec
+ */
+struct iwl_tt_tx_backoff {
+	s32 temperature;
+	u32 backoff;
+};
+
+#define TT_TX_BACKOFF_SIZE 6
+
+/**
+ * struct iwl_tt_params - thermal throttling parameters
+ * @ct_kill_entry: CT Kill entry threshold
+ * @ct_kill_exit: CT Kill exit threshold
+ * @ct_kill_duration: The time interval (in uSec) at which the driver
+ *	checks whether to exit CT Kill.
+ * @dynamic_smps_entry: Dynamic SMPS entry threshold
+ * @dynamic_smps_exit: Dynamic SMPS exit threshold
+ * @tx_protection_entry: TX protection entry threshold
+ * @tx_protection_exit: TX protection exit threshold
+ * @tx_backoff: Array of thresholds for tx-backoff, in ascending order.
+ * @support_ct_kill: Support CT Kill?
+ * @support_dynamic_smps: Support dynamic SMPS?
+ * @support_tx_protection: Support tx protection?
+ * @support_tx_backoff: Support tx-backoff?
+ */
+struct iwl_tt_params {
+	s32 ct_kill_entry;
+	s32 ct_kill_exit;
+	u32 ct_kill_duration;
+	s32 dynamic_smps_entry;
+	s32 dynamic_smps_exit;
+	s32 tx_protection_entry;
+	s32 tx_protection_exit;
+	struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
+	bool support_ct_kill;
+	bool support_dynamic_smps;
+	bool support_tx_protection;
+	bool support_tx_backoff;
+};
+
+/*
  * information on how to parse the EEPROM
  */
 #define EEPROM_REG_BAND_1_CHANNELS		0x08
@@ -316,6 +359,8 @@
 	const u32 dccm2_len;
 	const u32 smem_offset;
 	const u32 smem_len;
+	const struct iwl_tt_params *thermal_params;
+	bool apmg_not_supported;
 };
 
 /*
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
index 223b875..948ce08 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -1,6 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -64,19 +65,21 @@
 
 TRACE_EVENT(iwlwifi_dev_rx,
 	TP_PROTO(const struct device *dev, const struct iwl_trans *trans,
-		 void *rxbuf, size_t len),
-	TP_ARGS(dev, trans, rxbuf, len),
+		 struct iwl_rx_packet *pkt, size_t len),
+	TP_ARGS(dev, trans, pkt, len),
 	TP_STRUCT__entry(
 		DEV_ENTRY
-		__dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, rxbuf, len))
+		__field(u8, cmd)
+		__dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len))
 	),
 	TP_fast_assign(
 		DEV_ASSIGN;
-		memcpy(__get_dynamic_array(rxbuf), rxbuf,
-		       iwl_rx_trace_len(trans, rxbuf, len));
+		__entry->cmd = pkt->hdr.cmd;
+		memcpy(__get_dynamic_array(rxbuf), pkt,
+		       iwl_rx_trace_len(trans, pkt, len));
 	),
 	TP_printk("[%s] RX cmd %#.2x",
-		  __get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4])
+		  __get_str(dev), __entry->cmd)
 );
 
 TRACE_EVENT(iwlwifi_dev_tx,
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 7267152..6685259 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -423,13 +423,19 @@
 {
 	const struct iwl_ucode_api *ucode_api = (void *)data;
 	u32 api_index = le32_to_cpu(ucode_api->api_index);
+	u32 api_flags = le32_to_cpu(ucode_api->api_flags);
+	int i;
 
-	if (api_index >= IWL_API_ARRAY_SIZE) {
+	if (api_index >= IWL_API_MAX_BITS / 32) {
 		IWL_ERR(drv, "api_index larger than supported by driver\n");
-		return -EINVAL;
+		/* don't return an error so we can load FW that has more bits */
+		return 0;
 	}
 
-	capa->api[api_index] = le32_to_cpu(ucode_api->api_flags);
+	for (i = 0; i < 32; i++) {
+		if (api_flags & BIT(i))
+			__set_bit(i + 32 * api_index, capa->_api);
+	}
 
 	return 0;
 }
@@ -439,13 +445,19 @@
 {
 	const struct iwl_ucode_capa *ucode_capa = (void *)data;
 	u32 api_index = le32_to_cpu(ucode_capa->api_index);
+	u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
+	int i;
 
-	if (api_index >= IWL_CAPABILITIES_ARRAY_SIZE) {
+	if (api_index >= IWL_CAPABILITIES_MAX_BITS / 32) {
 		IWL_ERR(drv, "api_index larger than supported by driver\n");
-		return -EINVAL;
+		/* don't return an error so we can load FW that has more bits */
+		return 0;
 	}
 
-	capa->capa[api_index] = le32_to_cpu(ucode_capa->api_capa);
+	for (i = 0; i < 32; i++) {
+		if (api_flags & BIT(i))
+			__set_bit(i + 32 * api_index, capa->_capa);
+	}
 
 	return 0;
 }
@@ -1148,7 +1160,7 @@
 	if (err)
 		goto try_again;
 
-	if (drv->fw.ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)
+	if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION))
 		api_ver = drv->fw.ucode_ver;
 	else
 		api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
@@ -1239,6 +1251,8 @@
 		sizeof(struct iwl_fw_dbg_trigger_txq_timer);
 	trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] =
 		sizeof(struct iwl_fw_dbg_trigger_time_event);
+	trigger_tlv_sz[FW_DBG_TRIGGER_BA] =
+		sizeof(struct iwl_fw_dbg_trigger_ba);
 
 	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
 		if (pieces->dbg_trigger_tlv[i]) {
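The two TLV-parsing hunks above stop storing api/capa as single u32 words and instead expand every 32-bit TLV word into a long-based bitmap, so firmware can advertise bits the driver does not yet know about without failing the load (hence the "don't return an error" change). A standalone analogue of the expand-then-test pattern (MAX_BITS and the helper names are illustrative):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_BITS	64	/* mirrors IWL_API_MAX_BITS */

	static uint32_t api_map[MAX_BITS / 32];

	/* expand one 32-bit TLV word; silently skip words we don't know */
	static void set_api_word(uint32_t api_index, uint32_t api_flags)
	{
		int i;

		if (api_index >= MAX_BITS / 32)
			return;		/* newer firmware: ignore, don't error */

		for (i = 0; i < 32; i++)
			if (api_flags & (1u << i))
				api_map[api_index] |= 1u << i;
	}

	static bool has_api(unsigned int bit)
	{
		return bit < MAX_BITS && (api_map[bit / 32] & (1u << (bit % 32)));
	}

	int main(void)
	{
		set_api_word(0, 1u << 20);	/* e.g. a "new version" style bit */
		set_api_word(1, 1u << 1);	/* bit 33 overall */
		printf("%d %d %d\n", has_api(20), has_api(33), has_api(5));
		return 0;
	}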
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index d45dc02..d560648 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -438,12 +438,6 @@
 #define RX_QUEUE_MASK                         255
 #define RX_QUEUE_SIZE_LOG                     8
 
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
 /**
 * struct iwl_rb_status - receive buffer status
  * 	host memory mapped FH registers
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
index 251bf8d..e57dbd0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -254,6 +254,7 @@
  *	detection.
 * @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time-event
 *	related events.
+ * @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events.
  */
 enum iwl_fw_dbg_trigger {
 	FW_DBG_TRIGGER_INVALID = 0,
@@ -267,6 +268,7 @@
 	FW_DBG_TRIGGER_RSSI,
 	FW_DBG_TRIGGER_TXQ_TIMERS,
 	FW_DBG_TRIGGER_TIME_EVENT,
+	FW_DBG_TRIGGER_BA,
 
 	/* must be last */
 	FW_DBG_TRIGGER_MAX,
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 62db2e5..a9b5ae4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -237,6 +237,8 @@
 	IWL_UCODE_TLV_FLAGS_GO_UAPSD		= BIT(30),
 };
 
+typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
+
 /**
  * enum iwl_ucode_tlv_api - ucode api
  * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
@@ -255,22 +257,27 @@
  * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
  * @IWL_UCODE_TLV_API_STATS_V10: uCode supports/uses statistics API version 10
  * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
+ * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
+ *	instead of 3.
  */
 enum iwl_ucode_tlv_api {
-	IWL_UCODE_TLV_API_BT_COEX_SPLIT         = BIT(3),
-	IWL_UCODE_TLV_API_FRAGMENTED_SCAN	= BIT(8),
-	IWL_UCODE_TLV_API_WIFI_MCC_UPDATE	= BIT(9),
-	IWL_UCODE_TLV_API_HDC_PHASE_0		= BIT(10),
-	IWL_UCODE_TLV_API_TX_POWER_DEV		= BIT(11),
-	IWL_UCODE_TLV_API_BASIC_DWELL		= BIT(13),
-	IWL_UCODE_TLV_API_SCD_CFG		= BIT(15),
-	IWL_UCODE_TLV_API_SINGLE_SCAN_EBS	= BIT(16),
-	IWL_UCODE_TLV_API_ASYNC_DTM		= BIT(17),
-	IWL_UCODE_TLV_API_LQ_SS_PARAMS		= BIT(18),
-	IWL_UCODE_TLV_API_STATS_V10		= BIT(19),
-	IWL_UCODE_TLV_API_NEW_VERSION		= BIT(20),
+	IWL_UCODE_TLV_API_BT_COEX_SPLIT         = (__force iwl_ucode_tlv_api_t)3,
+	IWL_UCODE_TLV_API_FRAGMENTED_SCAN	= (__force iwl_ucode_tlv_api_t)8,
+	IWL_UCODE_TLV_API_WIFI_MCC_UPDATE	= (__force iwl_ucode_tlv_api_t)9,
+	IWL_UCODE_TLV_API_HDC_PHASE_0		= (__force iwl_ucode_tlv_api_t)10,
+	IWL_UCODE_TLV_API_TX_POWER_DEV		= (__force iwl_ucode_tlv_api_t)11,
+	IWL_UCODE_TLV_API_BASIC_DWELL		= (__force iwl_ucode_tlv_api_t)13,
+	IWL_UCODE_TLV_API_SCD_CFG		= (__force iwl_ucode_tlv_api_t)15,
+	IWL_UCODE_TLV_API_SINGLE_SCAN_EBS	= (__force iwl_ucode_tlv_api_t)16,
+	IWL_UCODE_TLV_API_ASYNC_DTM		= (__force iwl_ucode_tlv_api_t)17,
+	IWL_UCODE_TLV_API_LQ_SS_PARAMS		= (__force iwl_ucode_tlv_api_t)18,
+	IWL_UCODE_TLV_API_STATS_V10		= (__force iwl_ucode_tlv_api_t)19,
+	IWL_UCODE_TLV_API_NEW_VERSION		= (__force iwl_ucode_tlv_api_t)20,
+	IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY	= (__force iwl_ucode_tlv_api_t)24,
 };
 
+typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
+
 /**
  * enum iwl_ucode_tlv_capa - ucode capabilities
  * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
@@ -290,6 +297,7 @@
  *	which also implies support for the scheduler configuration command
  * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
  * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
+ * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
  * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
  * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
  * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
@@ -299,22 +307,23 @@
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
  */
 enum iwl_ucode_tlv_capa {
-	IWL_UCODE_TLV_CAPA_D0I3_SUPPORT			= BIT(0),
-	IWL_UCODE_TLV_CAPA_LAR_SUPPORT			= BIT(1),
-	IWL_UCODE_TLV_CAPA_UMAC_SCAN			= BIT(2),
-	IWL_UCODE_TLV_CAPA_BEAMFORMER			= BIT(3),
-	IWL_UCODE_TLV_CAPA_TDLS_SUPPORT			= BIT(6),
-	IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT	= BIT(8),
-	IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT	= BIT(9),
-	IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT	= BIT(10),
-	IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT		= BIT(11),
-	IWL_UCODE_TLV_CAPA_DQA_SUPPORT			= BIT(12),
-	IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH		= BIT(13),
-	IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT		= BIT(18),
-	IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS		= BIT(22),
-	IWL_UCODE_TLV_CAPA_BT_COEX_PLCR			= BIT(28),
-	IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC		= BIT(29),
-	IWL_UCODE_TLV_CAPA_BT_COEX_RRC			= BIT(30),
+	IWL_UCODE_TLV_CAPA_D0I3_SUPPORT			= (__force iwl_ucode_tlv_capa_t)0,
+	IWL_UCODE_TLV_CAPA_LAR_SUPPORT			= (__force iwl_ucode_tlv_capa_t)1,
+	IWL_UCODE_TLV_CAPA_UMAC_SCAN			= (__force iwl_ucode_tlv_capa_t)2,
+	IWL_UCODE_TLV_CAPA_BEAMFORMER			= (__force iwl_ucode_tlv_capa_t)3,
+	IWL_UCODE_TLV_CAPA_TDLS_SUPPORT			= (__force iwl_ucode_tlv_capa_t)6,
+	IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT	= (__force iwl_ucode_tlv_capa_t)8,
+	IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT	= (__force iwl_ucode_tlv_capa_t)9,
+	IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT	= (__force iwl_ucode_tlv_capa_t)10,
+	IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT		= (__force iwl_ucode_tlv_capa_t)11,
+	IWL_UCODE_TLV_CAPA_DQA_SUPPORT			= (__force iwl_ucode_tlv_capa_t)12,
+	IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH		= (__force iwl_ucode_tlv_capa_t)13,
+	IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT		= (__force iwl_ucode_tlv_capa_t)18,
+	IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT		= (__force iwl_ucode_tlv_capa_t)19,
+	IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS		= (__force iwl_ucode_tlv_capa_t)22,
+	IWL_UCODE_TLV_CAPA_BT_COEX_PLCR			= (__force iwl_ucode_tlv_capa_t)28,
+	IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC		= (__force iwl_ucode_tlv_capa_t)29,
+	IWL_UCODE_TLV_CAPA_BT_COEX_RRC			= (__force iwl_ucode_tlv_capa_t)30,
 };
 
 /* The default calibrate table size if not specified by firmware file */
@@ -325,13 +334,14 @@
 /* The default max probe length if not specified by the firmware file */
 #define IWL_DEFAULT_MAX_PROBE_LENGTH	200
 
+#define IWL_API_MAX_BITS		64
+#define IWL_CAPABILITIES_MAX_BITS	64
+
 /*
  * For 16.0 uCode and above, there is no differentiation between sections,
  * just an offset to the HW address.
  */
 #define IWL_UCODE_SECTION_MAX 12
-#define IWL_API_ARRAY_SIZE	1
-#define IWL_CAPABILITIES_ARRAY_SIZE	1
 #define CPU1_CPU2_SEPARATOR_SECTION	0xFFFFCCCC
 
 /* uCode version contains 4 values: Major/Minor/API/Serial */
@@ -424,11 +434,13 @@
  * @SMEM_MODE: monitor stores the data in SMEM
  * @EXTERNAL_MODE: monitor stores the data in allocated DRAM
  * @MARBH_MODE: monitor stores the data in MARBH buffer
+ * @MIPI_MODE: monitor outputs the data through the MIPI interface
  */
 enum iwl_fw_dbg_monitor_mode {
 	SMEM_MODE = 0,
 	EXTERNAL_MODE = 1,
 	MARBH_MODE = 2,
+	MIPI_MODE = 3,
 };
 
 /**
@@ -436,6 +448,7 @@
  *
  * @version: version of the TLV - currently 0
  * @monitor_mode: %enum iwl_fw_dbg_monitor_mode
+ * @size_power: buffer size will be 2^(size_power + 11)
  * @base_reg: addr of the base addr register (PRPH)
  * @end_reg:  addr of the end addr register (PRPH)
  * @write_ptr_reg: the addr of the reg of the write pointer
@@ -449,7 +462,8 @@
 struct iwl_fw_dbg_dest_tlv {
 	u8 version;
 	u8 monitor_mode;
-	u8 reserved[2];
+	u8 size_power;
+	u8 reserved;
 	__le32 base_reg;
 	__le32 end_reg;
 	__le32 write_ptr_reg;
@@ -659,6 +673,33 @@
 } __packed;
 
 /**
+ * struct iwl_fw_dbg_trigger_ba - configures BlockAck related trigger
+ * @rx_ba_start: tid bitmap to configure on what tid the trigger should occur
+ *	when an Rx BlockAck session is started.
+ * @rx_ba_stop: tid bitmap to configure on what tid the trigger should occur
+ *	when an Rx BlockAck session is stopped.
+ * @tx_ba_start: tid bitmap to configure on what tid the trigger should occur
+ *	when a Tx BlockAck session is started.
+ * @tx_ba_stop: tid bitmap to configure on what tid the trigger should occur
+ *	when a Tx BlockAck session is stopped.
+ * @rx_bar: tid bitmap to configure on what tid the trigger should occur
+ *	when a BAR is received (for a Tx BlockAck session).
+ * @tx_bar: tid bitmap to configure on what tid the trigger should occur
+ *	when a BAR is sent (for an Rx BlockAck session).
+ * @frame_timeout: tid bitmap to configure on what tid the trigger should occur
+ *	when a frame times out in the reordering buffer.
+ */
+struct iwl_fw_dbg_trigger_ba {
+	__le16 rx_ba_start;
+	__le16 rx_ba_stop;
+	__le16 tx_ba_start;
+	__le16 tx_ba_stop;
+	__le16 rx_bar;
+	__le16 tx_bar;
+	__le16 frame_timeout;
+} __packed;
+
+/**
  * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
  * @id: conf id
  * @usniffer: should the uSniffer image be used
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index cf75baf..3e3c9d8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -105,10 +105,24 @@
 	u32 n_scan_channels;
 	u32 standard_phy_calibration_size;
 	u32 flags;
-	u32 api[IWL_API_ARRAY_SIZE];
-	u32 capa[IWL_CAPABILITIES_ARRAY_SIZE];
+	unsigned long _api[BITS_TO_LONGS(IWL_API_MAX_BITS)];
+	unsigned long _capa[BITS_TO_LONGS(IWL_CAPABILITIES_MAX_BITS)];
 };
 
+static inline bool
+fw_has_api(const struct iwl_ucode_capabilities *capabilities,
+	   iwl_ucode_tlv_api_t api)
+{
+	return test_bit((__force long)api, capabilities->_api);
+}
+
+static inline bool
+fw_has_capa(const struct iwl_ucode_capabilities *capabilities,
+	    iwl_ucode_tlv_capa_t capa)
+{
+	return test_bit((__force long)capa, capabilities->_capa);
+}
+
 /* one for each uCode image (inst/data, init/runtime/wowlan) */
 struct fw_desc {
 	const void *data;	/* vmalloc'ed data */
@@ -205,6 +219,8 @@
 		return "EXTERNAL_DRAM";
 	case MARBH_MODE:
 		return "MARBH";
+	case MIPI_MODE:
+		return "MIPI";
 	default:
 		return "UNKNOWN";
 	}
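Together with the iwl-fw-file.h change, API and capability values now carry bit indices rather than BIT() masks, and the __bitwise__ typedefs let sparse flag any leftover mask arithmetic; fw_has_api()/fw_has_capa() become the only sanctioned accessors. The mechanical conversion applied throughout mvm/ below is of this shape:

	-	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
	+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))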
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 8e604a3..80fefe7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -249,7 +249,7 @@
 	 */
 	if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
 	    (flags & IEEE80211_CHAN_NO_IR))
-		flags |= IEEE80211_CHAN_GO_CONCURRENT;
+		flags |= IEEE80211_CHAN_IR_CONCURRENT;
 
 	return flags;
 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 88a57e6..5af1c77 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -348,6 +348,9 @@
 #define MON_BUFF_WRPTR			(0xa03c44)
 #define MON_BUFF_CYCLE_CNT		(0xa03c48)
 
+#define MON_DMARB_RD_CTL_ADDR		(0xa03c60)
+#define MON_DMARB_RD_DATA_ADDR		(0xa03c5c)
+
 #define DBGC_IN_SAMPLE			(0xa03c00)
 
 /* enable the ID buf for read */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
new file mode 100644
index 0000000..9f8bcef
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.c
@@ -0,0 +1,113 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include "iwl-trans.h"
+
+struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
+				  struct device *dev,
+				  const struct iwl_cfg *cfg,
+				  const struct iwl_trans_ops *ops,
+				  size_t dev_cmd_headroom)
+{
+	struct iwl_trans *trans;
+#ifdef CONFIG_LOCKDEP
+	static struct lock_class_key __key;
+#endif
+
+	trans = kzalloc(sizeof(*trans) + priv_size, GFP_KERNEL);
+	if (!trans)
+		return NULL;
+
+#ifdef CONFIG_LOCKDEP
+	lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
+			 &__key, 0);
+#endif
+
+	trans->dev = dev;
+	trans->cfg = cfg;
+	trans->ops = ops;
+	trans->dev_cmd_headroom = dev_cmd_headroom;
+
+	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
+		 "iwl_cmd_pool:%s", dev_name(trans->dev));
+	trans->dev_cmd_pool =
+		kmem_cache_create(trans->dev_cmd_pool_name,
+				  sizeof(struct iwl_device_cmd)
+				  + trans->dev_cmd_headroom,
+				  sizeof(void *),
+				  SLAB_HWCACHE_ALIGN,
+				  NULL);
+	if (!trans->dev_cmd_pool)
+		goto free;
+
+	return trans;
+ free:
+	kfree(trans);
+	return NULL;
+}
+
+void iwl_trans_free(struct iwl_trans *trans)
+{
+	kmem_cache_destroy(trans->dev_cmd_pool);
+	kfree(trans);
+}
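The new file centralizes allocation that each transport previously open-coded, including the lockdep map that trans_lockdep_init() used to set up (removed from iwl-trans.h below). A backend would presumably call it roughly as follows; this is only a sketch, and my_priv/my_trans_ops are illustrative names, not driver symbols:

	struct iwl_trans *trans;

	/* priv_size carves out trans->trans_specific for the backend */
	trans = iwl_trans_alloc(sizeof(struct my_priv), &pdev->dev,
				cfg, &my_trans_ops, 0);
	if (!trans)
		return NULL;

	/* ... probe the device ... */

	iwl_trans_free(trans);		/* on error paths and removal */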
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 56254a8..87a230a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -641,6 +641,8 @@
 
 	enum iwl_d0i3_mode d0i3_mode;
 
+	bool wowlan_d0i3;
+
 	/* pointer to trans specific struct */
 	/*Ensure that this pointer will always be aligned to sizeof pointer */
 	char trans_specific[0] __aligned(sizeof(void *));
@@ -1011,19 +1013,19 @@
 }
 
 /*****************************************************
+ * transport helper functions
+ *****************************************************/
+struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
+				  struct device *dev,
+				  const struct iwl_cfg *cfg,
+				  const struct iwl_trans_ops *ops,
+				  size_t dev_cmd_headroom);
+void iwl_trans_free(struct iwl_trans *trans);
+
+/*****************************************************
 * driver (transport) register/unregister functions
 ******************************************************/
 int __must_check iwl_pci_register_driver(void);
 void iwl_pci_unregister_driver(void);
 
-static inline void trans_lockdep_init(struct iwl_trans *trans)
-{
-#ifdef CONFIG_LOCKDEP
-	static struct lock_class_key __key;
-
-	lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
-			 &__key, 0);
-#endif
-}
-
 #endif /* __iwl_trans_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 13a0a03..b4737e2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -408,23 +408,12 @@
 
 int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 {
-	struct iwl_bt_coex_cmd *bt_cmd;
-	struct iwl_host_cmd cmd = {
-		.id = BT_CONFIG,
-		.len = { sizeof(*bt_cmd), },
-		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
-	};
-	int ret;
+	struct iwl_bt_coex_cmd bt_cmd = {};
 	u32 mode;
 
-	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
 		return iwl_send_bt_init_conf_old(mvm);
 
-	bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
-	if (!bt_cmd)
-		return -ENOMEM;
-	cmd.data[0] = bt_cmd;
-
 	lockdep_assert_held(&mvm->mutex);
 
 	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
@@ -440,36 +429,33 @@
 			mode = 0;
 		}
 
-		bt_cmd->mode = cpu_to_le32(mode);
+		bt_cmd.mode = cpu_to_le32(mode);
 		goto send_cmd;
 	}
 
 	mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
-	bt_cmd->mode = cpu_to_le32(mode);
+	bt_cmd.mode = cpu_to_le32(mode);
 
 	if (IWL_MVM_BT_COEX_SYNC2SCO)
-		bt_cmd->enabled_modules |=
+		bt_cmd.enabled_modules |=
 			cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
 
 	if (iwl_mvm_bt_is_plcr_supported(mvm))
-		bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
+		bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
 
 	if (IWL_MVM_BT_COEX_MPLUT) {
-		bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
-		bt_cmd->enabled_modules |=
+		bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
+		bt_cmd.enabled_modules |=
 			cpu_to_le32(BT_COEX_MPLUT_BOOST_ENABLED);
 	}
 
-	bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
+	bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
 
 send_cmd:
 	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
 	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
 
-	ret = iwl_mvm_send_cmd(mvm, &cmd);
-
-	kfree(bt_cmd);
-	return ret;
+	return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
 }
 
 static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
@@ -746,7 +732,7 @@
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
 
-	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
 		return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd);
 
 	IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
@@ -770,52 +756,14 @@
 	return 0;
 }
 
-static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
-				   struct ieee80211_vif *vif)
-{
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	struct iwl_bt_iterator_data *data = _data;
-	struct iwl_mvm *mvm = data->mvm;
-
-	struct ieee80211_sta *sta;
-	struct iwl_mvm_sta *mvmsta;
-
-	struct ieee80211_chanctx_conf *chanctx_conf;
-
-	rcu_read_lock();
-	chanctx_conf = rcu_dereference(vif->chanctx_conf);
-	/* If channel context is invalid or not on 2.4GHz - don't count it */
-	if (!chanctx_conf ||
-	    chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
-		rcu_read_unlock();
-		return;
-	}
-	rcu_read_unlock();
-
-	if (vif->type != NL80211_IFTYPE_STATION ||
-	    mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
-		return;
-
-	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
-					lockdep_is_held(&mvm->mutex));
-
-	/* This can happen if the station has been removed right now */
-	if (IS_ERR_OR_NULL(sta))
-		return;
-
-	mvmsta = iwl_mvm_sta_from_mac80211(sta);
-}
-
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			   enum ieee80211_rssi_event_data rssi_event)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	struct iwl_bt_iterator_data data = {
-		.mvm = mvm,
-	};
 	int ret;
 
-	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+	if (!fw_has_api(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
 		iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event);
 		return;
 	}
@@ -853,10 +801,6 @@
 
 	if (ret)
 		IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
-
-	ieee80211_iterate_active_interfaces_atomic(
-		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-		iwl_mvm_bt_rssi_iterator, &data);
 }
 
 #define LINK_QUAL_AGG_TIME_LIMIT_DEF	(4000)
@@ -870,7 +814,7 @@
 	struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
 	enum iwl_bt_coex_lut_type lut_type;
 
-	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
 		return iwl_mvm_coex_agg_time_limit_old(mvm, sta);
 
 	if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
@@ -897,7 +841,7 @@
 	struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
 	enum iwl_bt_coex_lut_type lut_type;
 
-	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
 		return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta);
 
 	if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
@@ -927,7 +871,7 @@
 	if (ant & mvm->cfg->non_shared_ant)
 		return true;
 
-	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
 		return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
 
 	return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
@@ -940,10 +884,10 @@
 	if (mvm->cfg->bt_shared_single_ant)
 		return true;
 
-	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
 		return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
 
-	return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF;
+	return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
 }
 
 bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
@@ -951,7 +895,7 @@
 {
 	u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
 
-	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
 		return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band);
 
 	if (band != IEEE80211_BAND_2GHZ)
@@ -994,7 +938,8 @@
 
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
 {
-	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+	if (!fw_has_api(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
 		iwl_mvm_bt_coex_vif_change_old(mvm);
 		return;
 	}
@@ -1012,7 +957,7 @@
 	u8 __maybe_unused lower_bound, upper_bound;
 	u8 lut;
 
-	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
 		return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
 
 	if (!iwl_mvm_bt_is_plcr_supported(mvm))
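One pattern worth noting in the BT_CONFIG rework above: IWL_HCMD_DFL_NOCOPY hands the transport a pointer, so the payload had to stay alive on the heap, whereas iwl_mvm_send_cmd_pdu() copies the payload into the command buffer, letting a small fixed-size command live on the stack. Reduced to the two idioms (abridged from the hunk above):

	/* before: NOCOPY, buffer must outlive the command */
	bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
	cmd.data[0] = bt_cmd;
	ret = iwl_mvm_send_cmd(mvm, &cmd);
	kfree(bt_cmd);

	/* after: the pdu helper copies, so the stack suffices */
	struct iwl_bt_coex_cmd bt_cmd = {};
	return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);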
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 4310cf1..4165d10 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -761,7 +761,7 @@
 
 static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
 {
-	iwl_mvm_cancel_scan(mvm);
+	iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
 
 	iwl_trans_stop_device(mvm->trans);
 
@@ -981,7 +981,8 @@
 	if (ret)
 		return ret;
 
-	ret = iwl_mvm_scan_offload_start(mvm, vif, nd_config, &mvm->nd_ies);
+	ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies,
+				       IWL_MVM_SCAN_NETDETECT);
 	if (ret)
 		return ret;
 
@@ -1169,7 +1170,8 @@
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
 	iwl_trans_suspend(mvm->trans);
-	if (wowlan->any) {
+	mvm->trans->wowlan_d0i3 = wowlan->any;
+	if (mvm->trans->wowlan_d0i3) {
 		/* 'any' trigger means d0i3 usage */
 		if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
 			int ret = iwl_mvm_enter_d0i3_sync(mvm);
@@ -1784,7 +1786,7 @@
 	for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
 		struct iwl_scan_offload_profile_match *fw_match;
 		struct cfg80211_wowlan_nd_match *match;
-		int n_channels = 0;
+		int idx, n_channels = 0;
 
 		fw_match = &query.matches[i];
 
@@ -1799,8 +1801,12 @@
 
 		net_detect->matches[net_detect->n_matches++] = match;
 
-		match->ssid.ssid_len = mvm->nd_match_sets[i].ssid.ssid_len;
-		memcpy(match->ssid.ssid, mvm->nd_match_sets[i].ssid.ssid,
+		/* We inverted the order of the SSIDs in the scan
+		 * request, so invert the index here.
+		 */
+		idx = mvm->n_nd_match_sets - i - 1;
+		match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
+		memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
 		       match->ssid.ssid_len);
 
 		if (mvm->n_nd_channels < n_channels)
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 5f37eab..5c8a65d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -190,6 +190,21 @@
 	return ret ?: count;
 }
 
+static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	char buf[64];
+	int bufsz = sizeof(buf);
+	int pos;
+
+	pos = scnprintf(buf, bufsz, "bss limit = %d\n",
+			vif->bss_conf.txpower);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
 static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
 					char __user *user_buf,
 					size_t count, loff_t *ppos)
@@ -607,6 +622,7 @@
 	} while (0)
 
 MVM_DEBUGFS_READ_FILE_OPS(mac_params);
+MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
@@ -641,6 +657,7 @@
 		MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
 					 S_IRUSR);
 
+	MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, S_IRUSR);
 	MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
 	MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
 				 S_IRUSR | S_IWUSR);
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 9ac04c1..ffb4b5c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -493,7 +493,8 @@
 
 	mutex_lock(&mvm->mutex);
 
-	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+	if (!fw_has_api(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
 		struct iwl_bt_coex_profile_notif_old *notif =
 			&mvm->last_bt_notif_old;
 
@@ -550,7 +551,8 @@
 
 	mutex_lock(&mvm->mutex);
 
-	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+	if (!fw_has_api(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
 		struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old;
 
 		pos += scnprintf(buf+pos, bufsz-pos,
@@ -916,7 +918,8 @@
 
 	if (mvm->scan_rx_ant != scan_rx_ant) {
 		mvm->scan_rx_ant = scan_rx_ant;
-		if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+		if (fw_has_capa(&mvm->fw->ucode_capa,
+				IWL_UCODE_TLV_CAPA_UMAC_SCAN))
 			iwl_mvm_config_scan(mvm);
 	}
 
@@ -1356,6 +1359,7 @@
 	PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
 	PRINT_MVM_REF(IWL_MVM_REF_SCAN);
 	PRINT_MVM_REF(IWL_MVM_REF_ROC);
+	PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX);
 	PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
 	PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
 	PRINT_MVM_REF(IWL_MVM_REF_USER);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index d6cced4..5e4cbdb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -274,50 +274,18 @@
 } __packed;
 
 /**
- * iwl_scan_offload_schedule - schedule of scan offload
+ * iwl_scan_schedule_lmac - schedule of scan offload
  * @delay:		delay between iterations, in seconds.
  * @iterations:		num of scan iterations
  * @full_scan_mul:	number of partial scans before each full scan
  */
-struct iwl_scan_offload_schedule {
+struct iwl_scan_schedule_lmac {
 	__le16 delay;
 	u8 iterations;
 	u8 full_scan_mul;
-} __packed;
+} __packed; /* SCAN_SCHEDULE_API_S */
 
-/*
- * iwl_scan_offload_flags
- *
- * IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
- * IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
- * IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE: EBS duration is 100mSec - typical
- *	beacon period. Finding channel activity in this mode is not guaranteed.
- * IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE: EBS duration is 200mSec.
- *	Assuming beacon period is 100ms finding channel activity is guaranteed.
- */
-enum iwl_scan_offload_flags {
-	IWL_SCAN_OFFLOAD_FLAG_PASS_ALL		= BIT(0),
-	IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL	= BIT(2),
-	IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE	= BIT(5),
-	IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE	= BIT(6),
-};
-
-/**
- * iwl_scan_offload_req - scan offload request command
- * @flags:		bitmap - enum iwl_scan_offload_flags.
- * @watchdog:		maximum scan duration in TU.
- * @delay:		delay in seconds before first iteration.
- * @schedule_line:	scan offload schedule, for fast and regular scan.
- */
-struct iwl_scan_offload_req {
-	__le16 flags;
-	__le16 watchdog;
-	__le16 delay;
-	__le16 reserved;
-	struct iwl_scan_offload_schedule schedule_line[2];
-} __packed;
-
-enum iwl_scan_offload_compleate_status {
+enum iwl_scan_offload_complete_status {
 	IWL_SCAN_OFFLOAD_COMPLETED	= 1,
 	IWL_SCAN_OFFLOAD_ABORTED	= 2,
 };
@@ -326,6 +294,7 @@
 	IWL_SCAN_EBS_SUCCESS,
 	IWL_SCAN_EBS_FAILED,
 	IWL_SCAN_EBS_CHAN_NOT_FOUND,
+	IWL_SCAN_EBS_INACTIVE,
 };
 
 /**
@@ -463,8 +432,19 @@
 	IWL_SCAN_PRIORITY_HIGH,
 };
 
+enum iwl_scan_priority_ext {
+	IWL_SCAN_PRIORITY_EXT_0_LOWEST,
+	IWL_SCAN_PRIORITY_EXT_1,
+	IWL_SCAN_PRIORITY_EXT_2,
+	IWL_SCAN_PRIORITY_EXT_3,
+	IWL_SCAN_PRIORITY_EXT_4,
+	IWL_SCAN_PRIORITY_EXT_5,
+	IWL_SCAN_PRIORITY_EXT_6,
+	IWL_SCAN_PRIORITY_EXT_7_HIGHEST,
+};
+
 /**
- * iwl_scan_req_unified_lmac - SCAN_REQUEST_CMD_API_S_VER_1
+ * iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1
  * @reserved1: for alignment and future use
  * @channel_num: num of channels to scan
  * @active-dwell: dwell time for active channels
@@ -487,7 +467,7 @@
  * @channel_opt: channel optimization options, for full and partial scan
  * @data: channel configuration and probe request packet.
  */
-struct iwl_scan_req_unified_lmac {
+struct iwl_scan_req_lmac {
 	/* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */
 	__le32 reserved1;
 	u8 n_channels;
@@ -508,7 +488,7 @@
 	/* SCAN_REQ_PERIODIC_PARAMS_API_S */
 	__le32 iter_num;
 	__le32 delay;
-	struct iwl_scan_offload_schedule schedule[2];
+	struct iwl_scan_schedule_lmac schedule[2];
 	struct iwl_scan_channel_opt channel_opt[2];
 	u8 data[];
 } __packed;
@@ -582,7 +562,11 @@
 	u8 ver;
 } __packed;
 
-#define IWL_MVM_MAX_SIMULTANEOUS_SCANS 8
+/* Neither of these limits may exceed 8, because we use an 8-bit
+ * mask (see IWL_MVM_SCAN_MASK in mvm.h).
+ */
+#define IWL_MVM_MAX_UMAC_SCANS 8
+#define IWL_MVM_MAX_LMAC_SCANS 1
 
 enum scan_config_flags {
 	SCAN_CONFIG_FLAG_ACTIVATE			= BIT(0),
@@ -865,4 +849,27 @@
 	struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
 } __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
 
+/**
+ * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @scanned_channels: number of channels scanned and number of valid elements in
+ *	results array
+ * @status: one of SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @results: array of scan results, only "scanned_channels" of them are valid
+ */
+struct iwl_umac_scan_iter_complete_notif {
+	__le32 uid;
+	u8 scanned_channels;
+	u8 status;
+	u8 bt_status;
+	u8 last_channel;
+	__le32 tsf_low;
+	__le32 tsf_high;
+	struct iwl_scan_results_notif results[];
+} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */
+
 #endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 01b1da6..16e9ef4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -108,6 +108,7 @@
 	ANTENNA_COUPLING_NOTIFICATION = 0xa,
 
 	/* UMAC scan commands */
+	SCAN_ITERATION_COMPLETE_UMAC = 0xb5,
 	SCAN_CFG_CMD = 0xc,
 	SCAN_REQ_UMAC = 0xd,
 	SCAN_ABORT_UMAC = 0xe,
@@ -147,13 +148,6 @@
 
 	LQ_CMD = 0x4e,
 
-	/* Calibration */
-	TEMPERATURE_NOTIFICATION = 0x62,
-	CALIBRATION_CFG_CMD = 0x65,
-	CALIBRATION_RES_NOTIFICATION = 0x66,
-	CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
-	RADIO_VERSION_NOTIFICATION = 0x68,
-
 	/* Scan offload */
 	SCAN_OFFLOAD_REQUEST_CMD = 0x51,
 	SCAN_OFFLOAD_ABORT_CMD = 0x52,
@@ -177,12 +171,8 @@
 	/* Thermal Throttling*/
 	REPLY_THERMAL_MNG_BACKOFF = 0x7e,
 
-	/* Scanning */
-	SCAN_REQUEST_CMD = 0x80,
-	SCAN_ABORT_CMD = 0x81,
-	SCAN_START_NOTIFICATION = 0x82,
-	SCAN_RESULTS_NOTIFICATION = 0x83,
-	SCAN_COMPLETE_NOTIFICATION = 0x84,
+	/* Set/Get DC2DC frequency tune */
+	DC2DC_CONFIG_CMD = 0x83,
 
 	/* NVM */
 	NVM_ACCESS_CMD = 0x88,
@@ -1402,6 +1392,49 @@
 	__le32 metadata[0];
 } __packed; /* MARKER_API_S_VER_1 */
 
+/*
+ * enum iwl_dc2dc_config_id - flag ids
+ *
+ * Ids of dc2dc configuration flags
+ */
+enum iwl_dc2dc_config_id {
+	DCDC_LOW_POWER_MODE_MSK_SET  = 0x1, /* not used */
+	DCDC_FREQ_TUNE_SET = 0x2,
+}; /* MARKER_ID_API_E_VER_1 */
+
+/**
+ * struct iwl_dc2dc_config_cmd - configure dc2dc values
+ *
+ * (DC2DC_CONFIG_CMD = 0x83)
+ *
+ * Set/Get & configure dc2dc values.
+ * The command always returns the current dc2dc values.
+ *
+ * @flags: set/get dc2dc
+ * @enable_low_power_mode: not used.
+ * @dc2dc_freq_tune0: frequency divider - digital domain
+ * @dc2dc_freq_tune1: frequency divider - analog domain
+ */
+struct iwl_dc2dc_config_cmd {
+	__le32 flags;
+	__le32 enable_low_power_mode; /* not used */
+	__le32 dc2dc_freq_tune0;
+	__le32 dc2dc_freq_tune1;
+} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd
+ *
+ * Current dc2dc values returned by the FW.
+ *
+ * @dc2dc_freq_tune0: frequency divider - digital domain
+ * @dc2dc_freq_tune1: frequency divider - analog domain
+ */
+struct iwl_dc2dc_config_resp {
+	__le32 dc2dc_freq_tune0;
+	__le32 dc2dc_freq_tune1;
+} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
+
 /***********************************
  * Smart Fifo API
  ***********************************/
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index df86963..eb10c5e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -623,7 +623,7 @@
 	if (!mvm->trans->ltr_enabled)
 		return 0;
 
-	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_HDC_PHASE_0))
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_HDC_PHASE_0))
 		return iwl_mvm_config_ltr_v1(mvm);
 
 	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
@@ -662,9 +662,9 @@
 		 * device that are triggered by the INIT firmware (MFUART).
 		 */
 		_iwl_trans_stop_device(mvm->trans, false);
-		_iwl_trans_start_hw(mvm->trans, false);
+		ret = _iwl_trans_start_hw(mvm->trans, false);
 		if (ret)
-			return ret;
+			goto error;
 	}
 
 	if (iwlmvm_mod_params.init_dbg)
@@ -754,7 +754,7 @@
 			goto error;
 	}
 
-	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
 		ret = iwl_mvm_config_scan(mvm);
 		if (ret)
 			goto error;
@@ -832,21 +832,6 @@
 	return 0;
 }
 
-int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			 struct iwl_device_cmd *cmd)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_radio_version_notif *radio_version = (void *)pkt->data;
-
-	/* TODO: what to do with that? */
-	IWL_DEBUG_INFO(mvm,
-		       "Radio version: flavor: 0x%08x, step 0x%08x, dash 0x%08x\n",
-		       le32_to_cpu(radio_version->radio_flavor),
-		       le32_to_cpu(radio_version->radio_step),
-		       le32_to_cpu(radio_version->radio_dash));
-	return 0;
-}
-
 int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
 			    struct iwl_rx_cmd_buffer *rxb,
 			    struct iwl_device_cmd *cmd)
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 8088c71..1812dd0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -852,7 +852,7 @@
 				       MAC_FILTER_IN_BEACON |
 				       MAC_FILTER_IN_PROBE_REQUEST |
 				       MAC_FILTER_IN_CRC32);
-	mvm->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
+	ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
 
 	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
 }
@@ -1270,7 +1270,7 @@
 	mvmvif->uploaded = false;
 
 	if (vif->type == NL80211_IFTYPE_MONITOR)
-		mvm->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
+		__clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index dda9f7b..dfdab38 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -80,7 +80,6 @@
 #include "sta.h"
 #include "time-event.h"
 #include "iwl-eeprom-parse.h"
-#include "fw-api-scan.h"
 #include "iwl-phy-db.h"
 #include "testmode.h"
 #include "iwl-fw-error-dump.h"
@@ -319,7 +318,7 @@
 	resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
 	if (IS_ERR_OR_NULL(resp)) {
 		IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
-			      PTR_RET(resp));
+			      PTR_ERR_OR_ZERO(resp));
 		goto out;
 	}
 
@@ -335,7 +334,7 @@
 	kfree(resp);
 	if (IS_ERR_OR_NULL(regd)) {
 		IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
-			      PTR_RET(regd));
+			      PTR_ERR_OR_ZERO(regd));
 		goto out;
 	}
 
@@ -416,20 +415,27 @@
 {
 	struct ieee80211_hw *hw = mvm->hw;
 	int num_mac, ret, i;
+	static const u32 mvm_ciphers[] = {
+		WLAN_CIPHER_SUITE_WEP40,
+		WLAN_CIPHER_SUITE_WEP104,
+		WLAN_CIPHER_SUITE_TKIP,
+		WLAN_CIPHER_SUITE_CCMP,
+	};
 
 	/* Tell mac80211 our characteristics */
-	hw->flags = IEEE80211_HW_SIGNAL_DBM |
-		    IEEE80211_HW_SPECTRUM_MGMT |
-		    IEEE80211_HW_REPORTS_TX_ACK_STATUS |
-		    IEEE80211_HW_QUEUE_CONTROL |
-		    IEEE80211_HW_WANT_MONITOR_VIF |
-		    IEEE80211_HW_SUPPORTS_PS |
-		    IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
-		    IEEE80211_HW_AMPDU_AGGREGATION |
-		    IEEE80211_HW_TIMING_BEACON_ONLY |
-		    IEEE80211_HW_CONNECTION_MONITOR |
-		    IEEE80211_HW_CHANCTX_STA_CSA |
-		    IEEE80211_HW_SUPPORTS_CLONED_SKBS;
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, SPECTRUM_MGMT);
+	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(hw, QUEUE_CONTROL);
+	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+	ieee80211_hw_set(hw, SUPPORTS_PS);
+	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+	ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
+	ieee80211_hw_set(hw, CONNECTION_MONITOR);
+	ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
 
 	hw->queues = mvm->first_agg_queue;
 	hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
@@ -441,19 +447,38 @@
 	hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
 	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
 
+	BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2);
+	memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
+	hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
+	hw->wiphy->cipher_suites = mvm->ciphers;
+
 	/*
 	 * Enable 11w if advertised by firmware and software crypto
 	 * is not enabled (as the firmware will interpret some mgmt
 	 * packets, so enabling it with software crypto isn't safe)
 	 */
 	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
-	    !iwlwifi_mod_params.sw_crypto)
-		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+	    !iwlwifi_mod_params.sw_crypto) {
+		ieee80211_hw_set(hw, MFP_CAPABLE);
+		mvm->ciphers[hw->wiphy->n_cipher_suites] =
+			WLAN_CIPHER_SUITE_AES_CMAC;
+		hw->wiphy->n_cipher_suites++;
+	}
 
-	hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
+	/* currently FW API supports only one optional cipher scheme */
+	if (mvm->fw->cs[0].cipher) {
+		mvm->hw->n_cipher_schemes = 1;
+		mvm->hw->cipher_schemes = &mvm->fw->cs[0];
+		mvm->ciphers[hw->wiphy->n_cipher_suites] =
+			mvm->fw->cs[0].cipher;
+		hw->wiphy->n_cipher_suites++;
+	}
+
+	ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
 	hw->wiphy->features |=
 		NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
-		NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+		NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
+		NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
 
 	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
 	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
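
Taken together, the cipher bookkeeping in the hunk above can fill all six
slots of the new mvm->ciphers array (see the u32 ciphers[6] member added to
struct iwl_mvm in mvm.h below): four static suites, plus AES-CMAC when MFP
is enabled, plus at most one firmware-provided cipher scheme.  That worst
case is exactly what the BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) <
ARRAY_SIZE(mvm_ciphers) + 2) guards against.
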
@@ -506,10 +531,19 @@
 
 	iwl_mvm_reset_phy_ctxts(mvm);
 
-	hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm, false);
+	hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
 
 	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
 
+	BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
+	BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
+		     IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
+
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+		mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
+	else
+		mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
+
 	if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
 		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
 			&mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
@@ -517,10 +551,10 @@
 		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
 			&mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
 
-		if ((mvm->fw->ucode_capa.capa[0] &
-		     IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
-		    (mvm->fw->ucode_capa.api[0] &
-		     IWL_UCODE_TLV_API_LQ_SS_PARAMS))
+		if (fw_has_capa(&mvm->fw->ucode_capa,
+				IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+		    fw_has_api(&mvm->fw->ucode_capa,
+			       IWL_UCODE_TLV_API_LQ_SS_PARAMS))
 			hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
 				IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
 	}
@@ -532,14 +566,12 @@
 	else
 		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
-	if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) {
-		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
-		hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
-		hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
-		/* we create the 802.11 header and zero length SSID IE. */
-		hw->wiphy->max_sched_scan_ie_len =
-			SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
-	}
+	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+	hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+	hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+	/* we create the 802.11 header and zero-length SSID IE, so
+	 * reserve room for both: the 24 byte management-frame header
+	 * plus the 2 byte SSID IE header. */
+	hw->wiphy->max_sched_scan_ie_len =
+		SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
 
 	hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
 			       NL80211_FEATURE_LOW_PRIORITY_SCAN |
@@ -548,30 +580,24 @@
 			       NL80211_FEATURE_STATIC_SMPS |
 			       NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
 
-	if (mvm->fw->ucode_capa.capa[0] &
-	    IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
 		hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
-	if (mvm->fw->ucode_capa.capa[0] &
-	    IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
 		hw->wiphy->features |= NL80211_FEATURE_QUIET;
 
-	if (mvm->fw->ucode_capa.capa[0] &
-	    IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
 		hw->wiphy->features |=
 			NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
 
-	if (mvm->fw->ucode_capa.capa[0] &
-	    IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
 		hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
 
 	mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
 
-	/* currently FW API supports only one optional cipher scheme */
-	if (mvm->fw->cs[0].cipher) {
-		mvm->hw->n_cipher_schemes = 1;
-		mvm->hw->cipher_schemes = &mvm->fw->cs[0];
-	}
-
 #ifdef CONFIG_PM_SLEEP
 	if (iwl_mvm_is_d0i3_supported(mvm) &&
 	    device_can_wakeup(mvm->trans->dev)) {
@@ -611,13 +637,14 @@
 	if (ret)
 		return ret;
 
-	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_TDLS_SUPPORT) {
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
 		IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
 		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
 	}
 
-	if (mvm->fw->ucode_capa.capa[0] &
-	    IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH) {
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
 		IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
 		hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
 	}
@@ -730,6 +757,60 @@
 	return true;
 }
 
+#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)	\
+	do {							\
+		if (!(le16_to_cpu(_tid_bm) & BIT(_tid)))	\
+			break;					\
+		iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);	\
+	} while (0)
+
+static void
+iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
+			    enum ieee80211_ampdu_mlme_action action)
+{
+	struct iwl_fw_dbg_trigger_tlv *trig;
+	struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+		return;
+
+	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+	ba_trig = (void *)trig->data;
+
+	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+		return;
+
+	switch (action) {
+	case IEEE80211_AMPDU_TX_OPERATIONAL: {
+		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+		CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
+				 "TX AGG START: MAC %pM tid %d ssn %d\n",
+				 sta->addr, tid, tid_data->ssn);
+		break;
+		}
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+		CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
+				 "TX AGG STOP: MAC %pM tid %d\n",
+				 sta->addr, tid);
+		break;
+	case IEEE80211_AMPDU_RX_START:
+		CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
+				 "RX AGG START: MAC %pM tid %d ssn %d\n",
+				 sta->addr, tid, rx_ba_ssn);
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
+				 "RX AGG STOP: MAC %pM tid %d\n",
+				 sta->addr, tid);
+		break;
+	default:
+		break;
+	}
+}
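
Since CHECK_BA_TRIGGER() hides the control flow, here is roughly what the
IEEE80211_AMPDU_RX_STOP case above expands to.  The do/while (0) wrapper is
what makes the break leave only the macro body instead of the enclosing
switch:

/* expansion sketch of one CHECK_BA_TRIGGER() invocation */
do {
	if (!(le16_to_cpu(ba_trig->rx_ba_stop) & BIT(tid)))
		break;	/* this TID is not selected in the trigger bitmap */
	iwl_mvm_fw_dbg_collect_trig(mvm, trig,
				    "RX AGG STOP: MAC %pM tid %d\n",
				    sta->addr, tid);
} while (0);
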
+
 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
 				    struct ieee80211_vif *vif,
 				    enum ieee80211_ampdu_mlme_action action,
@@ -806,6 +887,16 @@
 		ret = -EINVAL;
 		break;
 	}
+
+	if (!ret) {
+		u16 rx_ba_ssn = 0;
+
+		if (action == IEEE80211_AMPDU_RX_START)
+			rx_ba_ssn = *ssn;
+
+		iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
+					    rx_ba_ssn, action);
+	}
 	mutex_unlock(&mvm->mutex);
 
 	/*
@@ -1227,22 +1318,23 @@
 
 	iwl_trans_stop_device(mvm->trans);
 
-	mvm->scan_status = IWL_MVM_SCAN_NONE;
+	mvm->scan_status = 0;
 	mvm->ps_disabled = false;
 	mvm->calibrating = false;
 
 	/* just in case one was running */
 	ieee80211_remain_on_channel_expired(mvm->hw);
 
-	ieee80211_iterate_active_interfaces_atomic(
-		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
-		iwl_mvm_cleanup_iterator, mvm);
+	/*
+	 * clean up all interfaces, even inactive ones, as some might have
+	 * gone down during the HW restart
+	 */
+	ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
 
 	mvm->p2p_device_vif = NULL;
 	mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
 
 	iwl_mvm_reset_phy_ctxts(mvm);
-	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
 	memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
 	memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
 	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
@@ -1404,7 +1496,7 @@
 	 * The work item could be running or queued if the
 	 * ROC time event stops just as we get here.
 	 */
-	cancel_work_sync(&mvm->roc_done_wk);
+	flush_work(&mvm->roc_done_wk);
 
 	iwl_trans_stop_device(mvm->trans);
 
@@ -1417,20 +1509,24 @@
 	/*
 	 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
 	 * won't be called in this case).
+	 * But make sure to clean up interfaces that went down before or
+	 * during the HW restart.
 	 */
-	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+	if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+		ieee80211_iterate_interfaces(mvm->hw, 0,
+					     iwl_mvm_cleanup_iterator, mvm);
 
 	/* We shouldn't have any UIDs still set.  Loop over all the UIDs to
 	 * make sure there's nothing left there and warn if any is found.
 	 */
-	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
 		int i;
 
-		for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
-			if (WARN_ONCE(mvm->scan_uid[i],
-				      "UMAC scan UID %d was not cleaned\n",
-				      mvm->scan_uid[i]))
-				mvm->scan_uid[i] = 0;
+		for (i = 0; i < mvm->max_scans; i++) {
+			if (WARN_ONCE(mvm->scan_uid_status[i],
+				      "UMAC scan UID %d status was not cleaned\n",
+				      i))
+				mvm->scan_uid_status[i] = 0;
 		}
 	}
 
@@ -1495,7 +1591,7 @@
 		.pwr_restriction = cpu_to_le16(8 * tx_power),
 	};
 
-	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_TX_POWER_DEV))
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_DEV))
 		return iwl_mvm_set_tx_power_old(mvm, vif, tx_power);
 
 	if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
@@ -2354,7 +2450,7 @@
 	mutex_lock(&mvm->mutex);
 
 	if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
-		iwl_mvm_scan_offload_stop(mvm, true);
+		iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
 
 	switch (vif->type) {
 	case NL80211_IFTYPE_STATION:
@@ -2373,89 +2469,21 @@
 	iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
 }
 
-static int iwl_mvm_cancel_scan_wait_notif(struct iwl_mvm *mvm,
-					  enum iwl_scan_status scan_type)
-{
-	int ret;
-	bool wait_for_handlers = false;
-
-	mutex_lock(&mvm->mutex);
-
-	if (mvm->scan_status != scan_type) {
-		ret = 0;
-		/* make sure there are no pending notifications */
-		wait_for_handlers = true;
-		goto out;
-	}
-
-	switch (scan_type) {
-	case IWL_MVM_SCAN_SCHED:
-		ret = iwl_mvm_scan_offload_stop(mvm, true);
-		break;
-	case IWL_MVM_SCAN_OS:
-		ret = iwl_mvm_cancel_scan(mvm);
-		break;
-	case IWL_MVM_SCAN_NONE:
-	default:
-		WARN_ON_ONCE(1);
-		ret = -EINVAL;
-		break;
-	}
-	if (ret)
-		goto out;
-
-	wait_for_handlers = true;
-out:
-	mutex_unlock(&mvm->mutex);
-
-	/* make sure we consume the completion notification */
-	if (wait_for_handlers)
-		iwl_mvm_wait_for_async_handlers(mvm);
-
-	return ret;
-}
 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
 			       struct ieee80211_vif *vif,
 			       struct ieee80211_scan_request *hw_req)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-	struct cfg80211_scan_request *req = &hw_req->req;
 	int ret;
 
-	if (req->n_channels == 0 ||
-	    req->n_channels > mvm->fw->ucode_capa.n_scan_channels)
+	if (hw_req->req.n_channels == 0 ||
+	    hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
 		return -EINVAL;
 
-	if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
-		ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_SCHED);
-		if (ret)
-			return ret;
-	}
-
 	mutex_lock(&mvm->mutex);
-
-	if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
-		IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
-		ret = -EBUSY;
-		goto out;
-	}
-
-	if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
-
-	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
-		ret = iwl_mvm_scan_umac(mvm, vif, hw_req);
-	else
-		ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req);
-
-	if (ret)
-		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-out:
+	ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
 	mutex_unlock(&mvm->mutex);
+
 	return ret;
 }
 
@@ -2473,12 +2501,8 @@
 	 * cancel the scan before ieee80211_scan_work() could run.
 	 * To handle that, simply return if the scan is not running.
 	 */
-	/* FIXME: for now, we ignore this race for UMAC scans, since
-	 * they don't set the scan_status.
-	 */
-	if ((mvm->scan_status == IWL_MVM_SCAN_OS) ||
-	    (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN))
-		iwl_mvm_cancel_scan(mvm);
+	if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
+		iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
 
 	mutex_unlock(&mvm->mutex);
 }
@@ -2794,35 +2818,17 @@
 					struct ieee80211_scan_ies *ies)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	int ret;
 
-	if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
-		ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS);
-		if (ret)
-			return ret;
-	}
-
 	mutex_lock(&mvm->mutex);
 
-	if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
-		IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
-		ret = -EBUSY;
-		goto out;
-	}
-
 	if (!vif->bss_conf.idle) {
 		ret = -EBUSY;
 		goto out;
 	}
 
-	if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	ret = iwl_mvm_scan_offload_start(mvm, vif, req, ies);
-	if (ret)
-		mvm->scan_status = IWL_MVM_SCAN_NONE;
+	ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
 
 out:
 	mutex_unlock(&mvm->mutex);
@@ -2845,16 +2851,12 @@
 	 * could run.  To handle this, simply return if the scan is
 	 * not running.
 	 */
-	/* FIXME: for now, we ignore this race for UMAC scans, since
-	 * they don't set the scan_status.
-	 */
-	if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
-	    !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+	if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
 		mutex_unlock(&mvm->mutex);
 		return 0;
 	}
 
-	ret = iwl_mvm_scan_offload_stop(mvm, false);
+	ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
 	mutex_unlock(&mvm->mutex);
 	iwl_mvm_wait_for_async_handlers(mvm);
 
@@ -2883,7 +2885,7 @@
 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
 		break;
 	case WLAN_CIPHER_SUITE_AES_CMAC:
-		WARN_ON_ONCE(!(hw->flags & IEEE80211_HW_MFP_CAPABLE));
+		WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
 		break;
 	case WLAN_CIPHER_SUITE_WEP40:
 	case WLAN_CIPHER_SUITE_WEP104:
@@ -2922,8 +2924,21 @@
 			break;
 		}
 
+		/* During FW restart, in order to restore the state as it was,
+		 * don't try to reprogram keys we previously failed for.
+		 */
+		if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+		    key->hw_key_idx == STA_KEY_IDX_INVALID) {
+			IWL_DEBUG_MAC80211(mvm,
+					   "skip invalid idx key programming during restart\n");
+			ret = 0;
+			break;
+		}
+
 		IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
-		ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, false);
+		ret = iwl_mvm_set_sta_key(mvm, vif, sta, key,
+					  test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+						   &mvm->status));
 		if (ret) {
 			IWL_WARN(mvm, "set key failed\n");
 			/*
@@ -3001,7 +3016,7 @@
 	return true;
 }
 
-#define AUX_ROC_MAX_DELAY_ON_CHANNEL 5000
+#define AUX_ROC_MAX_DELAY_ON_CHANNEL 200
 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
 				    struct ieee80211_channel *channel,
 				    struct ieee80211_vif *vif,
@@ -3106,8 +3121,8 @@
 
 	switch (vif->type) {
 	case NL80211_IFTYPE_STATION:
-		if (mvm->fw->ucode_capa.capa[0] &
-		    IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) {
+		if (fw_has_capa(&mvm->fw->ucode_capa,
+				IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
 			/* Use aux roc framework (HS20) */
 			ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
 						       vif, duration);
@@ -3899,7 +3914,7 @@
 	if (idx != 0)
 		return -ENOENT;
 
-	if (!(mvm->fw->ucode_capa.capa[0] &
+	if (!fw_has_capa(&mvm->fw->ucode_capa,
 			IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
 		return -ENOENT;
 
@@ -3946,8 +3961,8 @@
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
-	if (!(mvm->fw->ucode_capa.capa[0] &
-				IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+	if (!fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
 		return;
 
 	/* if beacon filtering isn't on mac80211 does it anyway */
@@ -3977,9 +3992,9 @@
 	mutex_unlock(&mvm->mutex);
 }
 
-static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
-				       struct ieee80211_vif *vif,
-				       const struct ieee80211_event *event)
+static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
+					struct ieee80211_vif *vif,
+					const struct ieee80211_event *event)
 {
 #define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...)	\
 	do {							\
@@ -3988,7 +4003,6 @@
 		iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
 	} while (0)
 
-	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	struct iwl_fw_dbg_trigger_tlv *trig;
 	struct iwl_fw_dbg_trigger_mlme *trig_mlme;
 
@@ -4032,6 +4046,75 @@
 #undef CHECK_MLME_TRIGGER
 }
 
+static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
+					  struct ieee80211_vif *vif,
+					  const struct ieee80211_event *event)
+{
+	struct iwl_fw_dbg_trigger_tlv *trig;
+	struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+		return;
+
+	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+	ba_trig = (void *)trig->data;
+	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+		return;
+
+	if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
+		return;
+
+	iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+				    "BAR received from %pM, tid %d, ssn %d",
+				    event->u.ba.sta->addr, event->u.ba.tid,
+				    event->u.ba.ssn);
+}
+
+static void
+iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
+				     struct ieee80211_vif *vif,
+				     const struct ieee80211_event *event)
+{
+	struct iwl_fw_dbg_trigger_tlv *trig;
+	struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+		return;
+
+	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+	ba_trig = (void *)trig->data;
+	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+		return;
+
+	if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
+		return;
+
+	iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+				    "Frame from %pM timed out, tid %d",
+				    event->u.ba.sta->addr, event->u.ba.tid);
+}
+
+static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
+				       struct ieee80211_vif *vif,
+				       const struct ieee80211_event *event)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	switch (event->type) {
+	case MLME_EVENT:
+		iwl_mvm_event_mlme_callback(mvm, vif, event);
+		break;
+	case BAR_RX_EVENT:
+		iwl_mvm_event_bar_rx_callback(mvm, vif, event);
+		break;
+	case BA_FRAME_TIMEOUT:
+		iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
+		break;
+	default:
+		break;
+	}
+}
+
 const struct ieee80211_ops iwl_mvm_hw_ops = {
 	.tx = iwl_mvm_mac_tx,
 	.ampdu_action = iwl_mvm_mac_ampdu_action,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index cf70f68..2d4bad5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -76,6 +76,7 @@
 #include "iwl-notif-wait.h"
 #include "iwl-eeprom-parse.h"
 #include "iwl-fw-file.h"
+#include "iwl-config.h"
 #include "sta.h"
 #include "fw-api.h"
 #include "constants.h"
@@ -275,6 +276,7 @@
 	IWL_MVM_REF_UCODE_DOWN,
 	IWL_MVM_REF_SCAN,
 	IWL_MVM_REF_ROC,
+	IWL_MVM_REF_ROC_AUX,
 	IWL_MVM_REF_P2P_CLIENT,
 	IWL_MVM_REF_AP_IBSS,
 	IWL_MVM_REF_USER,
@@ -445,10 +447,26 @@
 
 extern const u8 tid_to_mac80211_ac[];
 
+#define IWL_MVM_SCAN_STOPPING_SHIFT	8
+
 enum iwl_scan_status {
-	IWL_MVM_SCAN_NONE,
-	IWL_MVM_SCAN_OS,
-	IWL_MVM_SCAN_SCHED,
+	IWL_MVM_SCAN_REGULAR		= BIT(0),
+	IWL_MVM_SCAN_SCHED		= BIT(1),
+	IWL_MVM_SCAN_NETDETECT		= BIT(2),
+
+	IWL_MVM_SCAN_STOPPING_REGULAR	= BIT(8),
+	IWL_MVM_SCAN_STOPPING_SCHED	= BIT(9),
+	IWL_MVM_SCAN_STOPPING_NETDETECT	= BIT(10),
+
+	IWL_MVM_SCAN_REGULAR_MASK	= IWL_MVM_SCAN_REGULAR |
+					  IWL_MVM_SCAN_STOPPING_REGULAR,
+	IWL_MVM_SCAN_SCHED_MASK		= IWL_MVM_SCAN_SCHED |
+					  IWL_MVM_SCAN_STOPPING_SCHED,
+	IWL_MVM_SCAN_NETDETECT_MASK	= IWL_MVM_SCAN_NETDETECT |
+					  IWL_MVM_SCAN_STOPPING_NETDETECT,
+
+	IWL_MVM_SCAN_STOPPING_MASK	= 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
+	IWL_MVM_SCAN_MASK		= 0xff,
 };
 
 /**
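
The tri-state scan_status enum above becomes a bitmask so that a regular
and a scheduled scan can run, and be torn down, independently: each scan
type owns a "running" bit in the low byte and a matching "stopping" bit
eight positions up.  A sketch of the intended transition, with an
illustrative helper name that does not exist in the patch:

/* illustrative only: move a scan of the given type (e.g.
 * IWL_MVM_SCAN_SCHED) from "running" to "stopping"; the scan
 * completion notification later clears the stopping bit, see the
 * LMAC complete-notif handler in scan.c further down. */
static void mark_scan_stopping(struct iwl_mvm *mvm, int type)
{
	mvm->scan_status &= ~type;
	mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
}
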
@@ -463,49 +481,6 @@
 	const u8 *data;
 };
 
-/*
- * Tx-backoff threshold
- * @temperature: The threshold in Celsius
- * @backoff: The tx-backoff in uSec
- */
-struct iwl_tt_tx_backoff {
-	s32 temperature;
-	u32 backoff;
-};
-
-#define TT_TX_BACKOFF_SIZE 6
-
-/**
- * struct iwl_tt_params - thermal throttling parameters
- * @ct_kill_entry: CT Kill entry threshold
- * @ct_kill_exit: CT Kill exit threshold
- * @ct_kill_duration: The time  intervals (in uSec) in which the driver needs
- *	to checks whether to exit CT Kill.
- * @dynamic_smps_entry: Dynamic SMPS entry threshold
- * @dynamic_smps_exit: Dynamic SMPS exit threshold
- * @tx_protection_entry: TX protection entry threshold
- * @tx_protection_exit: TX protection exit threshold
- * @tx_backoff: Array of thresholds for tx-backoff , in ascending order.
- * @support_ct_kill: Support CT Kill?
- * @support_dynamic_smps: Support dynamic SMPS?
- * @support_tx_protection: Support tx protection?
- * @support_tx_backoff: Support tx-backoff?
- */
-struct iwl_tt_params {
-	s32 ct_kill_entry;
-	s32 ct_kill_exit;
-	u32 ct_kill_duration;
-	s32 dynamic_smps_entry;
-	s32 dynamic_smps_exit;
-	s32 tx_protection_entry;
-	s32 tx_protection_exit;
-	struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
-	bool support_ct_kill;
-	bool support_dynamic_smps;
-	bool support_tx_protection;
-	bool support_tx_backoff;
-};
-
 /**
  * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure
  * @ct_kill_exit: worker to exit thermal kill
@@ -520,7 +495,7 @@
 	bool dynamic_smps;
 	u32 tx_backoff;
 	u32 min_backoff;
-	const struct iwl_tt_params *params;
+	struct iwl_tt_params params;
 	bool throttle;
 };
 
@@ -647,13 +622,15 @@
 	u32 rts_threshold;
 
 	/* Scan status, cmd (pre-allocated) and auxiliary station */
-	enum iwl_scan_status scan_status;
+	unsigned int scan_status;
 	void *scan_cmd;
 	struct iwl_mcast_filter_cmd *mcast_filter_cmd;
 
+	/* max number of simultaneous scans the FW supports */
+	unsigned int max_scans;
+
 	/* UMAC scan tracking */
-	u32 scan_uid[IWL_MVM_MAX_SIMULTANEOUS_SCANS];
-	u8 scan_seq_num, sched_scan_seq_num;
+	u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];
 
 	/* rx chain antennas set through debugfs for the scan command */
 	u8 scan_rx_ant;
@@ -843,6 +820,8 @@
 	} tdls_cs;
 
 	struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
+
+	u32 ciphers[6];
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -912,14 +891,15 @@
 	return mvm->trans->cfg->d0i3 &&
 	       mvm->trans->d0i3_mode != IWL_D0I3_MODE_OFF &&
 	       !iwlwifi_mod_params.d0i3_disable &&
-	       (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
+	       fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
 }
 
 static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
 {
 	bool nvm_lar = mvm->nvm_data->lar_enabled;
-	bool tlv_lar = mvm->fw->ucode_capa.capa[0] &
-		IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
+	bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
+				   IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
 
 	if (iwlwifi_mod_params.lar_disable)
 		return false;
@@ -936,24 +916,28 @@
 
 static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm)
 {
-	return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WIFI_MCC_UPDATE ||
-	       mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC;
+	return fw_has_api(&mvm->fw->ucode_capa,
+			  IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
+	       fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC);
 }
 
 static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
 {
-	return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG;
+	return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCD_CFG);
 }
 
 static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm)
 {
-	return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) &&
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) &&
 		IWL_MVM_BT_COEX_CORUNNING;
 }
 
 static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
 {
-	return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
 		IWL_MVM_BT_COEX_RRC;
 }
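
The recurring "capa[0] & FLAG" to fw_has_capa() rewrites throughout this
series exist because the firmware capability/API words outgrew a single
u32; the TLV flags become bit numbers tested against a bitmap.  The helpers
are expected to reduce to something like the sketch below (the _capa/_api
member names are illustrative; the real definitions live in iwl-fw-file.h):

/* behavioural sketch, not the authoritative definitions */
static inline bool fw_has_capa(const struct iwl_ucode_capabilities *c,
			       unsigned int bit)
{
	return test_bit(bit, c->_capa);
}

static inline bool fw_has_api(const struct iwl_ucode_capabilities *c,
			      unsigned int bit)
{
	return test_bit(bit, c->_api);
}
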
 
@@ -1083,8 +1067,6 @@
 		      struct iwl_device_cmd *cmd);
 int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 			struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			 struct iwl_device_cmd *cmd);
 int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
 				  struct iwl_rx_cmd_buffer *rxb,
 				  struct iwl_device_cmd *cmd);
@@ -1093,8 +1075,6 @@
 int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
 				struct iwl_rx_cmd_buffer *rxb,
 				struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			 struct iwl_device_cmd *cmd);
 int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 			    struct iwl_device_cmd *cmd);
 int iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
@@ -1146,48 +1126,38 @@
 			  struct ieee80211_vif *disabled_vif);
 
 /* Scanning */
+int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			   struct cfg80211_scan_request *req,
+			   struct ieee80211_scan_ies *ies);
 int iwl_mvm_scan_size(struct iwl_mvm *mvm);
-int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
-int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan);
+int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
+int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
 
 /* Scheduled scan */
-int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
-					   struct iwl_rx_cmd_buffer *rxb,
-					   struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
-						struct iwl_rx_cmd_buffer *rxb,
-						struct iwl_device_cmd *cmd);
-int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
-				       struct cfg80211_sched_scan_request *req);
-int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
-			       struct ieee80211_vif *vif,
-			       struct cfg80211_sched_scan_request *req,
-			       struct ieee80211_scan_ies *ies);
-int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify);
-int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
-				    struct iwl_rx_cmd_buffer *rxb,
-				    struct iwl_device_cmd *cmd);
-
-/* Unified scan */
-int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
-			      struct ieee80211_vif *vif,
-			      struct ieee80211_scan_request *req);
-int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
-				    struct ieee80211_vif *vif,
-				    struct cfg80211_sched_scan_request *req,
-				    struct ieee80211_scan_ies *ies);
+int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+					struct iwl_rx_cmd_buffer *rxb,
+					struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+					     struct iwl_rx_cmd_buffer *rxb,
+					     struct iwl_device_cmd *cmd);
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+			     struct ieee80211_vif *vif,
+			     struct cfg80211_sched_scan_request *req,
+			     struct ieee80211_scan_ies *ies,
+			     int type);
+int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+				struct iwl_rx_cmd_buffer *rxb,
+				struct iwl_device_cmd *cmd);
 
 /* UMAC scan */
 int iwl_mvm_config_scan(struct iwl_mvm *mvm);
-int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-		      struct ieee80211_scan_request *req);
-int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-			    struct cfg80211_sched_scan_request *req,
-			    struct ieee80211_scan_ies *ies);
 int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
 					struct iwl_rx_cmd_buffer *rxb,
 					struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+					     struct iwl_rx_cmd_buffer *rxb,
+					     struct iwl_device_cmd *cmd);
 
 /* MVM debugfs */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 87b2a30..2a6be35 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -316,8 +316,8 @@
 	phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
 
 	lar_enabled = !iwlwifi_mod_params.lar_disable &&
-		      (mvm->fw->ucode_capa.capa[0] &
-		       IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+		      fw_has_capa(&mvm->fw->ucode_capa,
+				  IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
 
 	return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
 				  regulatory, mac_override, phy_sku,
@@ -583,9 +583,9 @@
 		kfree(nvm_buffer);
 	}
 
-	/* load external NVM if configured */
+	/* load the external NVM (PNVM) only if one was selected via the mod param */
 	if (mvm->nvm_file_name) {
-		/* read External NVM file - take the default */
+		/* read the external NVM file named by the mod param */
 		ret = iwl_mvm_read_external_nvm(mvm);
 		if (ret) {
 			/* choose the nvm_file name according to the
@@ -792,8 +792,8 @@
 	char mcc[3];
 
 	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
-		tlv_lar = mvm->fw->ucode_capa.capa[0] &
-			IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
+		tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
+				      IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
 		nvm_lar = mvm->nvm_data->lar_enabled;
 		if (tlv_lar != nvm_lar)
 			IWL_INFO(mvm,
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 2ea0123..e4fa500 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -194,7 +194,7 @@
 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
 	 * to lose ownership and become unable to obtain it back.
 	 */
-	if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+	if (!mvm->trans->cfg->apmg_not_supported)
 		iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
 				       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
 				       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
@@ -238,15 +238,16 @@
 	RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
 
 	RX_HANDLER(SCAN_ITERATION_COMPLETE,
-		   iwl_mvm_rx_scan_offload_iter_complete_notif, false),
+		   iwl_mvm_rx_lmac_scan_iter_complete_notif, false),
 	RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
-		   iwl_mvm_rx_scan_offload_complete_notif, true),
-	RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results,
+		   iwl_mvm_rx_lmac_scan_complete_notif, true),
+	RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
 		   false),
 	RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
 		   true),
+	RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
+		   iwl_mvm_rx_umac_scan_iter_complete_notif, false),
 
-	RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
 	RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
 
 	RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
@@ -280,17 +281,11 @@
 	CMD(BINDING_CONTEXT_CMD),
 	CMD(TIME_QUOTA_CMD),
 	CMD(NON_QOS_TX_COUNTER_CMD),
-	CMD(RADIO_VERSION_NOTIFICATION),
-	CMD(SCAN_REQUEST_CMD),
-	CMD(SCAN_ABORT_CMD),
-	CMD(SCAN_START_NOTIFICATION),
-	CMD(SCAN_RESULTS_NOTIFICATION),
-	CMD(SCAN_COMPLETE_NOTIFICATION),
+	CMD(DC2DC_CONFIG_CMD),
 	CMD(NVM_ACCESS_CMD),
 	CMD(PHY_CONFIGURATION_CMD),
 	CMD(CALIB_RES_NOTIF_PHY_DB),
 	CMD(SET_CALIB_DEFAULT_CMD),
-	CMD(CALIBRATION_COMPLETE_NOTIFICATION),
 	CMD(ADD_STA_KEY),
 	CMD(ADD_STA),
 	CMD(REMOVE_STA),
@@ -359,6 +354,7 @@
 	CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION),
 	CMD(TDLS_CONFIG_CMD),
 	CMD(MCC_UPDATE_CMD),
+	CMD(SCAN_ITERATION_COMPLETE_UMAC),
 };
 #undef CMD
 
@@ -520,15 +516,12 @@
 
 	min_backoff = calc_min_backoff(trans, cfg);
 	iwl_mvm_tt_initialize(mvm, min_backoff);
-	/* set the nvm_file_name according to priority */
-	if (iwlwifi_mod_params.nvm_file) {
+
+	if (iwlwifi_mod_params.nvm_file)
 		mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
-	} else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
-		if (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_B_STEP)
-			mvm->nvm_file_name = mvm->cfg->default_nvm_file_B_step;
-		else
-			mvm->nvm_file_name = mvm->cfg->default_nvm_file_C_step;
-	}
+	else
+		IWL_DEBUG_EEPROM(mvm->trans->dev,
+				 "working without external nvm file\n");
 
 	if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
 		 "not allowing power-up and not having nvm_file\n"))
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 33cd68a..daff1d0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -138,7 +138,7 @@
 
 typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
 				     struct ieee80211_sta *sta,
-				     struct iwl_scale_tbl_info *tbl,
+				     struct rs_rate *rate,
 				     const struct rs_tx_column *next_col);
 
 struct rs_tx_column {
@@ -150,14 +150,14 @@
 };
 
 static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			 struct iwl_scale_tbl_info *tbl,
+			 struct rs_rate *rate,
 			 const struct rs_tx_column *next_col)
 {
 	return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
 }
 
 static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  struct iwl_scale_tbl_info *tbl,
+			  struct rs_rate *rate,
 			  const struct rs_tx_column *next_col)
 {
 	struct iwl_mvm_sta *mvmsta;
@@ -187,7 +187,7 @@
 }
 
 static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  struct iwl_scale_tbl_info *tbl,
+			  struct rs_rate *rate,
 			  const struct rs_tx_column *next_col)
 {
 	if (!sta->ht_cap.ht_supported)
@@ -197,10 +197,9 @@
 }
 
 static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			 struct iwl_scale_tbl_info *tbl,
+			 struct rs_rate *rate,
 			 const struct rs_tx_column *next_col)
 {
-	struct rs_rate *rate = &tbl->rate;
 	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
 	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
 
@@ -1128,8 +1127,8 @@
 	u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
-	bool allow_ant_mismatch = mvm->fw->ucode_capa.api[0] &
-		IWL_UCODE_TLV_API_LQ_SS_PARAMS;
+	bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa,
+					     IWL_UCODE_TLV_API_LQ_SS_PARAMS);
 
 	/* Treat uninitialized rate scaling data same as non-existing. */
 	if (!lq_sta) {
@@ -1659,7 +1658,8 @@
 
 		for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
 			allow_func = next_col->checks[j];
-			if (allow_func && !allow_func(mvm, sta, tbl, next_col))
+			if (allow_func && !allow_func(mvm, sta, &tbl->rate,
+						      next_col))
 				break;
 		}
 
@@ -2136,7 +2136,7 @@
 	}
 
 	/* current tx rate */
-	index = lq_sta->last_txrate_idx;
+	index = rate->index;
 
 	/* rates available for this association, and for modulation mode */
 	rate_mask = rs_get_supported_rates(lq_sta, rate);
@@ -2184,14 +2184,7 @@
 		 * or search for a new one? */
 		rs_stay_in_table(lq_sta, false);
 
-		goto out;
-	}
-	/* Else we have enough samples; calculate estimate of
-	 * actual average throughput */
-	if (window->average_tpt != ((window->success_ratio *
-			tbl->expected_tpt[index] + 64) / 128)) {
-		window->average_tpt = ((window->success_ratio *
-					tbl->expected_tpt[index] + 64) / 128);
+		return;
 	}
 
 	/* If we are searching for better modulation mode, check success. */
@@ -2403,9 +2396,6 @@
 			rs_set_stay_in_table(mvm, 0, lq_sta);
 		}
 	}
-
-out:
-	lq_sta->last_txrate_idx = index;
 }
 
 struct rs_init_rate_info {
@@ -2548,7 +2538,6 @@
 	rate = &tbl->rate;
 
 	rs_get_initial_rate(mvm, lq_sta, band, rate);
-	lq_sta->last_txrate_idx = rate->index;
 
 	WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
 	if (rate->ant == ANT_A)
@@ -2725,7 +2714,7 @@
 	    (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
 		lq_sta->stbc_capable = true;
 
-	if ((mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
 	    (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
 	    (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
 		lq_sta->bfer_capable = true;
@@ -3009,7 +2998,7 @@
 	valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
 
 	/* TODO: remove old API when min FW API hits 14 */
-	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
 	    rs_stbc_allow(mvm, sta, lq_sta))
 		rate.stbc = true;
 
@@ -3223,12 +3212,9 @@
 
 	rs_build_rates_table(mvm, sta, lq_sta, initial_rate);
 
-	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS)
+	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS))
 		rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate);
 
-	if (num_of_ant(initial_rate->ant) == 1)
-		lq_cmd->single_stream_ant_msk = initial_rate->ant;
-
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index e4aa934..2a3da31 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -322,8 +322,6 @@
 	struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
 	u8 tx_agg_tid_en;
 
-	/* used to be in sta_info */
-	int last_txrate_idx;
 	/* last tx rate_n_flags */
 	u32 last_rate_n_flags;
 	/* packets destined for this STA are aggregated */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index d6314dd..8f1d93b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -570,7 +570,7 @@
 	};
 	u32 temperature;
 
-	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_STATS_V10) {
+	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STATS_V10)) {
 		struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data;
 
 		if (iwl_rx_packet_payload_len(pkt) != v10_len)
@@ -610,7 +610,7 @@
 	/* Only handle rx statistics temperature changes if async temp
 	 * notifications are not supported
 	 */
-	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_ASYNC_DTM))
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ASYNC_DTM))
 		iwl_mvm_tt_temp_changed(mvm, temperature);
 
 	ieee80211_iterate_active_interfaces(mvm->hw,
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 1075a21..5de1449 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -67,11 +67,8 @@
 #include <net/mac80211.h>
 
 #include "mvm.h"
-#include "iwl-eeprom-parse.h"
 #include "fw-api-scan.h"
 
-#define IWL_PLCP_QUIET_THRESH 1
-#define IWL_ACTIVE_QUIET_TIME 10
 #define IWL_DENSE_EBS_SCAN_RATIO 5
 #define IWL_SPARSE_EBS_SCAN_RATIO 1
 
@@ -79,23 +76,31 @@
 	u32 max_out_time;
 	u32 suspend_time;
 	bool passive_fragmented;
+	u32 n_channels;
+	u16 delay;
+	int n_ssids;
+	struct cfg80211_ssid *ssids;
+	struct ieee80211_channel **channels;
+	u16 interval; /* interval between scans (in secs) */
+	u32 flags;
+	u8 *mac_addr;
+	u8 *mac_addr_mask;
+	bool no_cck;
+	bool pass_all;
+	int n_match_sets;
+	struct iwl_scan_probe_req preq;
+	struct cfg80211_match_set *match_sets;
 	struct _dwell {
 		u16 passive;
 		u16 active;
 		u16 fragmented;
 	} dwell[IEEE80211_NUM_BANDS];
+	struct {
+		u8 iterations;
+		u8 full_scan_mul; /* not used for UMAC */
+	} schedule[2];
 };
 
-enum iwl_umac_scan_uid_type {
-	IWL_UMAC_SCAN_UID_REG_SCAN	= BIT(0),
-	IWL_UMAC_SCAN_UID_SCHED_SCAN	= BIT(1),
-	IWL_UMAC_SCAN_UID_ALL		= IWL_UMAC_SCAN_UID_REG_SCAN |
-					  IWL_UMAC_SCAN_UID_SCHED_SCAN,
-};
-
-static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
-			      enum iwl_umac_scan_uid_type type, bool notify);
-
 static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
 {
 	if (mvm->scan_rx_ant != ANT_NONE)
@@ -143,28 +148,6 @@
 }
 
 /*
- * We insert the SSIDs in an inverted order, because the FW will
- * invert it back. The most prioritized SSID, which is first in the
- * request list, is not copied here, but inserted directly to the probe
- * request.
- */
-static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
-				    struct cfg80211_ssid *ssids,
-				    int n_ssids, int first)
-{
-	int fw_idx, req_idx;
-
-	for (req_idx = n_ssids - 1, fw_idx = 0; req_idx >= first;
-	     req_idx--, fw_idx++) {
-		cmd_ssid[fw_idx].id = WLAN_EID_SSID;
-		cmd_ssid[fw_idx].len = ssids[req_idx].ssid_len;
-		memcpy(cmd_ssid[fw_idx].ssid,
-		       ssids[req_idx].ssid,
-		       ssids[req_idx].ssid_len);
-	}
-}
-
-/*
  * If req->n_ssids > 0, it means we should do an active scan.
  * In case of active scan w/o directed scan, we receive a zero-length SSID
  * just to notify that this scan is active and not passive.
@@ -177,7 +160,7 @@
 static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
 				    enum ieee80211_band band, int n_ssids)
 {
-	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
 		return 10;
 	if (band == IEEE80211_BAND_2GHZ)
 		return 20 + 3 * (n_ssids + 1);
@@ -187,7 +170,7 @@
 static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
 				     enum ieee80211_band band)
 {
-	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
 		return 110;
 	return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
 }
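
As a worked example of the dwell formulas above: without
IWL_UCODE_TLV_API_BASIC_DWELL, a 2.4 GHz scan with two SSIDs gets an active
dwell of 20 + 3 * (2 + 1) = 29 ms per channel and a passive dwell of
100 + 20 = 120 ms (100 + 10 = 110 ms on 5 GHz); firmware advertising
BASIC_DWELL instead uses a flat 10 ms active and 110 ms passive dwell.
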
@@ -203,10 +186,9 @@
 		*global_cnt += 1;
 }
 
-static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
-				     struct ieee80211_vif *vif,
-				     int n_ssids, u32 flags,
-				     struct iwl_mvm_scan_params *params)
+static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
+				    struct ieee80211_vif *vif,
+				    struct iwl_mvm_scan_params *params)
 {
 	int global_cnt = 0;
 	enum ieee80211_band band;
@@ -216,7 +198,6 @@
 					    IEEE80211_IFACE_ITER_NORMAL,
 					    iwl_mvm_scan_condition_iterator,
 					    &global_cnt);
-
 	if (!global_cnt)
 		goto not_bound;
 
@@ -224,8 +205,9 @@
 	params->max_out_time = 120;
 
 	if (iwl_mvm_low_latency(mvm)) {
-		if (mvm->fw->ucode_capa.api[0] &
-		    IWL_UCODE_TLV_API_FRAGMENTED_SCAN) {
+		if (fw_has_api(&mvm->fw->ucode_capa,
+			       IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
 			params->suspend_time = 105;
 			/*
 			 * If there is more than one active interface make
@@ -239,8 +221,9 @@
 		}
 	}
 
-	if (frag_passive_dwell && (mvm->fw->ucode_capa.api[0] &
-				   IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
+	if (frag_passive_dwell &&
+	    fw_has_api(&mvm->fw->ucode_capa,
+		       IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
 		/*
 		 * P2P device scan should not be fragmented to avoid negative
 		 * impact on P2P device discovery. Configure max_out_time to be
@@ -257,7 +240,8 @@
 		}
 	}
 
-	if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
+	if ((params->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
+	    (params->max_out_time > 200))
 		params->max_out_time = 200;
 
 not_bound:
@@ -268,20 +252,34 @@
 
 		params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm,
 									band);
-		params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
-								      n_ssids);
+		params->dwell[band].active =
+			iwl_mvm_get_active_dwell(mvm, band, params->n_ssids);
 	}
+
+	IWL_DEBUG_SCAN(mvm,
+		       "scan parameters: max_out_time %d, suspend_time %d, passive_fragmented %d\n",
+		       params->max_out_time, params->suspend_time,
+		       params->passive_fragmented);
+	IWL_DEBUG_SCAN(mvm,
+		       "dwell[IEEE80211_BAND_2GHZ]: passive %d, active %d, fragmented %d\n",
+		       params->dwell[IEEE80211_BAND_2GHZ].passive,
+		       params->dwell[IEEE80211_BAND_2GHZ].active,
+		       params->dwell[IEEE80211_BAND_2GHZ].fragmented);
+	IWL_DEBUG_SCAN(mvm,
+		       "dwell[IEEE80211_BAND_5GHZ]: passive %d, active %d, fragmented %d\n",
+		       params->dwell[IEEE80211_BAND_5GHZ].passive,
+		       params->dwell[IEEE80211_BAND_5GHZ].active,
+		       params->dwell[IEEE80211_BAND_5GHZ].fragmented);
 }
 
 static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
 {
 	/* require rrm scan whenever the fw supports it */
-	return mvm->fw->ucode_capa.capa[0] &
-	       IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT;
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
 }
 
-static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm,
-					   bool is_sched_scan)
+static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
 {
 	int max_probe_len;
 
@@ -297,9 +295,9 @@
 	return max_probe_len;
 }
 
-int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
+int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
 {
-	int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm, is_sched_scan);
+	int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm);
 
 	/* TODO: [BUG] This function should return the maximum allowed size of
 	 * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
@@ -314,22 +312,41 @@
 	return max_ie_len;
 }
 
-int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
-						struct iwl_rx_cmd_buffer *rxb,
-						struct iwl_device_cmd *cmd)
+static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res,
+				     int num_res, u8 *buf, size_t buf_size)
+{
+	int i;
+	u8 *pos = buf, *end = buf + buf_size;
+
+	for (i = 0; pos < end && i < num_res; i++)
+		pos += snprintf(pos, end - pos, " %u", res[i].channel);
+
+	/* terminate the string in case the buffer was too short */
+	*(buf + buf_size - 1) = '\0';
+
+	return buf;
+}
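
A quick usage sketch of the new helper, with hypothetical values: given
three iteration results on channels 1, 6 and 11, the iteration-complete
debug print gains a readable channel list.

/* hypothetical example: res[] holds three scan results */
u8 buf[32];

iwl_mvm_dump_channel_list(res, 3, buf, sizeof(buf));
/* buf now holds " 1 6 11"; note the leading space from the " %u" format */
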
+
+int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+					     struct iwl_rx_cmd_buffer *rxb,
+					     struct iwl_device_cmd *cmd)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
+	u8 buf[256];
 
 	IWL_DEBUG_SCAN(mvm,
-		       "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
-		       notif->status, notif->scanned_channels);
+		       "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n",
+		       notif->status, notif->scanned_channels,
+		       iwl_mvm_dump_channel_list(notif->results,
+						 notif->scanned_channels, buf,
+						 sizeof(buf)));
 	return 0;
 }
 
-int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
-				    struct iwl_rx_cmd_buffer *rxb,
-				    struct iwl_device_cmd *cmd)
+int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+				struct iwl_rx_cmd_buffer *rxb,
+				struct iwl_device_cmd *cmd)
 {
 	IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
 	ieee80211_sched_scan_results(mvm->hw);
@@ -337,41 +354,78 @@
 	return 0;
 }
 
-int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
-					   struct iwl_rx_cmd_buffer *rxb,
-					   struct iwl_device_cmd *cmd)
+static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
+{
+	switch (status) {
+	case IWL_SCAN_EBS_SUCCESS:
+		return "successful";
+	case IWL_SCAN_EBS_INACTIVE:
+		return "inactive";
+	case IWL_SCAN_EBS_FAILED:
+	case IWL_SCAN_EBS_CHAN_NOT_FOUND:
+	default:
+		return "failed";
+	}
+}
+
+int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+					struct iwl_rx_cmd_buffer *rxb,
+					struct iwl_device_cmd *cmd)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_periodic_scan_complete *scan_notif;
-
-	scan_notif = (void *)pkt->data;
+	struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
+	bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
 
 	/* scan status must be locked for proper checking */
 	lockdep_assert_held(&mvm->mutex);
 
-	IWL_DEBUG_SCAN(mvm,
-		       "%s completed, status %s, EBS status %s\n",
-		       mvm->scan_status == IWL_MVM_SCAN_SCHED ?
-				"Scheduled scan" : "Scan",
-		       scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
-				"completed" : "aborted",
-		       scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
-				"success" : "failed");
+	/* We first check if we were stopping a scan, in which case we
+	 * just clear the stopping flag.  Then we check if it was a
+	 * firmware initiated stop, in which case we need to inform
+	 * mac80211.
+	 * Note that we can have a stopping and a running scan
+	 * simultaneously, but we can't have two different types of
+	 * scans stopping or running at the same time (since LMAC
+	 * doesn't support it).
+	 */
 
+	if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
+		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
 
-	/* only call mac80211 completion if the stop was initiated by FW */
-	if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
-		mvm->scan_status = IWL_MVM_SCAN_NONE;
+		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
+			       aborted ? "aborted" : "completed",
+			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+
+		mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
+	} else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
+		IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n",
+			       aborted ? "aborted" : "completed",
+			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+
+		mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR;
+	} else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
+		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
+
+		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n",
+			       aborted ? "aborted" : "completed",
+			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+
+		mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
 		ieee80211_sched_scan_stopped(mvm->hw);
-	} else if (mvm->scan_status == IWL_MVM_SCAN_OS) {
-		mvm->scan_status = IWL_MVM_SCAN_NONE;
+	} else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
+		IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
+			       aborted ? "aborted" : "completed",
+			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+
+		mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
 		ieee80211_scan_completed(mvm->hw,
 				scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
 		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
 	}
 
-	if (scan_notif->ebs_status)
-		mvm->last_ebs_successful = false;
+	mvm->last_ebs_successful =
+			scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
+			scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
 
 	return 0;
 }
@@ -390,9 +444,12 @@
 	return -1;
 }
 
-static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
-					struct iwl_ssid_ie *direct_scan,
-					u32 *ssid_bitmap, bool basic_ssid)
+/* We insert the SSIDs in an inverted order, because the FW will
+ * invert it back.
+ */
+static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
+				 struct iwl_ssid_ie *ssids,
+				 u32 *ssid_bitmap)
 {
 	int i, j;
 	int index;
@@ -402,39 +459,41 @@
 	 * iwl_config_sched_scan_profiles() uses the order of these ssids to
 	 * config match list.
 	 */
-	for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
+	for (i = 0, j = params->n_match_sets - 1;
+	     j >= 0 && i < PROBE_OPTION_MAX;
+	     i++, j--) {
 		/* skip empty SSID matchsets */
-		if (!req->match_sets[i].ssid.ssid_len)
+		if (!params->match_sets[j].ssid.ssid_len)
 			continue;
-		direct_scan[i].id = WLAN_EID_SSID;
-		direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
-		memcpy(direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
-		       direct_scan[i].len);
+		ssids[i].id = WLAN_EID_SSID;
+		ssids[i].len = params->match_sets[j].ssid.ssid_len;
+		memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
+		       ssids[i].len);
 	}
 
 	/* add SSIDs from scan SSID list */
 	*ssid_bitmap = 0;
-	for (j = 0; j < req->n_ssids && i < PROBE_OPTION_MAX; j++) {
-		index = iwl_ssid_exist(req->ssids[j].ssid,
-				       req->ssids[j].ssid_len,
-				       direct_scan);
+	for (j = params->n_ssids - 1;
+	     j >= 0 && i < PROBE_OPTION_MAX;
+	     i++, j--) {
+		index = iwl_ssid_exist(params->ssids[j].ssid,
+				       params->ssids[j].ssid_len,
+				       ssids);
 		if (index < 0) {
-			if (!req->ssids[j].ssid_len && basic_ssid)
-				continue;
-			direct_scan[i].id = WLAN_EID_SSID;
-			direct_scan[i].len = req->ssids[j].ssid_len;
-			memcpy(direct_scan[i].ssid, req->ssids[j].ssid,
-			       direct_scan[i].len);
-			*ssid_bitmap |= BIT(i + 1);
-			i++;
+			ssids[i].id = WLAN_EID_SSID;
+			ssids[i].len = params->ssids[j].ssid_len;
+			memcpy(ssids[i].ssid, params->ssids[j].ssid,
+			       ssids[i].len);
+			*ssid_bitmap |= BIT(i);
 		} else {
-			*ssid_bitmap |= BIT(index + 1);
+			*ssid_bitmap |= BIT(index);
 		}
 	}
 }
 
-int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
-				       struct cfg80211_sched_scan_request *req)
+static int
+iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
+				   struct cfg80211_sched_scan_request *req)
 {
 	struct iwl_scan_offload_profile *profile;
 	struct iwl_scan_offload_profile_cfg *profile_cfg;
@@ -515,30 +574,7 @@
 	return true;
 }
 
-int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
-			       struct ieee80211_vif *vif,
-			       struct cfg80211_sched_scan_request *req,
-			       struct ieee80211_scan_ies *ies)
-{
-	int ret;
-
-	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
-		ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
-		if (ret)
-			return ret;
-		ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies);
-	} else {
-		mvm->scan_status = IWL_MVM_SCAN_SCHED;
-		ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
-		if (ret)
-			return ret;
-		ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
-	}
-
-	return ret;
-}
-
-static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
+static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
 {
 	int ret;
 	struct iwl_host_cmd cmd = {
@@ -546,12 +582,6 @@
 	};
 	u32 status;
 
-	/* Exit instantly with error when device is not ready
-	 * to receive scan abort command or it does not perform
-	 * scheduled scan currently */
-	if (mvm->scan_status == IWL_MVM_SCAN_NONE)
-		return -EIO;
-
 	ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
 	if (ret)
 		return ret;
@@ -571,69 +601,9 @@
 	return ret;
 }
 
-int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
-{
-	int ret;
-	struct iwl_notification_wait wait_scan_done;
-	static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
-	bool sched = mvm->scan_status == IWL_MVM_SCAN_SCHED;
-
-	lockdep_assert_held(&mvm->mutex);
-
-	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
-		return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
-					  notify);
-
-	if (mvm->scan_status == IWL_MVM_SCAN_NONE)
-		return 0;
-
-	if (iwl_mvm_is_radio_killed(mvm)) {
-		ret = 0;
-		goto out;
-	}
-
-	iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
-				   scan_done_notif,
-				   ARRAY_SIZE(scan_done_notif),
-				   NULL, NULL);
-
-	ret = iwl_mvm_send_scan_offload_abort(mvm);
-	if (ret) {
-		IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
-			       sched ? "offloaded " : "", ret);
-		iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
-		goto out;
-	}
-
-	IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
-		       sched ? "offloaded " : "");
-
-	ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
-out:
-	/*
-	 * Clear the scan status so the next scan requests will succeed. This
-	 * also ensures the Rx handler doesn't do anything, as the scan was
-	 * stopped from above. Since the rx handler won't do anything now,
-	 * we have to release the scan reference here.
-	 */
-	if (mvm->scan_status == IWL_MVM_SCAN_OS)
-		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-
-	mvm->scan_status = IWL_MVM_SCAN_NONE;
-
-	if (notify) {
-		if (sched)
-			ieee80211_sched_scan_stopped(mvm->hw);
-		else
-			ieee80211_scan_completed(mvm->hw, true);
-	}
-
-	return ret;
-}
-
-static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm,
-					     struct iwl_scan_req_tx_cmd *tx_cmd,
-					     bool no_cck)
+static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
+				     struct iwl_scan_req_tx_cmd *tx_cmd,
+				     bool no_cck)
 {
 	tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
 					 TX_CMD_FLG_BT_DIS);
@@ -654,7 +624,7 @@
 iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
 			       struct ieee80211_channel **channels,
 			       int n_channels, u32 ssid_bitmap,
-			       struct iwl_scan_req_unified_lmac *cmd)
+			       struct iwl_scan_req_lmac *cmd)
 {
 	struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
 	int i;
@@ -707,13 +677,14 @@
 }
 
 static void
-iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-				 struct ieee80211_scan_ies *ies,
-				 struct iwl_scan_probe_req *preq,
-				 const u8 *mac_addr, const u8 *mac_addr_mask)
+iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			 struct ieee80211_scan_ies *ies,
+			 struct iwl_mvm_scan_params *params)
 {
-	struct ieee80211_mgmt *frame = (struct ieee80211_mgmt *)preq->buf;
+	struct ieee80211_mgmt *frame = (void *)params->preq.buf;
 	u8 *pos, *newpos;
+	const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
+		params->mac_addr : NULL;
 
 	/*
 	 * Unfortunately, right now the offload scan doesn't support randomising
@@ -722,7 +693,8 @@
 	 * random, only when it's restarted, but at least that helps a bit.
 	 */
 	if (mac_addr)
-		get_random_mask_addr(frame->sa, mac_addr, mac_addr_mask);
+		get_random_mask_addr(frame->sa, mac_addr,
+				     params->mac_addr_mask);
 	else
 		memcpy(frame->sa, vif->addr, ETH_ALEN);
 
@@ -735,245 +707,167 @@
 	*pos++ = WLAN_EID_SSID;
 	*pos++ = 0;
 
-	preq->mac_header.offset = 0;
-	preq->mac_header.len = cpu_to_le16(24 + 2);
+	params->preq.mac_header.offset = 0;
+	params->preq.mac_header.len = cpu_to_le16(24 + 2);
 
 	/* Insert ds parameter set element on 2.4 GHz band */
 	newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
 						 ies->ies[IEEE80211_BAND_2GHZ],
 						 ies->len[IEEE80211_BAND_2GHZ],
 						 pos);
-	preq->band_data[0].offset = cpu_to_le16(pos - preq->buf);
-	preq->band_data[0].len = cpu_to_le16(newpos - pos);
+	params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
+	params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
 	pos = newpos;
 
 	memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
 	       ies->len[IEEE80211_BAND_5GHZ]);
-	preq->band_data[1].offset = cpu_to_le16(pos - preq->buf);
-	preq->band_data[1].len = cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
+	params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
+	params->preq.band_data[1].len =
+		cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
 	pos += ies->len[IEEE80211_BAND_5GHZ];
 
 	memcpy(pos, ies->common_ies, ies->common_ie_len);
-	preq->common_data.offset = cpu_to_le16(pos - preq->buf);
-	preq->common_data.len = cpu_to_le16(ies->common_ie_len);
+	params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
+	params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
 }
 
-static void
-iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
-				       struct iwl_scan_req_unified_lmac *cmd,
-				       struct iwl_mvm_scan_params *params)
+static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm,
+				    enum iwl_scan_priority_ext prio)
 {
-	memset(cmd, 0, ksize(cmd));
+	if (fw_has_api(&mvm->fw->ucode_capa,
+		       IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY))
+		return cpu_to_le32(prio);
+
+	if (prio <= IWL_SCAN_PRIORITY_EXT_2)
+		return cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
+
+	if (prio <= IWL_SCAN_PRIORITY_EXT_4)
+		return cpu_to_le32(IWL_SCAN_PRIORITY_MEDIUM);
+
+	return cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
+}
+
+static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
+				    struct iwl_scan_req_lmac *cmd,
+				    struct iwl_mvm_scan_params *params)
+{
 	cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
 	cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
 	if (params->passive_fragmented)
 		cmd->fragmented_dwell =
 				params->dwell[IEEE80211_BAND_2GHZ].fragmented;
-	cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
 	cmd->max_out_time = cpu_to_le32(params->max_out_time);
 	cmd->suspend_time = cpu_to_le32(params->suspend_time);
-	cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
-	cmd->iter_num = cpu_to_le32(1);
-
-	if (iwl_mvm_rrm_scan_needed(mvm))
-		cmd->scan_flags |=
-			cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
+	cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 }
 
-int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
-			      struct ieee80211_vif *vif,
-			      struct ieee80211_scan_request *req)
+static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
+				     struct ieee80211_scan_ies *ies,
+				     int n_channels)
 {
-	struct iwl_host_cmd hcmd = {
-		.id = SCAN_OFFLOAD_REQUEST_CMD,
-		.len = { sizeof(struct iwl_scan_req_unified_lmac) +
-			 sizeof(struct iwl_scan_channel_cfg_lmac) *
-				mvm->fw->ucode_capa.n_scan_channels +
-			 sizeof(struct iwl_scan_probe_req), },
-		.data = { mvm->scan_cmd, },
-		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
-	};
-	struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
-	struct iwl_scan_probe_req *preq;
-	struct iwl_mvm_scan_params params = {};
-	u32 flags;
-	u32 ssid_bitmap = 0;
-	int ret, i;
+	return ((n_ssids <= PROBE_OPTION_MAX) &&
+		(n_channels <= mvm->fw->ucode_capa.n_scan_channels) &&
+		(ies->common_ie_len +
+		 ies->len[NL80211_BAND_2GHZ] +
+		 ies->len[NL80211_BAND_5GHZ] <=
+		 iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
+}
 
-	lockdep_assert_held(&mvm->mutex);
+static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
+					struct ieee80211_vif *vif,
+					int n_iterations)
+{
+	const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
 
-	/* we should have failed registration if scan_cmd was NULL */
-	if (WARN_ON(mvm->scan_cmd == NULL))
-		return -ENOMEM;
+	/* We can only use EBS if:
+	 *	1. the feature is supported;
+	 *	2. the last EBS was successful;
+	 *	3. for a single scan, the single scan EBS API is supported;
+	 *	4. it's not a p2p find operation.
+	 */
+	return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
+		mvm->last_ebs_successful &&
+		(n_iterations > 1 ||
+		 fw_has_api(capa, IWL_UCODE_TLV_API_SINGLE_SCAN_EBS)) &&
+		vif->type != NL80211_IFTYPE_P2P_DEVICE);
+}
 
-	if (req->req.n_ssids > PROBE_OPTION_MAX ||
-	    req->ies.common_ie_len + req->ies.len[NL80211_BAND_2GHZ] +
-	    req->ies.len[NL80211_BAND_5GHZ] >
-		iwl_mvm_max_scan_ie_fw_cmd_room(mvm, false) ||
-	    req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
-		return -ENOBUFS;
+static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params)
+{
+	return params->schedule[0].iterations + params->schedule[1].iterations;
+}
 
-	mvm->scan_status = IWL_MVM_SCAN_OS;
+static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
+				   struct iwl_mvm_scan_params *params)
+{
+	int flags = 0;
 
-	iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,
-				 &params);
-
-	iwl_mvm_build_generic_unified_scan_cmd(mvm, cmd, &params);
-
-	cmd->n_channels = (u8)req->req.n_channels;
-
-	flags = IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
-
-	if (req->req.n_ssids == 1 && req->req.ssids[0].ssid_len != 0)
-		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
-
-	if (params.passive_fragmented)
-		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
-
-	if (req->req.n_ssids == 0)
+	if (params->n_ssids == 0)
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
 
-	cmd->scan_flags |= cpu_to_le32(flags);
+	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
+		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
 
-	cmd->flags = iwl_mvm_scan_rxon_flags(req->req.channels[0]->band);
-	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
-					MAC_FILTER_IN_BEACON);
-	iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, req->req.no_cck);
-	iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->req.ssids,
-				req->req.n_ssids, 0);
+	if (params->passive_fragmented)
+		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
 
-	cmd->schedule[0].delay = 0;
-	cmd->schedule[0].iterations = 1;
-	cmd->schedule[0].full_scan_mul = 0;
-	cmd->schedule[1].delay = 0;
-	cmd->schedule[1].iterations = 0;
-	cmd->schedule[1].full_scan_mul = 0;
+	if (iwl_mvm_rrm_scan_needed(mvm))
+		flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
 
-	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
-	    mvm->last_ebs_successful) {
-		cmd->channel_opt[0].flags =
-			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
-				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
-				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
-		cmd->channel_opt[0].non_ebs_ratio =
-			cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
-		cmd->channel_opt[1].flags =
-			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
-				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
-				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
-		cmd->channel_opt[1].non_ebs_ratio =
-			cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
-	}
-
-	for (i = 1; i <= req->req.n_ssids; i++)
-		ssid_bitmap |= BIT(i);
-
-	iwl_mvm_lmac_scan_cfg_channels(mvm, req->req.channels,
-				       req->req.n_channels, ssid_bitmap,
-				       cmd);
-
-	preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
-			mvm->fw->ucode_capa.n_scan_channels);
-
-	iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, preq,
-		req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
-			req->req.mac_addr : NULL,
-		req->req.mac_addr_mask);
-
-	ret = iwl_mvm_send_cmd(mvm, &hcmd);
-	if (!ret) {
-		IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
-	} else {
-		/*
-		 * If the scan failed, it usually means that the FW was unable
-		 * to allocate the time events. Warn on it, but maybe we
-		 * should try to send the command again with different params.
-		 */
-		IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
-		mvm->scan_status = IWL_MVM_SCAN_NONE;
-		ret = -EIO;
-	}
-	return ret;
-}
-
-int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
-				    struct ieee80211_vif *vif,
-				    struct cfg80211_sched_scan_request *req,
-				    struct ieee80211_scan_ies *ies)
-{
-	struct iwl_host_cmd hcmd = {
-		.id = SCAN_OFFLOAD_REQUEST_CMD,
-		.len = { sizeof(struct iwl_scan_req_unified_lmac) +
-			 sizeof(struct iwl_scan_channel_cfg_lmac) *
-				mvm->fw->ucode_capa.n_scan_channels +
-			 sizeof(struct iwl_scan_probe_req), },
-		.data = { mvm->scan_cmd, },
-		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
-	};
-	struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
-	struct iwl_scan_probe_req *preq;
-	struct iwl_mvm_scan_params params = {};
-	int ret;
-	u32 flags = 0, ssid_bitmap = 0;
-
-	lockdep_assert_held(&mvm->mutex);
-
-	/* we should have failed registration if scan_cmd was NULL */
-	if (WARN_ON(mvm->scan_cmd == NULL))
-		return -ENOMEM;
-
-	if (req->n_ssids > PROBE_OPTION_MAX ||
-	    ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
-	    ies->len[NL80211_BAND_5GHZ] >
-		iwl_mvm_max_scan_ie_fw_cmd_room(mvm, true) ||
-	    req->n_channels > mvm->fw->ucode_capa.n_scan_channels)
-		return -ENOBUFS;
-
-	iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params);
-
-	iwl_mvm_build_generic_unified_scan_cmd(mvm, cmd, &params);
-
-	cmd->n_channels = (u8)req->n_channels;
-
-	cmd->delay = cpu_to_le32(req->delay);
-
-	if (iwl_mvm_scan_pass_all(mvm, req))
+	if (params->pass_all)
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
 	else
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
 
-	if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
-		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
-
-	if (params.passive_fragmented)
-		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
-
-	if (req->n_ssids == 0)
-		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
-
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	if (mvm->scan_iter_notif_enabled)
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
 #endif
 
-	cmd->scan_flags |= cpu_to_le32(flags);
+	return flags;
+}
 
-	cmd->flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
+static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			     struct iwl_mvm_scan_params *params)
+{
+	struct iwl_scan_req_lmac *cmd = mvm->scan_cmd;
+	struct iwl_scan_probe_req *preq =
+		(void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
+			 mvm->fw->ucode_capa.n_scan_channels);
+	u32 ssid_bitmap = 0;
+	int n_iterations = iwl_mvm_scan_total_iterations(params);
+
+	lockdep_assert_held(&mvm->mutex);
+
+	memset(cmd, 0, ksize(cmd));
+
+	iwl_mvm_scan_lmac_dwell(mvm, cmd, params);
+
+	cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
+	cmd->iter_num = cpu_to_le32(1);
+	cmd->n_channels = (u8)params->n_channels;
+
+	cmd->delay = cpu_to_le32(params->delay);
+
+	cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params));
+
+	cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band);
 	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
 					MAC_FILTER_IN_BEACON);
-	iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, false);
-	iwl_scan_offload_build_ssid(req, cmd->direct_scan, &ssid_bitmap, false);
+	iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
+	iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap);
 
-	cmd->schedule[0].delay = cpu_to_le16(req->interval / MSEC_PER_SEC);
-	cmd->schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
-	cmd->schedule[0].full_scan_mul = 1;
+	/* this API uses bits 1-20 instead of 0-19 */
+	ssid_bitmap <<= 1;
 
-	cmd->schedule[1].delay = cpu_to_le16(req->interval / MSEC_PER_SEC);
-	cmd->schedule[1].iterations = 0xff;
-	cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+	cmd->schedule[0].delay = cpu_to_le16(params->interval);
+	cmd->schedule[0].iterations = params->schedule[0].iterations;
+	cmd->schedule[0].full_scan_mul = params->schedule[0].full_scan_mul;
+	cmd->schedule[1].delay = cpu_to_le16(params->interval);
+	cmd->schedule[1].iterations = params->schedule[1].iterations;
+	cmd->schedule[1].full_scan_mul = params->schedule[1].full_scan_mul;
 
-	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
-	    mvm->last_ebs_successful) {
+	if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) {
 		cmd->channel_opt[0].flags =
 			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
 				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
@@ -988,61 +882,14 @@
 			cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
 	}
 
-	iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
-				       ssid_bitmap, cmd);
+	iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels,
+				       params->n_channels, ssid_bitmap, cmd);
 
-	preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
-			mvm->fw->ucode_capa.n_scan_channels);
+	*preq = params->preq;
 
-	iwl_mvm_build_unified_scan_probe(mvm, vif, ies, preq,
-		req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
-			req->mac_addr : NULL,
-		req->mac_addr_mask);
-
-	ret = iwl_mvm_send_cmd(mvm, &hcmd);
-	if (!ret) {
-		IWL_DEBUG_SCAN(mvm,
-			       "Sched scan request was sent successfully\n");
-	} else {
-		/*
-		 * If the scan failed, it usually means that the FW was unable
-		 * to allocate the time events. Warn on it, but maybe we
-		 * should try to send the command again with different params.
-		 */
-		IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
-		mvm->scan_status = IWL_MVM_SCAN_NONE;
-		ret = -EIO;
-	}
-	return ret;
+	return 0;
 }
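
Note the single shift after building the SSIDs: iwl_scan_build_ssids() numbers
slots from bit 0, while this LMAC command wants direct-scan SSIDs numbered
from bit 1, which is what the removed BIT(i + 1) used to encode.  A two-assert
check with illustrative values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t ssid_bitmap = 0x5;     /* slots 0 and 2, 0-based */

            ssid_bitmap <<= 1;              /* LMAC numbering starts at bit 1 */
            assert(ssid_bitmap == 0xa);     /* same slots, now bits 1 and 3 */
            return 0;
    }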
 
-
-int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
-{
-	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
-		return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_REG_SCAN,
-					  true);
-
-	if (mvm->scan_status == IWL_MVM_SCAN_NONE)
-		return 0;
-
-	if (iwl_mvm_is_radio_killed(mvm)) {
-		ieee80211_scan_completed(mvm->hw, true);
-		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-		mvm->scan_status = IWL_MVM_SCAN_NONE;
-		return 0;
-	}
-
-	return iwl_mvm_scan_offload_stop(mvm, true);
-}
-
-/* UMAC scan API */
-
-struct iwl_umac_scan_done {
-	struct iwl_mvm *mvm;
-	enum iwl_umac_scan_uid_type type;
-};
-
 static int rate_to_scan_rate_flag(unsigned int rate)
 {
 	static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
@@ -1151,79 +998,21 @@
 	return ret;
 }
 
-static int iwl_mvm_find_scan_uid(struct iwl_mvm *mvm, u32 uid)
+static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
 {
 	int i;
 
-	for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
-		if (mvm->scan_uid[i] == uid)
+	for (i = 0; i < mvm->max_scans; i++)
+		if (mvm->scan_uid_status[i] == status)
 			return i;
 
-	return i;
+	return -ENOENT;
 }
 
-static int iwl_mvm_find_free_scan_uid(struct iwl_mvm *mvm)
-{
-	return iwl_mvm_find_scan_uid(mvm, 0);
-}
-
-static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm,
-				   enum iwl_umac_scan_uid_type type)
-{
-	int i;
-
-	for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
-		if (mvm->scan_uid[i] & type)
-			return true;
-
-	return false;
-}
-
-static int iwl_mvm_find_first_scan(struct iwl_mvm *mvm,
-				   enum iwl_umac_scan_uid_type type)
-{
-	int i;
-
-	for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
-		if (mvm->scan_uid[i] & type)
-			return i;
-
-	return i;
-}
-
-static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm,
-				 enum iwl_umac_scan_uid_type type)
-{
-	u32 uid;
-
-	/* make sure exactly one bit is on in scan type */
-	WARN_ON(hweight8(type) != 1);
-
-	/*
-	 * Make sure scan uids are unique. If one scan lasts long time while
-	 * others are completing frequently, the seq number will wrap up and
-	 * we may have more than one scan with the same uid.
-	 */
-	do {
-		uid = type | (mvm->scan_seq_num <<
-			      IWL_UMAC_SCAN_UID_SEQ_OFFSET);
-		mvm->scan_seq_num++;
-	} while (iwl_mvm_find_scan_uid(mvm, uid) <
-		 IWL_MVM_MAX_SIMULTANEOUS_SCANS);
-
-	IWL_DEBUG_SCAN(mvm, "Generated scan UID %u\n", uid);
-
-	return uid;
-}
-
-static void
-iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm,
+static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
 				    struct iwl_scan_req_umac *cmd,
 				    struct iwl_mvm_scan_params *params)
 {
-	memset(cmd, 0, ksize(cmd));
-	cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
-				    sizeof(struct iwl_mvm_umac_cmd_hdr));
 	cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
 	cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
 	if (params->passive_fragmented)
@@ -1231,7 +1020,15 @@
 				params->dwell[IEEE80211_BAND_2GHZ].fragmented;
 	cmd->max_out_time = cpu_to_le32(params->max_out_time);
 	cmd->suspend_time = cpu_to_le32(params->suspend_time);
-	cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
+	cmd->scan_priority =
+		iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
+
+	if (iwl_mvm_scan_total_iterations(params) == 0)
+		cmd->ooc_priority =
+			iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
+	else
+		cmd->ooc_priority =
+			iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_2);
 }
 
 static void
@@ -1251,230 +1048,326 @@
 	}
 }
 
-static u32 iwl_mvm_scan_umac_common_flags(struct iwl_mvm *mvm, int n_ssids,
-					  struct cfg80211_ssid *ssids,
-					  int fragmented)
+static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
+				   struct iwl_mvm_scan_params *params)
 {
 	int flags = 0;
 
-	if (n_ssids == 0)
+	if (params->n_ssids == 0)
 		flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
 
-	if (n_ssids == 1 && ssids[0].ssid_len != 0)
+	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
 
-	if (fragmented)
+	if (params->passive_fragmented)
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
 
 	if (iwl_mvm_rrm_scan_needed(mvm))
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
 
+	if (params->pass_all)
+		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
+	else
+		flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
+
+	if (iwl_mvm_scan_total_iterations(params) > 1)
+		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (mvm->scan_iter_notif_enabled)
+		flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+#endif
 	return flags;
 }
 
-int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-		      struct ieee80211_scan_request *req)
+static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			     struct iwl_mvm_scan_params *params,
+			     int type)
 {
-	struct iwl_host_cmd hcmd = {
-		.id = SCAN_REQ_UMAC,
-		.len = { iwl_mvm_scan_size(mvm), },
-		.data = { mvm->scan_cmd, },
-		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
-	};
 	struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
 	struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
 		sizeof(struct iwl_scan_channel_cfg_umac) *
 			mvm->fw->ucode_capa.n_scan_channels;
-	struct iwl_mvm_scan_params params = {};
-	u32 uid, flags;
+	int uid;
 	u32 ssid_bitmap = 0;
-	int ret, i, uid_idx;
+	int n_iterations = iwl_mvm_scan_total_iterations(params);
 
 	lockdep_assert_held(&mvm->mutex);
 
-	uid_idx = iwl_mvm_find_free_scan_uid(mvm);
-	if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
-		return -EBUSY;
+	uid = iwl_mvm_scan_uid_by_status(mvm, 0);
+	if (uid < 0)
+		return uid;
 
-	/* we should have failed registration if scan_cmd was NULL */
-	if (WARN_ON(mvm->scan_cmd == NULL))
-		return -ENOMEM;
+	memset(cmd, 0, ksize(cmd));
+	cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
+				    sizeof(struct iwl_mvm_umac_cmd_hdr));
 
-	if (WARN_ON(req->req.n_ssids > PROBE_OPTION_MAX ||
-		    req->ies.common_ie_len +
-		    req->ies.len[NL80211_BAND_2GHZ] +
-		    req->ies.len[NL80211_BAND_5GHZ] + 24 + 2 >
-		    SCAN_OFFLOAD_PROBE_REQ_SIZE || req->req.n_channels >
-		    mvm->fw->ucode_capa.n_scan_channels))
-		return -ENOBUFS;
+	iwl_mvm_scan_umac_dwell(mvm, cmd, params);
 
-	iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,
-				 &params);
+	mvm->scan_uid_status[uid] = type;
 
-	iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params);
-
-	uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
-	mvm->scan_uid[uid_idx] = uid;
 	cmd->uid = cpu_to_le32(uid);
+	cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
 
-	cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
-
-	flags = iwl_mvm_scan_umac_common_flags(mvm, req->req.n_ssids,
-					       req->req.ssids,
-					       params.passive_fragmented);
-
-	flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
-
-	cmd->general_flags = cpu_to_le32(flags);
-
-	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
-	    mvm->last_ebs_successful)
+	if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
 		cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
 				     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
 				     IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
 
-	cmd->n_channels = req->req.n_channels;
+	cmd->n_channels = params->n_channels;
 
-	for (i = 0; i < req->req.n_ssids; i++)
-		ssid_bitmap |= BIT(i);
+	iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
 
-	iwl_mvm_umac_scan_cfg_channels(mvm, req->req.channels,
-				       req->req.n_channels, ssid_bitmap, cmd);
+	iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
+				       params->n_channels, ssid_bitmap, cmd);
 
-	sec_part->schedule[0].iter_count = 1;
-	sec_part->delay = 0;
+	/* With UMAC we use only one schedule for now, so use the sum
+	 * of the iterations (with a maximum of 255).
+	 */
+	sec_part->schedule[0].iter_count =
+		(n_iterations > 255) ? 255 : n_iterations;
+	sec_part->schedule[0].interval = cpu_to_le16(params->interval);
 
-	iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, &sec_part->preq,
-		req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
-			req->req.mac_addr : NULL,
-		req->req.mac_addr_mask);
+	sec_part->delay = cpu_to_le16(params->delay);
+	sec_part->preq = params->preq;
 
-	iwl_mvm_scan_fill_ssids(sec_part->direct_scan, req->req.ssids,
-				req->req.n_ssids, 0);
+	return 0;
+}
+
+static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
+{
+	return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
+}
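
With scan_status acting as a bitmask, counting the running scans reduces to a
population count over the running-scan bits, which is what hweight32() does in
the kernel.  The same idea in portable C, assuming a two-bit layout:

    #include <assert.h>
    #include <stdint.h>

    #define SCAN_REGULAR (1u << 0)          /* illustrative bit layout */
    #define SCAN_SCHED   (1u << 1)
    #define SCAN_MASK    (SCAN_REGULAR | SCAN_SCHED)

    /* Kernighan popcount over the running-scan bits only. */
    static int num_scans(uint32_t status)
    {
            uint32_t v = status & SCAN_MASK;
            int n = 0;

            for (; v; v &= v - 1)           /* clears lowest set bit */
                    n++;
            return n;
    }

    int main(void)
    {
            assert(num_scans(0) == 0);
            assert(num_scans(SCAN_REGULAR | SCAN_SCHED) == 2);
            return 0;
    }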
+
+static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
+{
+	/* This looks a bit arbitrary, but the idea is that if we run
+	 * out of possible simultaneous scans and userspace is trying
+	 * to run a scan type that is already running, we return
+	 * -EBUSY.  But if userspace wants to start a different type
+	 * of scan, we stop the opposite type to make space for the
+	 * new request.  The reason is backwards compatibility with
+	 * old wpa_supplicant, which wouldn't stop a scheduled scan
+	 * before starting a normal scan.
+	 */
+
+	if (iwl_mvm_num_scans(mvm) < mvm->max_scans)
+		return 0;
+
+	/* Use a switch, even though this is a bitmask, so that more
+	 * than one bit set will fall into the default case and we
+	 * will warn.
+	 */
+	switch (type) {
+	case IWL_MVM_SCAN_REGULAR:
+		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
+			return -EBUSY;
+		return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+	case IWL_MVM_SCAN_SCHED:
+		if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
+			return -EBUSY;
+		return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
+	case IWL_MVM_SCAN_NETDETECT:
+		/* No need to stop anything for net-detect since the
+		 * firmware is restarted anyway.  This way, any sched
+		 * scans that were running will be restarted when we
+		 * resume.
+		 */
+		return 0;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	return -EIO;
+}
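
Condensed into a table, the policy the switch above implements (each row
assumes the corresponding *_MASK covers both the running and the stopping
bit of that scan type):

    /*
     * requested type  same type running?  action
     * --------------  ------------------  ------------------------------
     * REGULAR         yes                 return -EBUSY
     * REGULAR         no                  stop the SCHED scan first
     * SCHED           yes                 return -EBUSY
     * SCHED           no                  stop the REGULAR scan first
     * NETDETECT       -                   allow; the FW restarts anyway
     */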
+
+int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			   struct cfg80211_scan_request *req,
+			   struct ieee80211_scan_ies *ies)
+{
+	struct iwl_host_cmd hcmd = {
+		.len = { iwl_mvm_scan_size(mvm), },
+		.data = { mvm->scan_cmd, },
+		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
+	};
+	struct iwl_mvm_scan_params params = {};
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+		IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
+		return -EBUSY;
+	}
+
+	ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR);
+	if (ret)
+		return ret;
+
+	/* we should have failed registration if scan_cmd was NULL */
+	if (WARN_ON(!mvm->scan_cmd))
+		return -ENOMEM;
+
+	if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
+		return -ENOBUFS;
+
+	params.n_ssids = req->n_ssids;
+	params.flags = req->flags;
+	params.n_channels = req->n_channels;
+	params.delay = 0;
+	params.interval = 0;
+	params.ssids = req->ssids;
+	params.channels = req->channels;
+	params.mac_addr = req->mac_addr;
+	params.mac_addr_mask = req->mac_addr_mask;
+	params.no_cck = req->no_cck;
+	params.pass_all = true;
+	params.n_match_sets = 0;
+	params.match_sets = NULL;
+
+	params.schedule[0].iterations = 1;
+	params.schedule[0].full_scan_mul = 0;
+	params.schedule[1].iterations = 0;
+	params.schedule[1].full_scan_mul = 0;
+
+	iwl_mvm_scan_calc_dwell(mvm, vif, &params);
+
+	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
+
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+		hcmd.id = SCAN_REQ_UMAC;
+		ret = iwl_mvm_scan_umac(mvm, vif, &params,
+					IWL_MVM_SCAN_REGULAR);
+	} else {
+		hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
+		ret = iwl_mvm_scan_lmac(mvm, vif, &params);
+	}
+
+	if (ret)
+		return ret;
 
 	ret = iwl_mvm_send_cmd(mvm, &hcmd);
 	if (!ret) {
-		IWL_DEBUG_SCAN(mvm,
-			       "Scan request was sent successfully\n");
+		IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
+		mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
+		iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
 	} else {
-		/*
-		 * If the scan failed, it usually means that the FW was unable
+		/* If the scan failed, it usually means that the FW was unable
 		 * to allocate the time events. Warn on it, but maybe we
 		 * should try to send the command again with different params.
 		 */
 		IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
 	}
+
 	return ret;
 }
 
-int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-			    struct cfg80211_sched_scan_request *req,
-			    struct ieee80211_scan_ies *ies)
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+			     struct ieee80211_vif *vif,
+			     struct cfg80211_sched_scan_request *req,
+			     struct ieee80211_scan_ies *ies,
+			     int type)
 {
-
 	struct iwl_host_cmd hcmd = {
-		.id = SCAN_REQ_UMAC,
 		.len = { iwl_mvm_scan_size(mvm), },
 		.data = { mvm->scan_cmd, },
 		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
 	};
-	struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
-	struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
-		sizeof(struct iwl_scan_channel_cfg_umac) *
-			mvm->fw->ucode_capa.n_scan_channels;
 	struct iwl_mvm_scan_params params = {};
-	u32 uid, flags;
-	u32 ssid_bitmap = 0;
-	int ret, uid_idx;
+	int ret;
 
 	lockdep_assert_held(&mvm->mutex);
 
-	uid_idx = iwl_mvm_find_free_scan_uid(mvm);
-	if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
+	if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+		IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
 		return -EBUSY;
+	}
+
+	ret = iwl_mvm_check_running_scans(mvm, type);
+	if (ret)
+		return ret;
 
 	/* we should have failed registration if scan_cmd was NULL */
-	if (WARN_ON(mvm->scan_cmd == NULL))
+	if (WARN_ON(!mvm->scan_cmd))
 		return -ENOMEM;
 
-	if (WARN_ON(req->n_ssids > PROBE_OPTION_MAX ||
-		    ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
-		    ies->len[NL80211_BAND_5GHZ] + 24 + 2 >
-		    SCAN_OFFLOAD_PROBE_REQ_SIZE || req->n_channels >
-		    mvm->fw->ucode_capa.n_scan_channels))
+	if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
 		return -ENOBUFS;
 
-	iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags,
-					 &params);
+	params.n_ssids = req->n_ssids;
+	params.flags = req->flags;
+	params.n_channels = req->n_channels;
+	params.ssids = req->ssids;
+	params.channels = req->channels;
+	params.mac_addr = req->mac_addr;
+	params.mac_addr_mask = req->mac_addr_mask;
+	params.no_cck = false;
+	params.pass_all = iwl_mvm_scan_pass_all(mvm, req);
+	params.n_match_sets = req->n_match_sets;
+	params.match_sets = req->match_sets;
 
-	iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params);
+	params.schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
+	params.schedule[0].full_scan_mul = 1;
+	params.schedule[1].iterations = 0xff;
+	params.schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
 
-	cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
+	if (req->interval > U16_MAX) {
+		IWL_DEBUG_SCAN(mvm,
+			       "interval value is > 16-bits, set to max possible\n");
+		params.interval = U16_MAX;
+	} else {
+		params.interval = req->interval / MSEC_PER_SEC;
+	}
 
-	uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN);
-	mvm->scan_uid[uid_idx] = uid;
-	cmd->uid = cpu_to_le32(uid);
-
-	cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
-
-	flags = iwl_mvm_scan_umac_common_flags(mvm, req->n_ssids, req->ssids,
-					       params.passive_fragmented);
-
-	flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
-
-	if (iwl_mvm_scan_pass_all(mvm, req))
-		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
-	else
-		flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
-
-	cmd->general_flags = cpu_to_le32(flags);
-
-	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
-	    mvm->last_ebs_successful)
-		cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
-				     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
-				     IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
-
-	cmd->n_channels = req->n_channels;
-
-	iwl_scan_offload_build_ssid(req, sec_part->direct_scan, &ssid_bitmap,
-				    false);
-
-	/* This API uses bits 0-19 instead of 1-20. */
-	ssid_bitmap = ssid_bitmap >> 1;
-
-	iwl_mvm_umac_scan_cfg_channels(mvm, req->channels, req->n_channels,
-				       ssid_bitmap, cmd);
-
-	sec_part->schedule[0].interval =
-				cpu_to_le16(req->interval / MSEC_PER_SEC);
-	sec_part->schedule[0].iter_count = 0xff;
-
+	/* In theory, LMAC scans can handle a 32-bit delay, but since
+	 * waiting for over 18 hours to start the scan is a bit silly
+	 * and to keep it aligned with UMAC scans (which only support
+	 * 16-bit delays), trim it down to 16 bits.
+	 */
 	if (req->delay > U16_MAX) {
 		IWL_DEBUG_SCAN(mvm,
 			       "delay value is > 16-bits, set to max possible\n");
-		sec_part->delay = cpu_to_le16(U16_MAX);
+		params.delay = U16_MAX;
 	} else {
-		sec_part->delay = cpu_to_le16(req->delay);
+		params.delay = req->delay;
 	}
 
-	iwl_mvm_build_unified_scan_probe(mvm, vif, ies, &sec_part->preq,
-		req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
-			req->mac_addr : NULL,
-		req->mac_addr_mask);
+	iwl_mvm_scan_calc_dwell(mvm, vif, &params);
+
+	ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+	if (ret)
+		return ret;
+
+	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
+
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+		hcmd.id = SCAN_REQ_UMAC;
+		ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
+	} else {
+		hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
+		ret = iwl_mvm_scan_lmac(mvm, vif, &params);
+	}
+
+	if (ret)
+		return ret;
 
 	ret = iwl_mvm_send_cmd(mvm, &hcmd);
 	if (!ret) {
 		IWL_DEBUG_SCAN(mvm,
 			       "Sched scan request was sent successfully\n");
+		mvm->scan_status |= type;
 	} else {
-		/*
-		 * If the scan failed, it usually means that the FW was unable
+		/* If the scan failed, it usually means that the FW was unable
 		 * to allocate the time events. Warn on it, but maybe we
 		 * should try to send the command again with different params.
 		 */
 		IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
 	}
+
 	return ret;
 }
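
Both the sched-scan interval (after conversion to seconds) and the delay are
clamped to their 16-bit command fields, which is where the "over 18 hours"
remark comes from: U16_MAX seconds is roughly 18.2 hours.  Spelled out as a
small check:

    #include <assert.h>
    #include <stdint.h>

    /* Clamp a 32-bit request field into a 16-bit command field. */
    static uint16_t clamp_u16(uint32_t v)
    {
            return v > UINT16_MAX ? UINT16_MAX : (uint16_t)v;
    }

    int main(void)
    {
            assert(UINT16_MAX / 3600 == 18);        /* ~18.2 hours */
            assert(clamp_u16(90000) == UINT16_MAX);
            assert(clamp_u16(30) == 30);
            return 0;
    }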
 
@@ -1485,150 +1378,124 @@
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_umac_scan_complete *notif = (void *)pkt->data;
 	u32 uid = __le32_to_cpu(notif->uid);
-	bool sched = !!(uid & IWL_UMAC_SCAN_UID_SCHED_SCAN);
-	int uid_idx = iwl_mvm_find_scan_uid(mvm, uid);
+	bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
 
-	/*
-	 * Scan uid may be set to zero in case of scan abort request from above.
-	 */
-	if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
+	if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
 		return 0;
 
+	/* if the scan is already stopping, we don't need to notify mac80211 */
+	if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
+		ieee80211_scan_completed(mvm->hw, aborted);
+		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+	} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
+		ieee80211_sched_scan_stopped(mvm->hw);
+	}
+
+	mvm->scan_status &= ~mvm->scan_uid_status[uid];
+
 	IWL_DEBUG_SCAN(mvm,
-		       "Scan completed, uid %u type %s, status %s, EBS status %s\n",
-		       uid, sched ? "sched" : "regular",
+		       "Scan completed, uid %u type %u, status %s, EBS status %s\n",
+		       uid, mvm->scan_uid_status[uid],
 		       notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
 				"completed" : "aborted",
-		       notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
-				"success" : "failed");
+		       iwl_mvm_ebs_status_str(notif->ebs_status));
 
-	if (notif->ebs_status)
+	if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
+	    notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
 		mvm->last_ebs_successful = false;
 
-	mvm->scan_uid[uid_idx] = 0;
-
-	if (!sched) {
-		ieee80211_scan_completed(mvm->hw,
-					 notif->status ==
-						IWL_SCAN_OFFLOAD_ABORTED);
-		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-	} else if (!iwl_mvm_find_scan_type(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN)) {
-		ieee80211_sched_scan_stopped(mvm->hw);
-	} else {
-		IWL_DEBUG_SCAN(mvm, "Another sched scan is running\n");
-	}
+	mvm->scan_uid_status[uid] = 0;
 
 	return 0;
 }
 
-static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait,
-				     struct iwl_rx_packet *pkt, void *data)
+int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+					     struct iwl_rx_cmd_buffer *rxb,
+					     struct iwl_device_cmd *cmd)
 {
-	struct iwl_umac_scan_done *scan_done = data;
-	struct iwl_umac_scan_complete *notif = (void *)pkt->data;
-	u32 uid = __le32_to_cpu(notif->uid);
-	int uid_idx = iwl_mvm_find_scan_uid(scan_done->mvm, uid);
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
+	u8 buf[256];
 
-	if (WARN_ON(pkt->hdr.cmd != SCAN_COMPLETE_UMAC))
-		return false;
-
-	if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
-		return false;
-
-	/*
-	 * Clear scan uid of scans that was aborted from above and completed
-	 * in FW so the RX handler does nothing. Set last_ebs_successful here if
-	 * needed.
-	 */
-	scan_done->mvm->scan_uid[uid_idx] = 0;
-
-	if (notif->ebs_status)
-		scan_done->mvm->last_ebs_successful = false;
-
-	return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type);
+	IWL_DEBUG_SCAN(mvm,
+		       "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n",
+		       notif->status, notif->scanned_channels,
+		       iwl_mvm_dump_channel_list(notif->results,
+						 notif->scanned_channels, buf,
+						 sizeof(buf)));
+	return 0;
 }
 
-static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid)
+static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
 {
 	struct iwl_umac_scan_abort cmd = {
 		.hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
 					sizeof(struct iwl_mvm_umac_cmd_hdr)),
-		.uid = cpu_to_le32(uid),
 	};
+	int uid, ret;
 
 	lockdep_assert_held(&mvm->mutex);
 
+	/* We should always get a valid index here, because we already
+	 * checked that this type of scan was running in the generic
+	 * code.
+	 */
+	uid = iwl_mvm_scan_uid_by_status(mvm, type);
+	if (WARN_ON_ONCE(uid < 0))
+		return uid;
+
+	cmd.uid = cpu_to_le32(uid);
+
 	IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
 
-	return iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+	ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+	if (!ret)
+		mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
+
+	return ret;
 }
 
-static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
-			      enum iwl_umac_scan_uid_type type, bool notify)
+static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
 {
 	struct iwl_notification_wait wait_scan_done;
-	static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, };
-	struct iwl_umac_scan_done scan_done = {
-		.mvm = mvm,
-		.type = type,
-	};
-	int i, ret = -EIO;
+	static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
+					      SCAN_OFFLOAD_COMPLETE, };
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
 
 	iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
 				   scan_done_notif,
 				   ARRAY_SIZE(scan_done_notif),
-				   iwl_scan_umac_done_check, &scan_done);
+				   NULL, NULL);
 
 	IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
 
-	for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
-		if (mvm->scan_uid[i] & type) {
-			int err;
-
-			if (iwl_mvm_is_radio_killed(mvm) &&
-			    (type & IWL_UMAC_SCAN_UID_REG_SCAN)) {
-				ieee80211_scan_completed(mvm->hw, true);
-				iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-				break;
-			}
-
-			err = iwl_umac_scan_abort_one(mvm, mvm->scan_uid[i]);
-			if (!err)
-				ret = 0;
-		}
-	}
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+		ret = iwl_mvm_umac_scan_abort(mvm, type);
+	else
+		ret = iwl_mvm_lmac_scan_abort(mvm);
 
 	if (ret) {
-		IWL_DEBUG_SCAN(mvm, "Couldn't stop scan\n");
+		IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
 		iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
 		return ret;
 	}
 
 	ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
-	if (ret)
-		return ret;
-
-	if (notify) {
-		if (type & IWL_UMAC_SCAN_UID_SCHED_SCAN)
-			ieee80211_sched_scan_stopped(mvm->hw);
-		if (type & IWL_UMAC_SCAN_UID_REG_SCAN) {
-			ieee80211_scan_completed(mvm->hw, true);
-			iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-		}
-	}
 
 	return ret;
 }
 
 int iwl_mvm_scan_size(struct iwl_mvm *mvm)
 {
-	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
 		return sizeof(struct iwl_scan_req_umac) +
 			sizeof(struct iwl_scan_channel_cfg_umac) *
 				mvm->fw->ucode_capa.n_scan_channels +
 			sizeof(struct iwl_scan_req_umac_tail);
 
-	return sizeof(struct iwl_scan_req_unified_lmac) +
+	return sizeof(struct iwl_scan_req_lmac) +
 		sizeof(struct iwl_scan_channel_cfg_lmac) *
 		mvm->fw->ucode_capa.n_scan_channels +
 		sizeof(struct iwl_scan_probe_req);
@@ -1640,47 +1507,76 @@
  */
 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
 {
-	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
-		u32 uid, i;
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+		int uid, i;
 
-		uid = iwl_mvm_find_first_scan(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
-		if (uid < IWL_MVM_MAX_SIMULTANEOUS_SCANS) {
+		uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
+		if (uid >= 0) {
 			ieee80211_scan_completed(mvm->hw, true);
-			mvm->scan_uid[uid] = 0;
+			mvm->scan_uid_status[uid] = 0;
 		}
-		uid = iwl_mvm_find_first_scan(mvm,
-					      IWL_UMAC_SCAN_UID_SCHED_SCAN);
-		if (uid < IWL_MVM_MAX_SIMULTANEOUS_SCANS && !mvm->restart_fw) {
+		uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
+		if (uid >= 0 && !mvm->restart_fw) {
 			ieee80211_sched_scan_stopped(mvm->hw);
-			mvm->scan_uid[uid] = 0;
+			mvm->scan_uid_status[uid] = 0;
 		}
 
 		/* We shouldn't have any UIDs still set.  Loop over all the
 		 * UIDs to make sure there's nothing left there and warn if
 		 * any is found.
 		 */
-		for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
-			if (WARN_ONCE(mvm->scan_uid[i],
-				      "UMAC scan UID %d was not cleaned\n",
-				      mvm->scan_uid[i]))
-				mvm->scan_uid[i] = 0;
+		for (i = 0; i < mvm->max_scans; i++) {
+			if (WARN_ONCE(mvm->scan_uid_status[i],
+				      "UMAC scan UID %d status was not cleaned\n",
+				      i))
+				mvm->scan_uid_status[i] = 0;
 		}
 	} else {
-		switch (mvm->scan_status) {
-		case IWL_MVM_SCAN_NONE:
-			break;
-		case IWL_MVM_SCAN_OS:
+		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
 			ieee80211_scan_completed(mvm->hw, true);
-			break;
-		case IWL_MVM_SCAN_SCHED:
-			/*
-			 * Sched scan will be restarted by mac80211 in
-			 * restart_hw, so do not report if FW is about to be
-			 * restarted.
-			 */
-			if (!mvm->restart_fw)
-				ieee80211_sched_scan_stopped(mvm->hw);
-			break;
-		}
+
+		/* Sched scan will be restarted by mac80211 in
+		 * restart_hw, so do not report if FW is about to be
+		 * restarted.
+		 */
+		if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && !mvm->restart_fw)
+			ieee80211_sched_scan_stopped(mvm->hw);
 	}
 }
+
+int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
+{
+	int ret;
+
+	if (!(mvm->scan_status & type))
+		return 0;
+
+	if (iwl_mvm_is_radio_killed(mvm)) {
+		ret = 0;
+		goto out;
+	}
+
+	ret = iwl_mvm_scan_stop_wait(mvm, type);
+	if (!ret)
+		mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
+out:
+	/* Clear the scan status so the next scan requests will
+	 * succeed and mark the scan as stopping, so that the Rx
+	 * handler doesn't do anything, as the scan was stopped from
+	 * above.
+	 */
+	mvm->scan_status &= ~type;
+
+	if (type == IWL_MVM_SCAN_REGULAR) {
+		/* Since the rx handler won't do anything now, we have
+		 * to release the scan reference here.
+		 */
+		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+		if (notify)
+			ieee80211_scan_completed(mvm->hw, true);
+	} else if (notify) {
+		ieee80211_sched_scan_stopped(mvm->hw);
+	}
+
+	return ret;
+}
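
A scan type thus occupies two bits of mvm->scan_status: its running bit and,
shifted by IWL_MVM_SCAN_STOPPING_SHIFT, its stopping bit, which lets the
completion notification tell a stop requested from above apart from a normal
completion.  A sketch of that layout (the shift value here is an assumption
for illustration):

    #include <assert.h>
    #include <stdint.h>

    #define SCAN_REGULAR   (1u << 0)        /* illustrative bit layout */
    #define SCAN_SCHED     (1u << 1)
    #define STOPPING_SHIFT 8                /* assumed, for illustration */

    int main(void)
    {
            uint32_t status = SCAN_REGULAR | SCAN_SCHED;

            /* Stop the regular scan: running bit off, stopping bit on. */
            status |= SCAN_REGULAR << STOPPING_SHIFT;
            status &= ~SCAN_REGULAR;

            assert(!(status & SCAN_REGULAR));
            assert(status & (SCAN_REGULAR << STOPPING_SHIFT));
            assert(status & SCAN_SCHED);    /* other type untouched */
            return 0;
    }
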
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 1845b79..d68dc69 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -5,8 +5,8 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,8 +31,8 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1000,13 +1000,13 @@
 
 	fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 
+	iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
+			       buf_size, ssn, wdg_timeout);
+
 	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
 	if (ret)
 		return -EIO;
 
-	iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
-			       buf_size, ssn, wdg_timeout);
-
 	/*
 	 * Even though in theory the peer could have different
 	 * aggregation reorder buffer sizes for different sessions,
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index fd7b0d3..d24b6a8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -108,12 +108,14 @@
 	 * in the case that the time event actually completed in the firmware
 	 * (which is handled in iwl_mvm_te_handle_notif).
 	 */
-	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
+	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
 		queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
-	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
+		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
+	}
+	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
 		queues |= BIT(mvm->aux_queue);
-
-	iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
+		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
+	}
 
 	synchronize_net();
 
@@ -393,6 +395,7 @@
 	} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
 		set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
 		te_data->running = true;
+		iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
 		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
 	} else {
 		IWL_DEBUG_TE(mvm,
@@ -794,13 +797,12 @@
 
 void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
 {
-	struct iwl_mvm_vif *mvmvif;
+	struct iwl_mvm_vif *mvmvif = NULL;
 	struct iwl_mvm_time_event_data *te_data;
 	bool is_p2p = false;
 
 	lockdep_assert_held(&mvm->mutex);
 
-	mvmvif = NULL;
 	spin_lock_bh(&mvm->time_event_lock);
 
 	/*
@@ -818,17 +820,14 @@
 		}
 	}
 
-	/*
-	 * Iterate over the list of aux roc time events and find the time
-	 * event that is associated with a BSS interface.
-	 * This assumes that a BSS interface can have only a single time
-	 * event at any given time and this time event corresponds to a ROC
-	 * request
+	/* There can be at most one AUX ROC time event; we just use the
+	 * list to simplify/unify code. Remove it if it exists.
 	 */
-	list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
+	te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
+					   struct iwl_mvm_time_event_data,
+					   list);
+	if (te_data)
 		mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
-		goto remove_te;
-	}
 
 remove_te:
 	spin_unlock_bh(&mvm->time_event_lock);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index ba615ad..80d07db 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -70,7 +70,7 @@
 static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
 {
 	struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
-	u32 duration = mvm->thermal_throttle.params->ct_kill_duration;
+	u32 duration = tt->params.ct_kill_duration;
 
 	if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
 		return;
@@ -223,7 +223,7 @@
 	tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
 	mvm = container_of(tt, struct iwl_mvm, thermal_throttle);
 
-	duration = tt->params->ct_kill_duration;
+	duration = tt->params.ct_kill_duration;
 
 	mutex_lock(&mvm->mutex);
 
@@ -247,7 +247,7 @@
 
 	IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp);
 
-	if (temp <= tt->params->ct_kill_exit) {
+	if (temp <= tt->params.ct_kill_exit) {
 		mutex_unlock(&mvm->mutex);
 		iwl_mvm_exit_ctkill(mvm);
 		return;
@@ -325,7 +325,7 @@
 
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
 {
-	const struct iwl_tt_params *params = mvm->thermal_throttle.params;
+	struct iwl_tt_params *params = &mvm->thermal_throttle.params;
 	struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
 	s32 temperature = mvm->temperature;
 	bool throttle_enable = false;
@@ -340,7 +340,7 @@
 	}
 
 	if (params->support_ct_kill &&
-	    temperature <= tt->params->ct_kill_exit) {
+	    temperature <= params->ct_kill_exit) {
 		iwl_mvm_exit_ctkill(mvm);
 		return;
 	}
@@ -400,7 +400,7 @@
 	}
 }
 
-static const struct iwl_tt_params iwl7000_tt_params = {
+static const struct iwl_tt_params iwl_mvm_default_tt_params = {
 	.ct_kill_entry = 118,
 	.ct_kill_exit = 96,
 	.ct_kill_duration = 5,
@@ -422,38 +422,16 @@
 	.support_tx_backoff = true,
 };
 
-static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
-	.ct_kill_entry = 118,
-	.ct_kill_exit = 96,
-	.ct_kill_duration = 5,
-	.dynamic_smps_entry = 114,
-	.dynamic_smps_exit = 110,
-	.tx_protection_entry = 114,
-	.tx_protection_exit = 108,
-	.tx_backoff = {
-		{.temperature = 112, .backoff = 300},
-		{.temperature = 113, .backoff = 800},
-		{.temperature = 114, .backoff = 1500},
-		{.temperature = 115, .backoff = 3000},
-		{.temperature = 116, .backoff = 5000},
-		{.temperature = 117, .backoff = 10000},
-	},
-	.support_ct_kill = true,
-	.support_dynamic_smps = true,
-	.support_tx_protection = true,
-	.support_tx_backoff = true,
-};
-
 void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
 {
 	struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
 
 	IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
 
-	if (mvm->cfg->high_temp)
-		tt->params = &iwl7000_high_temp_tt_params;
+	if (mvm->cfg->thermal_params)
+		tt->params = *mvm->cfg->thermal_params;
 	else
-		tt->params = &iwl7000_tt_params;
+		tt->params = iwl_mvm_default_tt_params;
 
 	tt->throttle = false;
 	tt->dynamic_smps = false;
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index ef32e17..7ba7a118 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,30 @@
 #include "mvm.h"
 #include "sta.h"
 
+static void
+iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
+			  u16 tid, u16 ssn)
+{
+	struct iwl_fw_dbg_trigger_tlv *trig;
+	struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+		return;
+
+	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+	ba_trig = (void *)trig->data;
+
+	if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
+		return;
+
+	if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
+		return;
+
+	iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+				    "BAR sent to %pM, tid %d, ssn %d",
+				    addr, tid, ssn);
+}
+
 /*
  * Sets most of the Tx cmd's fields
  */
@@ -101,12 +125,15 @@
 	} else if (ieee80211_is_back_req(fc)) {
 		struct ieee80211_bar *bar = (void *)skb->data;
 		u16 control = le16_to_cpu(bar->control);
+		u16 ssn = le16_to_cpu(bar->start_seq_num);
 
 		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
 		tx_cmd->tid_tspec = (control &
 				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
 			IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
 		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
+		iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
+					  ssn);
 	} else {
 		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
 		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
@@ -144,8 +171,8 @@
 	    !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
 		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
 
-	if ((mvm->fw->ucode_capa.capa[0] &
-	     IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
 	    ieee80211_action_contains_tpc(skb))
 		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index bc55a8b..03f8e06 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -584,7 +584,7 @@
 	struct iwl_error_event_table table;
 	u32 base;
 
-	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)) {
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) {
 		iwl_mvm_dump_nic_error_log_old(mvm);
 		return;
 	}
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index b185697..2ed1e4d 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -629,7 +629,18 @@
 	if (!trans->op_mode)
 		return 0;
 
-	iwl_enable_rfkill_int(trans);
+	/*
+	 * On suspend, ict is disabled, and the interrupt mask
+	 * gets cleared. Reconfigure them both in case of d0i3
+	 * image. Otherwise, only enable rfkill interrupt (in
+	 * order to keep track of the rfkill status)
+	 */
+	if (trans->wowlan_d0i3) {
+		iwl_pcie_reset_ict(trans);
+		iwl_enable_interrupts(trans);
+	} else {
+		iwl_enable_rfkill_int(trans);
+	}
 
 	hw_rfkill = iwl_is_rfkill_set(trans);
 	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 376b84e..31f72a6 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -44,6 +44,15 @@
 #include "iwl-io.h"
 #include "iwl-op-mode.h"
 
+/*
+ * RX related structures and functions
+ */
+#define RX_NUM_QUEUES 1
+#define RX_POST_REQ_ALLOC 2
+#define RX_CLAIM_REQ_ALLOC 8
+#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
+#define RX_LOW_WATERMARK 8
+
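+/* Worked numbers for the defaults above (editorial sketch, not in the patch):
+ *   RX_POST_REQ_ALLOC  = 2  -> RBDs handed over when a request is posted
+ *   RX_CLAIM_REQ_ALLOC = 8  -> RBDs handed over by the time a claim succeeds
+ *   RX_POOL_SIZE       = (8 - 2) * RX_NUM_QUEUES = 6
+ * i.e. the allocator's own pool bridges the 6-RBD gap between posting a
+ * request (2 RBDs received) and completing it (8 pages delivered).
+ */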
 struct iwl_host_cmd;
 
 /* This file includes the declarations that are internal to the
@@ -77,29 +86,29 @@
  * struct iwl_rxq - Rx queue
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @pool:
- * @queue:
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
+ * @used_count: Number of RBDs handed to the allocator for use in allocation
  * @write_actual: last write index actually written to the device (rounded
  *	down to a multiple of 8)
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
+ * @rx_free: list of RBDs with allocated RB ready for use
+ * @rx_used: list of RBDs with no RB attached
  * @need_update: flag to indicate we need to update read/write index
  * @rb_stts: driver's pointer to receive buffer status
  * @rb_stts_dma: bus address of receive buffer status
  * @lock: protects the queue's lists and indexes
+ * @pool: initial pool of iwl_rx_mem_buffer for the queue
+ * @queue: actual rx queue
  *
  * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
 struct iwl_rxq {
 	__le32 *bd;
 	dma_addr_t bd_dma;
-	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
-	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
 	u32 read;
 	u32 write;
 	u32 free_count;
+	u32 used_count;
 	u32 write_actual;
 	struct list_head rx_free;
 	struct list_head rx_used;
@@ -107,6 +116,32 @@
 	struct iwl_rb_status *rb_stts;
 	dma_addr_t rb_stts_dma;
 	spinlock_t lock;
+	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
+	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+};
+
+/**
+ * struct iwl_rb_allocator - Rx allocator
+ * @pool: initial pool of allocator
+ * @req_pending: number of requests the allocator has not processed yet
+ * @req_ready: number of requests honored and ready for claiming
+ * @rbd_allocated: RBDs with pages allocated and ready to be handed to
+ *	the queue. This is a list of &struct iwl_rx_mem_buffer
+ * @rbd_empty: RBDs with no page attached for allocator use. This is a list
+ *	of &struct iwl_rx_mem_buffer
+ * @lock: protects the rbd_allocated and rbd_empty lists
+ * @alloc_wq: work queue for background calls
+ * @rx_alloc: work struct for background calls
+ */
+struct iwl_rb_allocator {
+	struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
+	atomic_t req_pending;
+	atomic_t req_ready;
+	struct list_head rbd_allocated;
+	struct list_head rbd_empty;
+	spinlock_t lock;
+	struct workqueue_struct *alloc_wq;
+	struct work_struct rx_alloc;
 };
 
 struct iwl_dma_ptr {
@@ -250,7 +285,7 @@
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
- * @rx_replenish: work that will be called when buffers need to be allocated
+ * @rba: allocator for RX replenishing
  * @drv - pointer to iwl_drv
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
@@ -273,7 +308,7 @@
  */
 struct iwl_trans_pcie {
 	struct iwl_rxq rxq;
-	struct work_struct rx_replenish;
+	struct iwl_rb_allocator rba;
 	struct iwl_trans *trans;
 	struct iwl_drv *drv;
 
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 7ff69c6..a3fbaa0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -74,16 +74,29 @@
  * resets the Rx queue buffers with new memory.
  *
  * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
- *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- *   to replenish the iwl->rxq->rx_free.
- * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
- *   iwl->rxq is replenished and the READ INDEX is updated (updating the
- *   'processed' and 'read' driver indexes as well)
+ * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
+ *   When the interrupt handler is called, the request is processed.
+ *   The page is either stolen - transferred to the upper layer
+ *   or reused - added immediately to the iwl->rxq->rx_free list.
+ * + When the page is stolen - the driver updates the matching queue's used
+ *   count, detaches the RBD and transfers it to the queue's used list.
+ *   When two RBDs have been used - they are transferred to the allocator's
+ *   empty list. Work is then scheduled for the allocator to start allocating
+ *   eight buffers.
+ *   When another six RBDs have been used - they too are transferred to the
+ *   allocator's empty list, and the driver tries to claim the pre-allocated
+ *   buffers and add them to iwl->rxq->rx_free. If the claim fails - it is
+ *   retried until the buffers are ready.
+ *   When there are 8+ buffers in the free list - either from allocation or
+ *   from 8 reused unstolen pages - restock is called to update the FW and
+ *   the indexes.
+ * + To make sure the allocator always has RBDs to use for allocation, it
+ *   keeps an initial pool of num_queues*(8-2) RBDs - the maximum number of
+ *   RBDs that can be missing per allocation request (a request is posted
+ *   with 2 empty RBDs; there is no guarantee when the other 6 RBDs are
+ *   supplied). The queues supply the recycling of the remaining RBDs.
  * + A received packet is processed and handed to the kernel network stack,
  *   detached from the iwl->rxq.  The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
- *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ * + If there are no allocated buffers in iwl->rxq->rx_free,
  *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
  *   If there were enough free buffers and RX_STALLED is set it is cleared.
  *
@@ -92,18 +105,32 @@
  *
  * iwl_rxq_alloc()            Allocates rx_free
  * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
- *                            iwl_pcie_rxq_restock
+ *                            iwl_pcie_rxq_restock.
+ *                            Used only during initialization.
  * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
  *                            queue, updates firmware pointers, and updates
- *                            the WRITE index.  If insufficient rx_free buffers
- *                            are available, schedules iwl_pcie_rx_replenish
+ *                            the WRITE index.
+ * iwl_pcie_rx_allocator()     Background work for allocating pages.
  *
  * -- enable interrupts --
  * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
  *                            READ INDEX, detaching the SKB from the pool.
  *                            Moves the packet buffer from queue to rx_used.
+ *                            Posts and claims requests to the allocator.
  *                            Calls iwl_pcie_rxq_restock to refill any empty
  *                            slots.
+ *
+ * RBD life-cycle:
+ *
+ * Init:
+ * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
+ *
+ * Regular Receive interrupt:
+ * Page Stolen:
+ * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
+ * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
+ * Page not Stolen:
+ * rxq.queue -> rxq.rx_free -> rxq.queue
  * ...
  *
  */
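
The thresholds described above can be condensed into a small model of the used-RBD counter (plain C, detached from the driver; the two helpers are hypothetical stand-ins for the real post/claim paths):

/* Toy model of the 2-then-6 batching described above. */
static void post_allocation_request(void);	/* hypothetical */
static void try_claim_allocated_pages(void);	/* hypothetical */

static void rbd_used_one(unsigned int *used_count)
{
	(*used_count)++;

	if (*used_count % RX_CLAIM_REQ_ALLOC == RX_POST_REQ_ALLOC)
		post_allocation_request();	/* 2 used: hand them over */
	if (*used_count >= RX_CLAIM_REQ_ALLOC)
		try_claim_allocated_pages();	/* 8 used: hand over 6, claim 8 */
}
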
@@ -240,10 +267,6 @@
 		rxq->free_count--;
 	}
 	spin_unlock(&rxq->lock);
-	/* If the pre-allocated buffer pool is dropping low, schedule to
-	 * refill it */
-	if (rxq->free_count <= RX_LOW_WATERMARK)
-		schedule_work(&trans_pcie->rx_replenish);
 
 	/* If we've added more space for the firmware to place data, tell it.
 	 * Increment device's write pointer in multiples of 8. */
@@ -255,6 +278,44 @@
 }
 
 /*
+ * iwl_pcie_rx_alloc_page - allocates and returns a page.
+ */
+static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct page *page;
+	gfp_t gfp_mask = GFP_KERNEL;
+
+	if (rxq->free_count > RX_LOW_WATERMARK)
+		gfp_mask |= __GFP_NOWARN;
+
+	if (trans_pcie->rx_page_order > 0)
+		gfp_mask |= __GFP_COMP;
+
+	/* Alloc a new receive buffer */
+	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+	if (!page) {
+		if (net_ratelimit())
+			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
+				       trans_pcie->rx_page_order);
+		/* Issue an error if the hardware has consumed more than half
+		 * of its free buffer list and we don't have enough
+		 * pre-allocated buffers.
+		 */
+		if (rxq->free_count <= RX_LOW_WATERMARK &&
+		    iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
+		    net_ratelimit())
+			IWL_CRIT(trans,
+				 "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
+				 rxq->free_count);
+		return NULL;
+	}
+	return page;
+}
+
+/*
  * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
  *
  * A used RBD is an Rx buffer that has been given to the stack. To use it again
@@ -263,13 +324,12 @@
  * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
  * allocated buffers.
  */
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
 	struct page *page;
-	gfp_t gfp_mask = priority;
 
 	while (1) {
 		spin_lock(&rxq->lock);
@@ -279,32 +339,10 @@
 		}
 		spin_unlock(&rxq->lock);
 
-		if (rxq->free_count > RX_LOW_WATERMARK)
-			gfp_mask |= __GFP_NOWARN;
-
-		if (trans_pcie->rx_page_order > 0)
-			gfp_mask |= __GFP_COMP;
-
 		/* Alloc a new receive buffer */
-		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
-		if (!page) {
-			if (net_ratelimit())
-				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
-					   "order: %d\n",
-					   trans_pcie->rx_page_order);
-
-			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
-			    net_ratelimit())
-				IWL_CRIT(trans, "Failed to alloc_pages with %s."
-					 "Only %u free buffers remaining.\n",
-					 priority == GFP_ATOMIC ?
-					 "GFP_ATOMIC" : "GFP_KERNEL",
-					 rxq->free_count);
-			/* We don't reschedule replenish work here -- we will
-			 * call the restock method and if it still needs
-			 * more buffers it will schedule replenish */
+		page = iwl_pcie_rx_alloc_page(trans);
+		if (!page)
 			return;
-		}
 
 		spin_lock(&rxq->lock);
 
@@ -355,7 +393,7 @@
 
 	lockdep_assert_held(&rxq->lock);
 
-	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+	for (i = 0; i < RX_QUEUE_SIZE; i++) {
 		if (!rxq->pool[i].page)
 			continue;
 		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -372,32 +410,144 @@
  * When moving to rx_free a page is allocated for the slot.
  *
  * Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called as a scheduled work item (except for during initialization)
+ * This is called only during initialization
  */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
 {
-	iwl_pcie_rxq_alloc_rbs(trans, gfp);
+	iwl_pcie_rxq_alloc_rbs(trans);
 
 	iwl_pcie_rxq_restock(trans);
 }
 
-static void iwl_pcie_rx_replenish_work(struct work_struct *data)
+/*
+ * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
+ *
+ * Allocates for each received request 8 pages
+ * Called as a scheduled work item.
+ */
+static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
 {
-	struct iwl_trans_pcie *trans_pcie =
-	    container_of(data, struct iwl_trans_pcie, rx_replenish);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 
-	iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
+	while (atomic_read(&rba->req_pending)) {
+		int i;
+		struct list_head local_empty;
+		struct list_head local_allocated;
+
+		INIT_LIST_HEAD(&local_allocated);
+		spin_lock(&rba->lock);
+		/* swap out the entire rba->rbd_empty to a local list */
+		list_replace_init(&rba->rbd_empty, &local_empty);
+		spin_unlock(&rba->lock);
+
+		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
+			struct iwl_rx_mem_buffer *rxb;
+			struct page *page;
+
+			/* List should never be empty - each reused RBD is
+			 * returned to the list, and the initial pool covers
+			 * any possible gap between the time the page is
+			 * allocated and the time the RBD is added.
+			 */
+			BUG_ON(list_empty(&local_empty));
+			/* Get the first rxb from the rbd list */
+			rxb = list_first_entry(&local_empty,
+					       struct iwl_rx_mem_buffer, list);
+			BUG_ON(rxb->page);
+
+			/* Alloc a new receive buffer */
+			page = iwl_pcie_rx_alloc_page(trans);
+			if (!page)
+				continue;
+			rxb->page = page;
+
+			/* Get physical address of the RB */
+			rxb->page_dma = dma_map_page(trans->dev, page, 0,
+					PAGE_SIZE << trans_pcie->rx_page_order,
+					DMA_FROM_DEVICE);
+			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+				rxb->page = NULL;
+				__free_pages(page, trans_pcie->rx_page_order);
+				continue;
+			}
+			/* dma address must be no more than 36 bits */
+			BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+			/* and also 256 byte aligned! */
+			BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+			/* move the allocated entry to the out list */
+			list_move(&rxb->list, &local_allocated);
+			i++;
+		}
+
+		spin_lock(&rba->lock);
+		/* add the allocated rbds to the allocator allocated list */
+		list_splice_tail(&local_allocated, &rba->rbd_allocated);
+		/* add the unused rbds back to the allocator empty list */
+		list_splice_tail(&local_empty, &rba->rbd_empty);
+		spin_unlock(&rba->lock);
+
+		atomic_dec(&rba->req_pending);
+		atomic_inc(&rba->req_ready);
+	}
+}
+
+/*
+ * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
+ *
+ * Called by the queue when it has posted an allocation request and
+ * has freed 8 RBDs in order to restock itself.
+ */
+static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
+				     struct iwl_rx_mem_buffer
+				     *out[RX_CLAIM_REQ_ALLOC])
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+	int i;
+
+	if (atomic_dec_return(&rba->req_ready) < 0) {
+		atomic_inc(&rba->req_ready);
+		IWL_DEBUG_RX(trans,
+			     "Allocation request not ready, pending requests = %d\n",
+			     atomic_read(&rba->req_pending));
+		return -ENOMEM;
+	}
+
+	spin_lock(&rba->lock);
+	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
+		/* Get next free Rx buffer, remove it from free list */
+		out[i] = list_first_entry(&rba->rbd_allocated,
+			       struct iwl_rx_mem_buffer, list);
+		list_del(&out[i]->list);
+	}
+	spin_unlock(&rba->lock);
+
+	return 0;
+}
+
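
req_pending and req_ready above implement a lock-free handshake between the queue and the allocator: the queue bumps req_pending when posting work, the allocator moves one unit from req_pending to req_ready per completed batch of eight, and iwl_pcie_rx_allocator_get() consumes req_ready, undoing its decrement when nothing is ready. Stripped to its essentials (sketch, not driver code):

/* Essentials of the counter handshake used above. */
static atomic_t req_pending = ATOMIC_INIT(0);
static atomic_t req_ready = ATOMIC_INIT(0);

static void post_request(void)
{
	atomic_inc(&req_pending);	/* queue side */
}

static void complete_batch(void)
{
	atomic_dec(&req_pending);	/* allocator side, after 8 pages */
	atomic_inc(&req_ready);
}

static int try_claim(void)
{
	if (atomic_dec_return(&req_ready) < 0) {
		atomic_inc(&req_ready);	/* undo: nothing ready yet */
		return -ENOMEM;
	}
	return 0;			/* caller may take 8 buffers */
}
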
+static void iwl_pcie_rx_allocator_work(struct work_struct *data)
+{
+	struct iwl_rb_allocator *rba_p =
+		container_of(data, struct iwl_rb_allocator, rx_alloc);
+	struct iwl_trans_pcie *trans_pcie =
+		container_of(rba_p, struct iwl_trans_pcie, rba);
+
+	iwl_pcie_rx_allocator(trans_pcie->trans);
 }
 
 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 	struct device *dev = trans->dev;
 
 	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
 
 	spin_lock_init(&rxq->lock);
+	spin_lock_init(&rba->lock);
 
 	if (WARN_ON(rxq->bd || rxq->rb_stts))
 		return -EINVAL;
@@ -487,15 +637,49 @@
 	INIT_LIST_HEAD(&rxq->rx_free);
 	INIT_LIST_HEAD(&rxq->rx_used);
 	rxq->free_count = 0;
+	rxq->used_count = 0;
 
-	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+	for (i = 0; i < RX_QUEUE_SIZE; i++)
 		list_add(&rxq->pool[i].list, &rxq->rx_used);
 }
 
+static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
+{
+	int i;
+
+	lockdep_assert_held(&rba->lock);
+
+	INIT_LIST_HEAD(&rba->rbd_allocated);
+	INIT_LIST_HEAD(&rba->rbd_empty);
+
+	for (i = 0; i < RX_POOL_SIZE; i++)
+		list_add(&rba->pool[i].list, &rba->rbd_empty);
+}
+
+static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+	int i;
+
+	lockdep_assert_held(&rba->lock);
+
+	for (i = 0; i < RX_POOL_SIZE; i++) {
+		if (!rba->pool[i].page)
+			continue;
+		dma_unmap_page(trans->dev, rba->pool[i].page_dma,
+			       PAGE_SIZE << trans_pcie->rx_page_order,
+			       DMA_FROM_DEVICE);
+		__free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
+		rba->pool[i].page = NULL;
+	}
+}
+
 int iwl_pcie_rx_init(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 	int i, err;
 
 	if (!rxq->bd) {
@@ -503,11 +687,21 @@
 		if (err)
 			return err;
 	}
+	if (!rba->alloc_wq)
+		rba->alloc_wq = alloc_workqueue("rb_allocator",
+						WQ_HIGHPRI | WQ_UNBOUND, 1);
+	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
+
+	spin_lock(&rba->lock);
+	atomic_set(&rba->req_pending, 0);
+	atomic_set(&rba->req_ready, 0);
+	/* free all first - we might be reconfigured for a different size */
+	iwl_pcie_rx_free_rba(trans);
+	iwl_pcie_rx_init_rba(rba);
+	spin_unlock(&rba->lock);
 
 	spin_lock(&rxq->lock);
 
-	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
-
 	/* free all first - we might be reconfigured for a different size */
 	iwl_pcie_rxq_free_rbs(trans);
 	iwl_pcie_rx_init_rxb_lists(rxq);
@@ -522,7 +716,7 @@
 	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
 	spin_unlock(&rxq->lock);
 
-	iwl_pcie_rx_replenish(trans, GFP_KERNEL);
+	iwl_pcie_rx_replenish(trans);
 
 	iwl_pcie_rx_hw_init(trans, rxq);
 
@@ -537,6 +731,7 @@
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 
 	/*if rxq->bd is NULL, it means that nothing has been allocated,
 	 * exit now */
@@ -545,7 +740,15 @@
 		return;
 	}
 
-	cancel_work_sync(&trans_pcie->rx_replenish);
+	cancel_work_sync(&rba->rx_alloc);
+	if (rba->alloc_wq) {
+		destroy_workqueue(rba->alloc_wq);
+		rba->alloc_wq = NULL;
+	}
+
+	spin_lock(&rba->lock);
+	iwl_pcie_rx_free_rba(trans);
+	spin_unlock(&rba->lock);
 
 	spin_lock(&rxq->lock);
 	iwl_pcie_rxq_free_rbs(trans);
@@ -566,6 +769,43 @@
 	rxq->rb_stts = NULL;
 }
 
+/*
+ * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
+ *
+ * Called when an RBD can be reused. The RBD is transferred to the allocator.
+ * When there are 2 empty RBDs - a request for allocation is posted.
+ */
+static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
+				  struct iwl_rx_mem_buffer *rxb,
+				  struct iwl_rxq *rxq)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+
+	/* Count the used RBDs */
+	rxq->used_count++;
+
+	/* Move the RBD to the used list. It will be moved to the allocator
+	 * in batches before claiming or posting a request.
+	 */
+	list_add_tail(&rxb->list, &rxq->rx_used);
+
+	/* If we have RX_POST_REQ_ALLOC newly released rx buffers -
+	 * issue a request for the allocator. The modulo RX_CLAIM_REQ_ALLOC
+	 * covers the case where we failed to claim RX_CLAIM_REQ_ALLOC
+	 * buffers earlier but still need to post another request.
+	 */
+	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
+		/* Move the 2 RBDs to the allocator's ownership.
+		 * The allocator has another 6 from its pool for the request
+		 * completion.
+		 */
+		spin_lock(&rba->lock);
+		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+		spin_unlock(&rba->lock);
+
+		atomic_inc(&rba->req_pending);
+		queue_work(rba->alloc_wq, &rba->rx_alloc);
+	}
+}
+
 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 				struct iwl_rx_mem_buffer *rxb)
 {
@@ -688,13 +928,13 @@
 			 */
 			__free_pages(rxb->page, trans_pcie->rx_page_order);
 			rxb->page = NULL;
-			list_add_tail(&rxb->list, &rxq->rx_used);
+			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
 		} else {
 			list_add_tail(&rxb->list, &rxq->rx_free);
 			rxq->free_count++;
 		}
 	} else
-		list_add_tail(&rxb->list, &rxq->rx_used);
+		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
 }
 
 /*
@@ -704,10 +944,7 @@
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	u32 r, i;
-	u8 fill_rx = 0;
-	u32 count = 8;
-	int total_empty;
+	u32 r, i, j;
 
 restart:
 	spin_lock(&rxq->lock);
@@ -720,14 +957,6 @@
 	if (i == r)
 		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
 
-	/* calculate total frames need to be restock after handling RX */
-	total_empty = r - rxq->write_actual;
-	if (total_empty < 0)
-		total_empty += RX_QUEUE_SIZE;
-
-	if (total_empty > (RX_QUEUE_SIZE / 2))
-		fill_rx = 1;
-
 	while (i != r) {
 		struct iwl_rx_mem_buffer *rxb;
 
@@ -739,29 +968,48 @@
 		iwl_pcie_rx_handle_rb(trans, rxb);
 
 		i = (i + 1) & RX_QUEUE_MASK;
-		/* If there are a lot of unused frames,
-		 * restock the Rx queue so ucode wont assert. */
-		if (fill_rx) {
-			count++;
-			if (count >= 8) {
-				rxq->read = i;
-				spin_unlock(&rxq->lock);
-				iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
-				count = 0;
-				goto restart;
+
+		/* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
+		 * try to claim the pre-allocated buffers from the allocator */
+		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
+			struct iwl_rb_allocator *rba = &trans_pcie->rba;
+			struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
+
+			/* Add the remaining 6 empty RBDs for allocator use */
+			spin_lock(&rba->lock);
+			list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+			spin_unlock(&rba->lock);
+
+			/* If not ready - continue, will try to reclaim later.
+			 * No need to reschedule work - allocator exits only
+			 * on success.
+			 */
+			if (!iwl_pcie_rx_allocator_get(trans, out)) {
+				/* If successful - then RX_CLAIM_REQ_ALLOC
+				 * buffers were retrieved and should be added
+				 * to the free list */
+				rxq->used_count -= RX_CLAIM_REQ_ALLOC;
+				for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
+					list_add_tail(&out[j]->list,
+						      &rxq->rx_free);
+					rxq->free_count++;
+				}
 			}
 		}
+		/* Handle restock for two cases:
+		 * - we just pulled buffers from the allocator
+		 * - we have 8+ unstolen pages accumulated
+		 */
+		if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
+			rxq->read = i;
+			spin_unlock(&rxq->lock);
+			iwl_pcie_rxq_restock(trans);
+			goto restart;
+		}
 	}
 
 	/* Backtrack one entry */
 	rxq->read = i;
 	spin_unlock(&rxq->lock);
 
-	if (fill_rx)
-		iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
-	else
-		iwl_pcie_rxq_restock(trans);
-
 	if (trans_pcie->napi.poll)
 		napi_gro_flush(&trans_pcie->napi, false);
 }
@@ -775,6 +1023,7 @@
 
 	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
 	if (trans->cfg->internal_wimax_coex &&
+	    !trans->cfg->apmg_not_supported &&
 	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
 			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
 	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index dc17909..43ae658 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -101,14 +101,26 @@
 	trans_pcie->fw_mon_size = 0;
 }
 
-static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
+static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct page *page = NULL;
 	dma_addr_t phys;
-	u32 size;
+	u32 size = 0;
 	u8 power;
 
+	if (!max_power) {
+		/* default max_power is maximum */
+		max_power = 26;
+	} else {
+		max_power += 11;
+	}
+
+	if (WARN(max_power > 26,
+		 "External buffer size for monitor is too big %d, check the FW TLV\n",
+		 max_power))
+		return;
+
 	if (trans_pcie->fw_mon_page) {
 		dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
 					   trans_pcie->fw_mon_size,
@@ -117,7 +129,7 @@
 	}
 
 	phys = 0;
-	for (power = 26; power >= 11; power--) {
+	for (power = max_power; power >= 11; power--) {
 		int order;
 
 		size = BIT(power);
@@ -143,6 +155,12 @@
 	if (WARN_ON_ONCE(!page))
 		return;
 
+	if (power != max_power)
+		IWL_ERR(trans,
+			"Sorry - debug buffer is only %luK while you requested %luK\n",
+			(unsigned long)BIT(power - 10),
+			(unsigned long)BIT(max_power - 10));
+
 	trans_pcie->fw_mon_page = page;
 	trans_pcie->fw_mon_phys = phys;
 	trans_pcie->fw_mon_size = size;
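
The power arithmetic above encodes the monitor buffer size as a power of two: a TLV value of 0 selects the maximum order 26 (64 MiB), and a non-zero value v selects order v + 11, so v = 1 requests 4 KiB and v = 15 is the largest accepted (again 64 MiB). On failure the loop retries smaller orders down to 2^11 = 2 KiB. Summarized (illustrative helper, not in the patch):

/* Illustrative: bytes requested for a given TLV size_power value. */
static u32 fw_mon_request_bytes(u8 size_power)
{
	u8 power = size_power ? size_power + 11 : 26;

	return power <= 26 ? BIT(power) : 0;	/* 0: rejected by the WARN */
}
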
@@ -164,6 +182,9 @@
 
 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 {
+	if (!trans->cfg->apmg_not_supported)
+		return;
+
 	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
 		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
 				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
@@ -297,7 +318,7 @@
 	 * bits do not disable clocks.  This preserves any hardware
 	 * bits already set by default in "CLK_CTRL_REG" after reset.
 	 */
-	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+	if (!trans->cfg->apmg_not_supported) {
 		iwl_write_prph(trans, APMG_CLK_EN_REG,
 			       APMG_CLK_VAL_DMA_CLK_RQT);
 		udelay(20);
@@ -497,8 +518,7 @@
 
 	spin_unlock(&trans_pcie->irq_lock);
 
-	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
-		iwl_pcie_set_pwr(trans, false);
+	iwl_pcie_set_pwr(trans, false);
 
 	iwl_op_mode_nic_config(trans->op_mode);
 
@@ -834,7 +854,7 @@
 		 get_fw_dbg_mode_string(dest->monitor_mode));
 
 	if (dest->monitor_mode == EXTERNAL_MODE)
-		iwl_pcie_alloc_fw_monitor(trans);
+		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
 	else
 		IWL_WARN(trans, "PCI should have external buffer debug\n");
 
@@ -908,7 +928,7 @@
 	/* supported for 7000 only for the moment */
 	if (iwlwifi_mod_params.fw_monitor &&
 	    trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
-		iwl_pcie_alloc_fw_monitor(trans);
+		iwl_pcie_alloc_fw_monitor(trans, 0);
 
 		if (trans_pcie->fw_mon_size) {
 			iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
@@ -955,12 +975,8 @@
 		return ret;
 
 	/* load to FW the binary sections of CPU2 */
-	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 2,
-					      &first_ucode_section);
-	if (ret)
-		return ret;
-
-	return 0;
+	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
+					       &first_ucode_section);
 }
 
 static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
@@ -1049,7 +1065,7 @@
 		iwl_pcie_rx_stop(trans);
 
 		/* Power-down device's busmaster DMA clocks */
-		if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+		if (!trans->cfg->apmg_not_supported) {
 			iwl_write_prph(trans, APMG_CLK_DIS_REG,
 				       APMG_CLK_VAL_DMA_CLK_RQT);
 			udelay(5);
@@ -1346,14 +1362,13 @@
 	iounmap(trans_pcie->hw_base);
 	pci_release_regions(trans_pcie->pci_dev);
 	pci_disable_device(trans_pcie->pci_dev);
-	kmem_cache_destroy(trans->dev_cmd_pool);
 
 	if (trans_pcie->napi.poll)
 		netif_napi_del(&trans_pcie->napi);
 
 	iwl_pcie_free_fw_monitor(trans);
 
-	kfree(trans);
+	iwl_trans_free(trans);
 }
 
 static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
@@ -2200,6 +2215,29 @@
 	return sizeof(**data) + fh_regs_len;
 }
 
+static u32
+iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
+				 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
+				 u32 monitor_len)
+{
+	u32 buf_size_in_dwords = (monitor_len >> 2);
+	u32 *buffer = (u32 *)fw_mon_data->data;
+	unsigned long flags;
+	u32 i;
+
+	if (!iwl_trans_grab_nic_access(trans, false, &flags))
+		return 0;
+
+	__iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
+	for (i = 0; i < buf_size_in_dwords; i++)
+		buffer[i] = __iwl_read_prph(trans, MON_DMARB_RD_DATA_ADDR);
+	__iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
+
+	iwl_trans_release_nic_access(trans, &flags);
+
+	return monitor_len;
+}
+
 static
 struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
 {
@@ -2252,7 +2290,8 @@
 		      trans->dbg_dest_tlv->end_shift;
 
 		/* Make "end" point to the actual end */
-		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 ||
+		    trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
 			end += (1 << trans->dbg_dest_tlv->end_shift);
 		monitor_len = end - base;
 		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
@@ -2328,9 +2367,6 @@
 
 		len += sizeof(*data) + sizeof(*fw_mon_data);
 		if (trans_pcie->fw_mon_page) {
-			data->len = cpu_to_le32(trans_pcie->fw_mon_size +
-						sizeof(*fw_mon_data));
-
 			/*
 			 * The firmware is now asserted, it won't write anything
 			 * to the buffer. CPU can take ownership to fetch the
@@ -2345,10 +2381,8 @@
 			       page_address(trans_pcie->fw_mon_page),
 			       trans_pcie->fw_mon_size);
 
-			len += trans_pcie->fw_mon_size;
-		} else {
-			/* If we are here then the buffer is internal */
-
+			monitor_len = trans_pcie->fw_mon_size;
+		} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
 			/*
 			 * Update pointers to reflect actual values after
 			 * shifting
@@ -2357,10 +2391,18 @@
 			       trans->dbg_dest_tlv->base_shift;
 			iwl_trans_read_mem(trans, base, fw_mon_data->data,
 					   monitor_len / sizeof(u32));
-			data->len = cpu_to_le32(sizeof(*fw_mon_data) +
-						monitor_len);
-			len += monitor_len;
+		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
+			monitor_len =
+				iwl_trans_pci_dump_marbh_monitor(trans,
+								 fw_mon_data,
+								 monitor_len);
+		} else {
+			/* Didn't match anything - output no monitor data */
+			monitor_len = 0;
 		}
+
+		len += monitor_len;
+		data->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
 	}
 
 	dump_data->len = len;
@@ -2419,18 +2461,13 @@
 	u16 pci_cmd;
 	int err;
 
-	trans = kzalloc(sizeof(struct iwl_trans) +
-			sizeof(struct iwl_trans_pcie), GFP_KERNEL);
-	if (!trans) {
-		err = -ENOMEM;
-		goto out;
-	}
+	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
+				&pdev->dev, cfg, &trans_ops_pcie, 0);
+	if (!trans)
+		return ERR_PTR(-ENOMEM);
 
 	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	trans->ops = &trans_ops_pcie;
-	trans->cfg = cfg;
-	trans_lockdep_init(trans);
 	trans_pcie->trans = trans;
 	spin_lock_init(&trans_pcie->irq_lock);
 	spin_lock_init(&trans_pcie->reg_lock);
@@ -2554,25 +2591,8 @@
 	/* Initialize the wait queue for commands */
 	init_waitqueue_head(&trans_pcie->wait_command_queue);
 
-	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
-		 "iwl_cmd_pool:%s", dev_name(trans->dev));
-
-	trans->dev_cmd_headroom = 0;
-	trans->dev_cmd_pool =
-		kmem_cache_create(trans->dev_cmd_pool_name,
-				  sizeof(struct iwl_device_cmd)
-				  + trans->dev_cmd_headroom,
-				  sizeof(void *),
-				  SLAB_HWCACHE_ALIGN,
-				  NULL);
-
-	if (!trans->dev_cmd_pool) {
-		err = -ENOMEM;
-		goto out_pci_disable_msi;
-	}
-
 	if (iwl_pcie_alloc_ict(trans))
-		goto out_free_cmd_pool;
+		goto out_pci_disable_msi;
 
 	err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
 				   iwl_pcie_irq_handler,
@@ -2589,8 +2609,6 @@
 
 out_free_ict:
 	iwl_pcie_free_ict(trans);
-out_free_cmd_pool:
-	kmem_cache_destroy(trans->dev_cmd_pool);
 out_pci_disable_msi:
 	pci_disable_msi(pdev);
 out_pci_release_regions:
@@ -2598,7 +2616,6 @@
 out_pci_disable_device:
 	pci_disable_device(pdev);
 out_no_pci:
-	kfree(trans);
-out:
+	iwl_trans_free(trans);
 	return ERR_PTR(err);
 }
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 5ef8044..2b86c21 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1049,8 +1049,6 @@
 	    !trans_pcie->cmd_hold_nic_awake) {
 		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
 					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
-			udelay(2);
 
 		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
 				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 1a4d558..8317afd 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -835,14 +835,13 @@
  * Events
  */
 
-void lbs_send_disconnect_notification(struct lbs_private *priv)
+void lbs_send_disconnect_notification(struct lbs_private *priv,
+				      bool locally_generated)
 {
 	lbs_deb_enter(LBS_DEB_CFG80211);
 
-	cfg80211_disconnected(priv->dev,
-		0,
-		NULL, 0,
-		GFP_KERNEL);
+	cfg80211_disconnected(priv->dev, 0, NULL, 0, locally_generated,
+			      GFP_KERNEL);
 
 	lbs_deb_leave(LBS_DEB_CFG80211);
 }
@@ -1458,7 +1457,7 @@
 
 	cfg80211_disconnected(priv->dev,
 			reason,
-			NULL, 0,
+			NULL, 0, true,
 			GFP_KERNEL);
 	priv->connect_status = LBS_DISCONNECTED;
 
@@ -2031,7 +2030,7 @@
 	ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd);
 
 	/* TODO: consider doing this at MACREG_INT_CODE_ADHOC_BCN_LOST time */
-	lbs_mac_event_disconnected(priv);
+	lbs_mac_event_disconnected(priv, true);
 
 	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
 	return ret;
diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h
index 10995f5..acccc29 100644
--- a/drivers/net/wireless/libertas/cfg.h
+++ b/drivers/net/wireless/libertas/cfg.h
@@ -10,7 +10,8 @@
 int lbs_cfg_register(struct lbs_private *priv);
 void lbs_cfg_free(struct lbs_private *priv);
 
-void lbs_send_disconnect_notification(struct lbs_private *priv);
+void lbs_send_disconnect_notification(struct lbs_private *priv,
+				      bool locally_generated);
 void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
 
 void lbs_scan_done(struct lbs_private *priv);
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 4279e8a..0c5444b 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -68,7 +68,8 @@
 
 /* From cmdresp.c */
 
-void lbs_mac_event_disconnected(struct lbs_private *priv);
+void lbs_mac_event_disconnected(struct lbs_private *priv,
+				bool locally_generated);
 
 
 
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 65f18f1..e5442e8 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -19,10 +19,13 @@
  * reset link state etc.
  *
  * @priv:	A pointer to struct lbs_private structure
+ * @locally_generated: indicates disconnect was requested locally
+ *		(usually by userspace)
  *
  * returns:	n/a
  */
-void lbs_mac_event_disconnected(struct lbs_private *priv)
+void lbs_mac_event_disconnected(struct lbs_private *priv,
+				bool locally_generated)
 {
 	if (priv->connect_status != LBS_CONNECTED)
 		return;
@@ -36,7 +39,7 @@
 	msleep_interruptible(1000);
 
 	if (priv->wdev->iftype == NL80211_IFTYPE_STATION)
-		lbs_send_disconnect_notification(priv);
+		lbs_send_disconnect_notification(priv, locally_generated);
 
 	/* report disconnect to upper layer */
 	netif_stop_queue(priv->dev);
@@ -229,17 +232,17 @@
 
 	case MACREG_INT_CODE_DEAUTHENTICATED:
 		lbs_deb_cmd("EVENT: deauthenticated\n");
-		lbs_mac_event_disconnected(priv);
+		lbs_mac_event_disconnected(priv, false);
 		break;
 
 	case MACREG_INT_CODE_DISASSOCIATED:
 		lbs_deb_cmd("EVENT: disassociated\n");
-		lbs_mac_event_disconnected(priv);
+		lbs_mac_event_disconnected(priv, false);
 		break;
 
 	case MACREG_INT_CODE_LINK_LOST_NO_SCAN:
 		lbs_deb_cmd("EVENT: link lost\n");
-		lbs_mac_event_disconnected(priv);
+		lbs_mac_event_disconnected(priv, true);
 		break;
 
 	case MACREG_INT_CODE_PS_SLEEP:
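
The libertas changes above thread cfg80211's new locally_generated argument through the driver: peer-initiated deauth/disassoc events report false, while the host-initiated ad-hoc stop, user disconnect, and detected link loss report true. As a hypothetical summary helper (not in the patch):

/* Hypothetical summary of the mapping applied above. */
static bool lbs_disconnect_is_local(u32 event)
{
	switch (event) {
	case MACREG_INT_CODE_DEAUTHENTICATED:
	case MACREG_INT_CODE_DISASSOCIATED:
		return false;	/* peer told us to leave */
	case MACREG_INT_CODE_LINK_LOST_NO_SCAN:
	default:
		return true;	/* decided on our side */
	}
}
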
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index ed02e4b..a47f0ac 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -439,7 +439,7 @@
 	return mc_count;
 }
 
-#define SUPPORTED_FIF_FLAGS  (FIF_PROMISC_IN_BSS | FIF_ALLMULTI)
+#define SUPPORTED_FIF_FLAGS  FIF_ALLMULTI
 static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
 			unsigned int changed_flags,
 			unsigned int *new_flags,
@@ -458,10 +458,7 @@
 		return;
 	}
 
-	if (*new_flags & (FIF_PROMISC_IN_BSS))
-		priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE;
-	else
-		priv->mac_control &= ~CMD_ACT_MAC_PROMISCUOUS_ENABLE;
+	priv->mac_control &= ~CMD_ACT_MAC_PROMISCUOUS_ENABLE;
 	if (*new_flags & (FIF_ALLMULTI) ||
 	    multicast > MRVDRV_MAX_MULTICAST_LIST_SIZE) {
 		priv->mac_control |= CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
@@ -637,7 +634,7 @@
 	priv->tx_skb = NULL;
 
 	hw->queues = 1;
-	hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+	ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
 	hw->extra_tx_headroom = sizeof(struct txpd);
 	memcpy(priv->channels, lbtf_channels, sizeof(lbtf_channels));
 	memcpy(priv->rates, lbtf_rates, sizeof(lbtf_rates));
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index d5c0a1a..99e873d 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1286,7 +1286,7 @@
 	if (control->sta)
 		hwsim_check_sta_magic(control->sta);
 
-	if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE)
+	if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE))
 		ieee80211_get_tx_rates(txi->control.vif, control->sta, skb,
 				       txi->control.rates,
 				       ARRAY_SIZE(txi->control.rates));
@@ -1395,7 +1395,7 @@
 {
 	u32 _pid = ACCESS_ONCE(wmediumd_portid);
 
-	if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE) {
+	if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) {
 		struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
 		ieee80211_get_tx_rates(txi->control.vif, NULL, skb,
 				       txi->control.rates,
@@ -1432,7 +1432,7 @@
 	if (skb == NULL)
 		return;
 	info = IEEE80211_SKB_CB(skb);
-	if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE)
+	if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE))
 		ieee80211_get_tx_rates(vif, NULL, skb,
 				       info->control.rates,
 				       ARRAY_SIZE(info->control.rates));
@@ -1554,8 +1554,6 @@
 	wiphy_debug(hw->wiphy, "%s\n", __func__);
 
 	data->rx_filter = 0;
-	if (*total_flags & FIF_PROMISC_IN_BSS)
-		data->rx_filter |= FIF_PROMISC_IN_BSS;
 	if (*total_flags & FIF_ALLMULTI)
 		data->rx_filter |= FIF_ALLMULTI;
 
@@ -2393,15 +2391,16 @@
 	if (param->p2p_device)
 		hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
 
-	hw->flags = IEEE80211_HW_MFP_CAPABLE |
-		    IEEE80211_HW_SIGNAL_DBM |
-		    IEEE80211_HW_AMPDU_AGGREGATION |
-		    IEEE80211_HW_WANT_MONITOR_VIF |
-		    IEEE80211_HW_QUEUE_CONTROL |
-		    IEEE80211_HW_SUPPORTS_HT_CCK_RATES |
-		    IEEE80211_HW_CHANCTX_STA_CSA;
+	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+	ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+	ieee80211_hw_set(hw, QUEUE_CONTROL);
+	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+	ieee80211_hw_set(hw, MFP_CAPABLE);
+	ieee80211_hw_set(hw, SIGNAL_DBM);
 	if (rctbl)
-		hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
+		ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
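
This driver's flag handling is converted to the new ieee80211_hw_set()/ieee80211_hw_check() helpers, which treat the capability set as a bitmap indexed by the IEEE80211_HW_* enum rather than a single u32 of ORed bits. Roughly (simplified sketch of the macros' shape, not the mac80211 source):

/* Simplified sketch - mac80211's real macros also type-check hw. */
#define ieee80211_hw_set_sketch(hw, flg) \
	__set_bit(IEEE80211_HW_##flg, (hw)->flags)
#define ieee80211_hw_check_sketch(hw, flg) \
	test_bit(IEEE80211_HW_##flg, (hw)->flags)
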
 
 	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
 			    WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
@@ -2438,6 +2437,31 @@
 			sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz);
 			sband->bitrates = data->rates + 4;
 			sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4;
+
+			sband->vht_cap.vht_supported = true;
+			sband->vht_cap.cap =
+				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+				IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
+				IEEE80211_VHT_CAP_RXLDPC |
+				IEEE80211_VHT_CAP_SHORT_GI_80 |
+				IEEE80211_VHT_CAP_SHORT_GI_160 |
+				IEEE80211_VHT_CAP_TXSTBC |
+				IEEE80211_VHT_CAP_RXSTBC_1 |
+				IEEE80211_VHT_CAP_RXSTBC_2 |
+				IEEE80211_VHT_CAP_RXSTBC_3 |
+				IEEE80211_VHT_CAP_RXSTBC_4 |
+				IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+			sband->vht_cap.vht_mcs.rx_mcs_map =
+				cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
+					    IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
+					    IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
+					    IEEE80211_VHT_MCS_SUPPORT_0_9 << 6 |
+					    IEEE80211_VHT_MCS_SUPPORT_0_9 << 8 |
+					    IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
+					    IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
+					    IEEE80211_VHT_MCS_SUPPORT_0_9 << 14);
+			sband->vht_cap.vht_mcs.tx_mcs_map =
+				sband->vht_cap.vht_mcs.rx_mcs_map;
 			break;
 		default:
 			continue;
@@ -2458,31 +2482,6 @@
 		sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
 
 		hw->wiphy->bands[band] = sband;
-
-		sband->vht_cap.vht_supported = true;
-		sband->vht_cap.cap =
-			IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
-			IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
-			IEEE80211_VHT_CAP_RXLDPC |
-			IEEE80211_VHT_CAP_SHORT_GI_80 |
-			IEEE80211_VHT_CAP_SHORT_GI_160 |
-			IEEE80211_VHT_CAP_TXSTBC |
-			IEEE80211_VHT_CAP_RXSTBC_1 |
-			IEEE80211_VHT_CAP_RXSTBC_2 |
-			IEEE80211_VHT_CAP_RXSTBC_3 |
-			IEEE80211_VHT_CAP_RXSTBC_4 |
-			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
-		sband->vht_cap.vht_mcs.rx_mcs_map =
-			cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_8 << 0 |
-				    IEEE80211_VHT_MCS_SUPPORT_0_8 << 2 |
-				    IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
-				    IEEE80211_VHT_MCS_SUPPORT_0_8 << 6 |
-				    IEEE80211_VHT_MCS_SUPPORT_0_8 << 8 |
-				    IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
-				    IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
-				    IEEE80211_VHT_MCS_SUPPORT_0_8 << 14);
-		sband->vht_cap.vht_mcs.tx_mcs_map =
-			sband->vht_cap.vht_mcs.rx_mcs_map;
 	}
 
 	/* By default all radios belong to the first group */
@@ -2510,7 +2509,7 @@
 	}
 
 	if (param->no_vif)
-		hw->flags |= IEEE80211_HW_NO_AUTO_VIF;
+		ieee80211_hw_set(hw, NO_AUTO_VIF);
 
 	err = ieee80211_register_hw(hw);
 	if (err < 0) {
diff --git a/drivers/net/wireless/mediatek/Kconfig b/drivers/net/wireless/mediatek/Kconfig
new file mode 100644
index 0000000..cba300c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/Kconfig
@@ -0,0 +1,10 @@
+menuconfig WL_MEDIATEK
+	bool "Mediatek Wireless LAN support"
+	---help---
+	  Enable community drivers for MediaTek WiFi devices.
+	  Those drivers make use of the Linux mac80211 stack.
+
+if WL_MEDIATEK
+source "drivers/net/wireless/mediatek/mt7601u/Kconfig"
+endif # WL_MEDIATEK
diff --git a/drivers/net/wireless/mediatek/Makefile b/drivers/net/wireless/mediatek/Makefile
new file mode 100644
index 0000000..9d5f182
--- /dev/null
+++ b/drivers/net/wireless/mediatek/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MT7601U)	+= mt7601u/
diff --git a/drivers/net/wireless/mediatek/mt7601u/Kconfig b/drivers/net/wireless/mediatek/mt7601u/Kconfig
new file mode 100644
index 0000000..f46bed9
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/Kconfig
@@ -0,0 +1,6 @@
+config MT7601U
+	tristate "MediaTek MT7601U (USB) support"
+	depends on MAC80211
+	depends on USB
+	---help---
+	  This adds support for MT7601U-based wireless USB dongles.
diff --git a/drivers/net/wireless/mediatek/mt7601u/Makefile b/drivers/net/wireless/mediatek/mt7601u/Makefile
new file mode 100644
index 0000000..ea9ed8a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -D__CHECK_ENDIAN__
+
+obj-$(CONFIG_MT7601U)	+= mt7601u.o
+
+mt7601u-objs	= \
+	usb.o init.o main.o mcu.o trace.o dma.o core.o eeprom.o phy.o \
+	mac.o util.o debugfs.o tx.o
+
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt7601u/core.c b/drivers/net/wireless/mediatek/mt7601u/core.c
new file mode 100644
index 0000000..0aabd79
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/core.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+
+int mt7601u_wait_asic_ready(struct mt7601u_dev *dev)
+{
+	int i = 100;
+	u32 val;
+
+	do {
+		if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+			return -EIO;
+
+		val = mt7601u_rr(dev, MT_MAC_CSR0);
+		if (val && ~val)
+			return 0;
+
+		udelay(10);
+	} while (i--);
+
+	return -EIO;
+}
+
+bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+	       int timeout)
+{
+	u32 cur;
+
+	timeout /= 10;
+	do {
+		if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+			return false;
+
+		cur = mt7601u_rr(dev, offset) & mask;
+		if (cur == val)
+			return true;
+
+		udelay(10);
+	} while (timeout-- > 0);
+
+	dev_err(dev->dev, "Error: Time out with reg %08x\n", offset);
+
+	return false;
+}
+
+bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+		    int timeout)
+{
+	u32 cur;
+
+	timeout /= 10;
+	do {
+		if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+			return false;
+
+		cur = mt7601u_rr(dev, offset) & mask;
+		if (cur == val)
+			return true;
+
+		msleep(10);
+	} while (timeout-- > 0);
+
+	dev_err(dev->dev, "Error: Time out with reg %08x\n", offset);
+
+	return false;
+}
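
mt76_poll() and mt76_poll_msec() above busy-wait for (reg & mask) == val at 10 us and 10 ms granularity respectively, giving up early if the device has been unplugged. A typical caller looks like this (register name and timeout hypothetical):

	/* Hypothetical use: wait up to 1000 us for a ready bit. */
	if (!mt76_poll(dev, MT_SOME_STATUS_REG, BIT(0), BIT(0), 1000))
		return -ETIMEDOUT;
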
diff --git a/drivers/net/wireless/mediatek/mt7601u/debugfs.c b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
new file mode 100644
index 0000000..fc00847
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+
+#include "mt7601u.h"
+#include "eeprom.h"
+
+static int
+mt76_reg_set(void *data, u64 val)
+{
+	struct mt7601u_dev *dev = data;
+
+	mt76_wr(dev, dev->debugfs_reg, val);
+	return 0;
+}
+
+static int
+mt76_reg_get(void *data, u64 *val)
+{
+	struct mt7601u_dev *dev = data;
+
+	*val = mt76_rr(dev, dev->debugfs_reg);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
+
+static int
+mt7601u_ampdu_stat_read(struct seq_file *file, void *data)
+{
+	struct mt7601u_dev *dev = file->private;
+	int i, j;
+
+#define stat_printf(grp, off, name)					\
+	seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off])
+
+	stat_printf(rx_stat, 0, rx_crc_err);
+	stat_printf(rx_stat, 1, rx_phy_err);
+	stat_printf(rx_stat, 2, rx_false_cca);
+	stat_printf(rx_stat, 3, rx_plcp_err);
+	stat_printf(rx_stat, 4, rx_fifo_overflow);
+	stat_printf(rx_stat, 5, rx_duplicate);
+
+	stat_printf(tx_stat, 0, tx_fail_cnt);
+	stat_printf(tx_stat, 1, tx_bcn_cnt);
+	stat_printf(tx_stat, 2, tx_success);
+	stat_printf(tx_stat, 3, tx_retransmit);
+	stat_printf(tx_stat, 4, tx_zero_len);
+	stat_printf(tx_stat, 5, tx_underflow);
+
+	stat_printf(aggr_stat, 0, non_aggr_tx);
+	stat_printf(aggr_stat, 1, aggr_tx);
+
+	stat_printf(zero_len_del, 0, tx_zero_len_del);
+	stat_printf(zero_len_del, 1, rx_zero_len_del);
+#undef stat_printf
+
+	seq_puts(file, "Aggregations stats:\n");
+	for (i = 0; i < 4; i++) {
+		for (j = 0; j < 8; j++)
+			seq_printf(file, "%08llx ",
+				   dev->stats.aggr_n[i * 8 + j]);
+		seq_putc(file, '\n');
+	}
+
+	seq_printf(file, "recent average AMPDU len: %d\n",
+		   atomic_read(&dev->avg_ampdu_len));
+
+	return 0;
+}
+
+static int
+mt7601u_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, mt7601u_ampdu_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_ampdu_stat = {
+	.open = mt7601u_ampdu_stat_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int
+mt7601u_eeprom_param_read(struct seq_file *file, void *data)
+{
+	struct mt7601u_dev *dev = file->private;
+	struct mt7601u_rate_power *rp = &dev->ee->power_rate_table;
+	struct tssi_data *td = &dev->ee->tssi_data;
+	int i;
+
+	seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off);
+	seq_printf(file, "RSSI offset: %hhx %hhx\n",
+		   dev->ee->rssi_offset[0], dev->ee->rssi_offset[1]);
+	seq_printf(file, "Reference temp: %hhx\n", dev->ee->ref_temp);
+	seq_printf(file, "LNA gain: %hhx\n", dev->ee->lna_gain);
+	seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start,
+		   dev->ee->reg.start + dev->ee->reg.num - 1);
+
+	seq_puts(file, "Per rate power:\n");
+	for (i = 0; i < 2; i++)
+		seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
+			   rp->cck[i].raw, rp->cck[i].bw20, rp->cck[i].bw40);
+	for (i = 0; i < 4; i++)
+		seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
+			   rp->ofdm[i].raw, rp->ofdm[i].bw20, rp->ofdm[i].bw40);
+	for (i = 0; i < 4; i++)
+		seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
+			   rp->ht[i].raw, rp->ht[i].bw20, rp->ht[i].bw40);
+
+	seq_puts(file, "Per channel power:\n");
+	for (i = 0; i < 7; i++)
+		seq_printf(file, "\t tx_power  ch%u:%02hhx ch%u:%02hhx\n",
+			   i * 2 + 1, dev->ee->chan_pwr[i * 2],
+			   i * 2 + 2, dev->ee->chan_pwr[i * 2 + 1]);
+
+	if (!dev->ee->tssi_enabled)
+		return 0;
+
+	seq_puts(file, "TSSI:\n");
+	seq_printf(file, "\t slope:%02hhx\n", td->slope);
+	seq_printf(file, "\t offset=%02hhx %02hhx %02hhx\n",
+		   td->offset[0], td->offset[1], td->offset[2]);
+	seq_printf(file, "\t delta_off:%08x\n", td->tx0_delta_offset);
+
+	return 0;
+}
+
+static int
+mt7601u_eeprom_param_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, mt7601u_eeprom_param_read, inode->i_private);
+}
+
+static const struct file_operations fops_eeprom_param = {
+	.open = mt7601u_eeprom_param_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+void mt7601u_init_debugfs(struct mt7601u_dev *dev)
+{
+	struct dentry *dir;
+
+	dir = debugfs_create_dir("mt7601u", dev->hw->wiphy->debugfsdir);
+	if (!dir)
+		return;
+
+	debugfs_create_u8("temperature", S_IRUSR, dir, &dev->raw_temp);
+	debugfs_create_u32("temp_mode", S_IRUSR, dir, &dev->temp_mode);
+
+	debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
+	debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev,
+			    &fops_regval);
+	debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
+	debugfs_create_file("eeprom_param", S_IRUSR, dir, dev,
+			    &fops_eeprom_param);
+}
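
The regidx/regval pair above is the usual debugfs peek/poke idiom: userspace writes a register offset to regidx, then reads or writes regval, which the fops translate into mt76_rr()/mt76_wr() on the stored offset. In terms of the handlers above (offset value hypothetical):

	/* What the regval attribute does with a previously set regidx: */
	dev->debugfs_reg = 0x1000;			/* echo 0x1000 > regidx */
	val = mt76_rr(dev, dev->debugfs_reg);		/* cat regval */
	mt76_wr(dev, dev->debugfs_reg, val | BIT(0));	/* echo ... > regval */
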
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
new file mode 100644
index 0000000..7217da4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -0,0 +1,505 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "dma.h"
+#include "usb.h"
+#include "trace.h"
+
+static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
+				 struct mt7601u_dma_buf_rx *e, gfp_t gfp);
+
+static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
+{
+	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
+	unsigned int hdrlen;
+
+	if (unlikely(len < 10))
+		return 0;
+	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+	if (unlikely(hdrlen > len))
+		return 0;
+	return hdrlen;
+}
+
+static struct sk_buff *
+mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
+			void *data, u32 seg_len, u32 truesize, struct page *p)
+{
+	struct sk_buff *skb;
+	u32 true_len, hdr_len = 0, copy, frag;
+
+	skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
+	if (!skb)
+		return NULL;
+
+	true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
+	if (!true_len || true_len > seg_len)
+		goto bad_frame;
+
+	hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
+	if (!hdr_len)
+		goto bad_frame;
+
+	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
+		memcpy(skb_put(skb, hdr_len), data, hdr_len);
+
+		data += hdr_len + 2;
+		true_len -= hdr_len;
+		hdr_len = 0;
+	}
+
+	/* If not doing paged RX, the allocated skb will always have enough space */
+	copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
+	frag = true_len - copy;
+
+	memcpy(skb_put(skb, copy), data, copy);
+	data += copy;
+
+	if (frag) {
+		skb_add_rx_frag(skb, 0, p, data - page_address(p),
+				frag, truesize);
+		get_page(p);
+	}
+
+	return skb;
+
+bad_frame:
+	dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
+			    true_len, hdr_len);
+	dev_kfree_skb(skb);
+	return NULL;
+}
+
+static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
+				   u32 seg_len, struct page *p)
+{
+	struct sk_buff *skb;
+	struct mt7601u_rxwi *rxwi;
+	u32 fce_info, truesize = seg_len;
+
+	/* The DMA_INFO field at the beginning of the segment contains only
+	 * some of the information; we need to read the FCE descriptor from
+	 * the end.
+	 */
+	fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
+	seg_len -= MT_FCE_INFO_LEN;
+
+	data += MT_DMA_HDR_LEN;
+	seg_len -= MT_DMA_HDR_LEN;
+
+	rxwi = (struct mt7601u_rxwi *) data;
+	data += sizeof(struct mt7601u_rxwi);
+	seg_len -= sizeof(struct mt7601u_rxwi);
+
+	if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
+		dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
+	if (unlikely(MT76_GET(MT_RXD_INFO_TYPE, fce_info)))
+		dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");
+
+	trace_mt_rx(dev, rxwi, fce_info);
+
+	skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
+	if (!skb)
+		return;
+
+	ieee80211_rx_ni(dev->hw, skb);
+}
+
+static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
+{
+	u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
+		sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
+	u16 dma_len = get_unaligned_le16(data);
+
+	if (data_len < min_seg_len ||
+	    WARN_ON(!dma_len) ||
+	    WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
+	    WARN_ON(dma_len & 0x3))
+		return 0;
+
+	return MT_DMA_HDRS + dma_len;
+}
+
+static void
+mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
+{
+	u32 seg_len, data_len = e->urb->actual_length;
+	u8 *data = page_address(e->p);
+	struct page *new_p = NULL;
+	int cnt = 0;
+
+	if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
+		return;
+
+	/* If there is very little data in the buffer, copy it into the skbs
+	 * instead of doing paged RX - no replacement page is needed then.
+	 */
+	if (data_len > 512)
+		new_p = dev_alloc_pages(MT_RX_ORDER);
+
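+	/* With paged RX the skbs keep references to the current page and
+	 * the freshly allocated page takes its place in the RX ring.
+	 */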
+	while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
+		mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);
+
+		data_len -= seg_len;
+		data += seg_len;
+		cnt++;
+	}
+
+	if (cnt > 1)
+		trace_mt_rx_dma_aggr(dev, cnt, !!new_p);
+
+	if (new_p) {
+		/* we have one extra ref from the allocator */
+		__free_pages(e->p, MT_RX_ORDER);
+
+		e->p = new_p;
+	}
+}
+
+static struct mt7601u_dma_buf_rx *
+mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
+{
+	struct mt7601u_rx_queue *q = &dev->rx_q;
+	struct mt7601u_dma_buf_rx *buf = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->rx_lock, flags);
+
+	if (!q->pending)
+		goto out;
+
+	buf = &q->e[q->start];
+	q->pending--;
+	q->start = (q->start + 1) % q->entries;
+out:
+	spin_unlock_irqrestore(&dev->rx_lock, flags);
+
+	return buf;
+}
+
+static void mt7601u_complete_rx(struct urb *urb)
+{
+	struct mt7601u_dev *dev = urb->context;
+	struct mt7601u_rx_queue *q = &dev->rx_q;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->rx_lock, flags);
+
+	if (mt7601u_urb_has_error(urb))
+		dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status);
+	if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
+		goto out;
+
+	q->end = (q->end + 1) % q->entries;
+	q->pending++;
+	tasklet_schedule(&dev->rx_tasklet);
+out:
+	spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static void mt7601u_rx_tasklet(unsigned long data)
+{
+	struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
+	struct mt7601u_dma_buf_rx *e;
+
+	while ((e = mt7601u_rx_get_pending_entry(dev))) {
+		if (e->urb->status)
+			continue;
+
+		mt7601u_rx_process_entry(dev, e);
+		mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
+	}
+}
+
+static void mt7601u_complete_tx(struct urb *urb)
+{
+	struct mt7601u_tx_queue *q = urb->context;
+	struct mt7601u_dev *dev = q->dev;
+	struct sk_buff *skb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->tx_lock, flags);
+
+	if (mt7601u_urb_has_error(urb))
+		dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
+	if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
+		goto out;
+
+	skb = q->e[q->start].skb;
+	trace_mt_tx_dma_done(dev, skb);
+
+	mt7601u_tx_status(dev, skb);
+
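+	/* The queue is stopped when the ring fills up - wake it once the
+	 * ring has drained down to 7/8 of its size.
+	 */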
+	if (q->used == q->entries - q->entries / 8)
+		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));
+
+	q->start = (q->start + 1) % q->entries;
+	q->used--;
+
+	if (urb->status)
+		goto out;
+
+	set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
+	if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
+		queue_delayed_work(dev->stat_wq, &dev->stat_work,
+				   msecs_to_jiffies(10));
+out:
+	spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
+				 struct sk_buff *skb, u8 ep)
+{
+	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+	unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
+	struct mt7601u_dma_buf_tx *e;
+	struct mt7601u_tx_queue *q = &dev->tx_q[ep];
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&dev->tx_lock, flags);
+
+	if (WARN_ON(q->entries <= q->used)) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	e = &q->e[q->end];
+	e->skb = skb;
+	usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
+			  mt7601u_complete_tx, q);
+	ret = usb_submit_urb(e->urb, GFP_ATOMIC);
+	if (ret) {
+		/* Special-handle ENODEV from TX urb submission because it will
+		 * often be the first ENODEV we see after device is removed.
+		 */
+		if (ret == -ENODEV)
+			set_bit(MT7601U_STATE_REMOVED, &dev->state);
+		else
+			dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
+				ret);
+		goto out;
+	}
+
+	q->end = (q->end + 1) % q->entries;
+	q->used++;
+
+	if (q->used >= q->entries)
+		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+out:
+	spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+	return ret;
+}
+
+/* Map hardware Q to USB endpoint number */
+static u8 q2ep(u8 qid)
+{
+	/* TODO: take management packets to queue 5 */
+	return qid + 1;
+}
+
+/* Map USB endpoint number to Q id in the DMA engine */
+static enum mt76_qsel ep2dmaq(u8 ep)
+{
+	if (ep == 5)
+		return MT_QSEL_MGMT;
+	return MT_QSEL_EDCA;
+}
+
+int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
+			   struct mt76_wcid *wcid, int hw_q)
+{
+	u8 ep = q2ep(hw_q);
+	u32 dma_flags;
+	int ret;
+
+	dma_flags = MT_TXD_PKT_INFO_80211;
+	if (wcid->hw_key_idx == 0xff)
+		dma_flags |= MT_TXD_PKT_INFO_WIV;
+
+	ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
+	if (ret)
+		return ret;
+
+	ret = mt7601u_dma_submit_tx(dev, skb, ep);
+	if (ret) {
+		ieee80211_free_txskb(dev->hw, skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void mt7601u_kill_rx(struct mt7601u_dev *dev)
+{
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->rx_lock, flags);
+
+	for (i = 0; i < dev->rx_q.entries; i++) {
+		int next = dev->rx_q.end;
+
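+		/* usb_poison_urb() sleeps waiting for the URB to complete,
+		 * so it must not be called under the spinlock.
+		 */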
+		spin_unlock_irqrestore(&dev->rx_lock, flags);
+		usb_poison_urb(dev->rx_q.e[next].urb);
+		spin_lock_irqsave(&dev->rx_lock, flags);
+	}
+
+	spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
+				 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
+{
+	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+	u8 *buf = page_address(e->p);
+	unsigned pipe;
+	int ret;
+
+	pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);
+
+	usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
+			  mt7601u_complete_rx, dev);
+
+	trace_mt_submit_urb(dev, e->urb);
+	ret = usb_submit_urb(e->urb, gfp);
+	if (ret)
+		dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);
+
+	return ret;
+}
+
+static int mt7601u_submit_rx(struct mt7601u_dev *dev)
+{
+	int i, ret;
+
+	for (i = 0; i < dev->rx_q.entries; i++) {
+		ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void mt7601u_free_rx(struct mt7601u_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->rx_q.entries; i++) {
+		__free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
+		usb_free_urb(dev->rx_q.e[i].urb);
+	}
+}
+
+static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
+{
+	int i;
+
+	memset(&dev->rx_q, 0, sizeof(dev->rx_q));
+	dev->rx_q.dev = dev;
+	dev->rx_q.entries = N_RX_ENTRIES;
+
+	for (i = 0; i < N_RX_ENTRIES; i++) {
+		dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+		dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
+
+		if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
+{
+	int i;
+
+	WARN_ON(q->used);
+
+	for (i = 0; i < q->entries; i++)  {
+		usb_poison_urb(q->e[i].urb);
+		usb_free_urb(q->e[i].urb);
+	}
+}
+
+static void mt7601u_free_tx(struct mt7601u_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < __MT_EP_OUT_MAX; i++)
+		mt7601u_free_tx_queue(&dev->tx_q[i]);
+}
+
+static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
+				  struct mt7601u_tx_queue *q)
+{
+	int i;
+
+	q->dev = dev;
+	q->entries = N_TX_ENTRIES;
+
+	for (i = 0; i < N_TX_ENTRIES; i++) {
+		q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!q->e[i].urb)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
+{
+	int i;
+
+	dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
+				 sizeof(*dev->tx_q), GFP_KERNEL);
+
+	for (i = 0; i < __MT_EP_OUT_MAX; i++)
+		if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
+			return -ENOMEM;
+
+	return 0;
+}
+
+int mt7601u_dma_init(struct mt7601u_dev *dev)
+{
+	int ret = -ENOMEM;
+
+	tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);
+
+	ret = mt7601u_alloc_tx(dev);
+	if (ret)
+		goto err;
+	ret = mt7601u_alloc_rx(dev);
+	if (ret)
+		goto err;
+
+	ret = mt7601u_submit_rx(dev);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	mt7601u_dma_cleanup(dev);
+	return ret;
+}
+
+void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
+{
+	mt7601u_kill_rx(dev);
+
+	tasklet_kill(&dev->rx_tasklet);
+
+	mt7601u_free_rx(dev);
+	mt7601u_free_tx(dev);
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.h b/drivers/net/wireless/mediatek/mt7601u/dma.h
new file mode 100644
index 0000000..978e8a9
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_DMA_H
+#define __MT7601U_DMA_H
+
+#include <asm/unaligned.h>
+#include <linux/skbuff.h>
+
+#include "util.h"
+
+#define MT_DMA_HDR_LEN			4
+#define MT_RX_INFO_LEN			4
+#define MT_FCE_INFO_LEN			4
+#define MT_DMA_HDRS			(MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
+
+/* Common Tx DMA descriptor fields */
+#define MT_TXD_INFO_LEN			GENMASK(15, 0)
+#define MT_TXD_INFO_D_PORT		GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE		GENMASK(31, 30)
+
+enum mt76_msg_port {
+	WLAN_PORT,
+	CPU_RX_PORT,
+	CPU_TX_PORT,
+	HOST_PORT,
+	VIRTUAL_CPU_RX_PORT,
+	VIRTUAL_CPU_TX_PORT,
+	DISCARD,
+};
+
+enum mt76_info_type {
+	DMA_PACKET,
+	DMA_COMMAND,
+};
+
+/* Tx DMA packet specific flags */
+#define MT_TXD_PKT_INFO_NEXT_VLD	BIT(16)
+#define MT_TXD_PKT_INFO_TX_BURST	BIT(17)
+#define MT_TXD_PKT_INFO_80211		BIT(19)
+#define MT_TXD_PKT_INFO_TSO		BIT(20)
+#define MT_TXD_PKT_INFO_CSO		BIT(21)
+#define MT_TXD_PKT_INFO_WIV		BIT(24)
+#define MT_TXD_PKT_INFO_QSEL		GENMASK(26, 25)
+
+enum mt76_qsel {
+	MT_QSEL_MGMT,
+	MT_QSEL_HCCA,
+	MT_QSEL_EDCA,
+	MT_QSEL_EDCA_2,
+};
+
+/* Tx DMA MCU command specific flags */
+#define MT_TXD_CMD_INFO_SEQ		GENMASK(19, 16)
+#define MT_TXD_CMD_INFO_TYPE		GENMASK(26, 20)
+
+static inline int mt7601u_dma_skb_wrap(struct sk_buff *skb,
+				       enum mt76_msg_port d_port,
+				       enum mt76_info_type type, u32 flags)
+{
+	u32 info;
+
+	/* Buffer layout:
+	 *	|   4B   | xfer len |      pad       |  4B  |
+	 *	| TXINFO | pkt/cmd  | zero pad to 4B | zero |
+	 *
+	 * length field of TXINFO should be set to 'xfer len'.
+	 */
+
+	info = flags |
+		MT76_SET(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+		MT76_SET(MT_TXD_INFO_D_PORT, d_port) |
+		MT76_SET(MT_TXD_INFO_TYPE, type);
+
+	put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+	return skb_put_padto(skb, round_up(skb->len, 4) + 4);
+}
+
+static inline int
+mt7601u_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
+{
+	flags |= MT76_SET(MT_TXD_PKT_INFO_QSEL, qsel);
+	return mt7601u_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
+}
+
+/* Common Rx DMA descriptor fields */
+#define MT_RXD_INFO_LEN			GENMASK(13, 0)
+#define MT_RXD_INFO_PCIE_INTR		BIT(24)
+#define MT_RXD_INFO_QSEL		GENMASK(26, 25)
+#define MT_RXD_INFO_PORT		GENMASK(29, 27)
+#define MT_RXD_INFO_TYPE		GENMASK(31, 30)
+
+/* Rx DMA packet specific flags */
+#define MT_RXD_PKT_INFO_UDP_ERR		BIT(16)
+#define MT_RXD_PKT_INFO_TCP_ERR		BIT(17)
+#define MT_RXD_PKT_INFO_IP_ERR		BIT(18)
+#define MT_RXD_PKT_INFO_PKT_80211	BIT(19)
+#define MT_RXD_PKT_INFO_L3L4_DONE	BIT(20)
+#define MT_RXD_PKT_INFO_MAC_LEN		GENMASK(23, 21)
+
+/* Rx DMA MCU command specific flags */
+#define MT_RXD_CMD_INFO_SELF_GEN	BIT(15)
+#define MT_RXD_CMD_INFO_CMD_SEQ		GENMASK(19, 16)
+#define MT_RXD_CMD_INFO_EVT_TYPE	GENMASK(23, 20)
+
+enum mt76_evt_type {
+	CMD_DONE,
+	CMD_ERROR,
+	CMD_RETRY,
+	EVENT_PWR_RSP,
+	EVENT_WOW_RSP,
+	EVENT_CARRIER_DETECT_RSP,
+	EVENT_DFS_DETECT_RSP,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
new file mode 100644
index 0000000..8d8ee03
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include "mt7601u.h"
+#include "eeprom.h"
+
+static bool
+field_valid(u8 val)
+{
+	return val != 0xff;
+}
+
+static s8
+field_validate(u8 val)
+{
+	if (!field_valid(val))
+		return 0;
+
+	return val;
+}
+
+static int
+mt7601u_efuse_read(struct mt7601u_dev *dev, u16 addr, u8 *data,
+		   enum mt7601u_eeprom_access_modes mode)
+{
+	u32 val;
+	int i;
+
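+	/* The efuse is read in 16-byte blocks - round the address down to
+	 * a 16-byte boundary and read out four 32-bit data registers.
+	 */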
+	val = mt76_rr(dev, MT_EFUSE_CTRL);
+	val &= ~(MT_EFUSE_CTRL_AIN |
+		 MT_EFUSE_CTRL_MODE);
+	val |= MT76_SET(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
+	       MT76_SET(MT_EFUSE_CTRL_MODE, mode) |
+	       MT_EFUSE_CTRL_KICK;
+	mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+	if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+		return -ETIMEDOUT;
+
+	val = mt76_rr(dev, MT_EFUSE_CTRL);
+	if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+		/* Parts of the eeprom not in the usage map (0x80-0xc0, 0xf0)
+		 * will not return valid data, but that's OK.
+		 */
+		memset(data, 0xff, 16);
+		return 0;
+	}
+
+	for (i = 0; i < 4; i++) {
+		val = mt76_rr(dev, MT_EFUSE_DATA(i));
+		put_unaligned_le32(val, data + 4 * i);
+	}
+
+	return 0;
+}
+
+static int
+mt7601u_efuse_physical_size_check(struct mt7601u_dev *dev)
+{
+	const int map_reads = DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16);
+	u8 data[map_reads * 16];
+	int ret, i;
+	u32 start = 0, end = 0, cnt_free;
+
+	for (i = 0; i < map_reads; i++) {
+		ret = mt7601u_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16,
+					 data + i * 16, MT_EE_PHYSICAL_READ);
+		if (ret)
+			return ret;
+	}
+
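+	/* Find the span of free (zero) bytes in the usage map - if almost
+	 * no entries are programmed the device expects a default EEPROM
+	 * file which this driver does not provide.
+	 */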
+	for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++)
+		if (!data[i]) {
+			if (!start)
+				start = MT_EE_USAGE_MAP_START + i;
+			end = MT_EE_USAGE_MAP_START + i;
+		}
+	cnt_free = end - start + 1;
+
+	if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) {
+		dev_err(dev->dev, "Error: your device needs default EEPROM file and this driver doesn't support it!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static bool
+mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+
+	/* 0xffff means the field is unprogrammed. Note: ~nic_conf1 would be
+	 * promoted to int here and could never be zero.
+	 */
+	return nic_conf1 != 0xffff &&
+	       (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
+static void
+mt7601u_set_chip_cap(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0);
+	u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+
+	if (!field_valid(nic_conf1 & 0xff))
+		nic_conf1 &= 0xff00;
+
+	dev->ee->tssi_enabled = mt7601u_has_tssi(dev, eeprom) &&
+				!(nic_conf1 & MT_EE_NIC_CONF_1_TEMP_TX_ALC);
+
+	if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
+		dev_err(dev->dev,
+			"Error: this driver does not support HW RF ctrl\n");
+
+	if (!field_valid(nic_conf0 >> 8))
+		return;
+
+	if (MT76_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
+	    MT76_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
+		dev_err(dev->dev,
+			"Error: device has more than 1 RX/TX stream!\n");
+}
+
+static int
+mt7601u_set_macaddr(struct mt7601u_dev *dev, const u8 *eeprom)
+{
+	const void *src = eeprom + MT_EE_MAC_ADDR;
+
+	ether_addr_copy(dev->macaddr, src);
+
+	if (!is_valid_ether_addr(dev->macaddr)) {
+		eth_random_addr(dev->macaddr);
+		dev_info(dev->dev,
+			 "Invalid MAC address, using random address %pM\n",
+			 dev->macaddr);
+	}
+
+	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
+	mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) |
+		MT76_SET(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+
+	return 0;
+}
+
+static void mt7601u_set_channel_target_power(struct mt7601u_dev *dev,
+					     u8 *eeprom, u8 max_pwr)
+{
+	u8 trgt_pwr = eeprom[MT_EE_TX_TSSI_TARGET_POWER];
+
+	if (trgt_pwr > max_pwr || !trgt_pwr) {
+		dev_warn(dev->dev,
+			 "Warning: EEPROM target power invalid %hhx!\n",
+			 trgt_pwr);
+		trgt_pwr = 0x20;
+	}
+
+	memset(dev->ee->chan_pwr, trgt_pwr, sizeof(dev->ee->chan_pwr));
+}
+
+static void
+mt7601u_set_channel_power(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	u32 i, val;
+	u8 max_pwr;
+
+	val = mt7601u_rr(dev, MT_TX_ALC_CFG_0);
+	max_pwr = MT76_GET(MT_TX_ALC_CFG_0_LIMIT_0, val);
+
+	if (mt7601u_has_tssi(dev, eeprom)) {
+		mt7601u_set_channel_target_power(dev, eeprom, max_pwr);
+		return;
+	}
+
+	for (i = 0; i < 14; i++) {
+		s8 power = field_validate(eeprom[MT_EE_TX_POWER_OFFSET + i]);
+
+		if (power > max_pwr || power < 0)
+			power = MT7601U_DEFAULT_TX_POWER;
+
+		dev->ee->chan_pwr[i] = power;
+	}
+}
+
+static void
+mt7601u_set_country_reg(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	/* Note: - region 31 is not valid for mt7601u (see rtmp_init.c)
+	 *	 - comments in rtmp_def.h are incorrect (see rt_channel.c)
+	 */
+	static const struct reg_channel_bounds chan_bounds[] = {
+		/* EEPROM country regions 0 - 7 */
+		{  1, 11 },	{  1, 13 },	{ 10,  2 },	{ 10,  4 },
+		{ 14,  1 },	{  1, 14 },	{  3,  7 },	{  5,  9 },
+		/* EEPROM country regions 32 - 33 */
+		{  1, 11 },	{  1, 14 }
+	};
+	u8 val = eeprom[MT_EE_COUNTRY_REGION];
+	int idx = -1;
+
+	if (val < 8)
+		idx = val;
+	if (val > 31 && val < 33)
+		idx = val - 32 + 8;
+
+	if (idx != -1)
+		dev_info(dev->dev,
+			 "EEPROM country region %02hhx (channels %hhd-%hhd)\n",
+			 val, chan_bounds[idx].start,
+			 chan_bounds[idx].start + chan_bounds[idx].num - 1);
+	else
+		idx = 5; /* channels 1 - 14 */
+
+	dev->ee->reg = chan_bounds[idx];
+
+	/* TODO: country region 33 is special - phy should be set to B-mode
+	 *	 before entering channel 14 (see sta/connect.c)
+	 */
+}
+
+static void
+mt7601u_set_rf_freq_off(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	u8 comp;
+
+	dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]);
+	comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]);
+
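+	/* BIT(7) of the compensation byte selects the sign, the low 7 bits
+	 * hold the magnitude.
+	 */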
+	if (comp & BIT(7))
+		dev->ee->rf_freq_off -= comp & 0x7f;
+	else
+		dev->ee->rf_freq_off += comp;
+}
+
+static void
+mt7601u_set_rssi_offset(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	int i;
+	s8 *rssi_offset = dev->ee->rssi_offset;
+
+	for (i = 0; i < 2; i++) {
+		rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i];
+
+		if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
+			dev_warn(dev->dev,
+				 "Warning: EEPROM RSSI is invalid %02hhx\n",
+				 rssi_offset[i]);
+			rssi_offset[i] = 0;
+		}
+	}
+}
+
+static void
+mt7601u_extra_power_over_mac(struct mt7601u_dev *dev)
+{
+	u32 val;
+
+	val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_1) & 0x0000ff00) >> 8);
+	val |= ((mt7601u_rr(dev, MT_TX_PWR_CFG_2) & 0x0000ff00) << 8);
+	mt7601u_wr(dev, MT_TX_PWR_CFG_7, val);
+
+	val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8);
+	mt7601u_wr(dev, MT_TX_PWR_CFG_9, val);
+}
+
+static void
+mt7601u_set_power_rate(struct power_per_rate *rate, s8 delta, u8 value)
+{
+	/* A value of 0xff means invalid.
+	 * Note: the vendor driver does not handle this case.
+	 */
+	if (value == 0xff)
+		return;
+
+	rate->raw = s6_validate(value);
+	rate->bw20 = s6_to_int(value);
+	/* Note: the vendor driver caps the value to the s6 range right away */
+	rate->bw40 = rate->bw20 + delta;
+}
+
+static void
+mt7601u_save_power_rate(struct mt7601u_dev *dev, s8 delta, u32 val, int i)
+{
+	struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+
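+	/* Each 32-bit EEPROM word holds four per-rate power values - one
+	 * byte per pair of rates (see struct mt7601u_rate_power).
+	 */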
+	switch (i) {
+	case 0:
+		mt7601u_set_power_rate(&t->cck[0], delta, (val >> 0) & 0xff);
+		mt7601u_set_power_rate(&t->cck[1], delta, (val >> 8) & 0xff);
+		/* Save cck bw20 for fixups of channel 14 */
+		dev->ee->real_cck_bw20[0] = t->cck[0].bw20;
+		dev->ee->real_cck_bw20[1] = t->cck[1].bw20;
+
+		mt7601u_set_power_rate(&t->ofdm[0], delta, (val >> 16) & 0xff);
+		mt7601u_set_power_rate(&t->ofdm[1], delta, (val >> 24) & 0xff);
+		break;
+	case 1:
+		mt7601u_set_power_rate(&t->ofdm[2], delta, (val >> 0) & 0xff);
+		mt7601u_set_power_rate(&t->ofdm[3], delta, (val >> 8) & 0xff);
+		mt7601u_set_power_rate(&t->ht[0], delta, (val >> 16) & 0xff);
+		mt7601u_set_power_rate(&t->ht[1], delta, (val >> 24) & 0xff);
+		break;
+	case 2:
+		mt7601u_set_power_rate(&t->ht[2], delta, (val >> 0) & 0xff);
+		mt7601u_set_power_rate(&t->ht[3], delta, (val >> 8) & 0xff);
+		break;
+	}
+}
+
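+/* Decode a TX power delta byte: BIT(7) enables the delta, BIT(6) makes
+ * it negative and the low 5 bits hold the magnitude (capped at 8).
+ */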
+static s8
+get_delta(u8 val)
+{
+	s8 ret;
+
+	if (!field_valid(val) || !(val & BIT(7)))
+		return 0;
+
+	ret = val & 0x1f;
+	if (ret > 8)
+		ret = 8;
+	if (val & BIT(6))
+		ret = -ret;
+
+	return ret;
+}
+
+static void
+mt7601u_config_tx_power_per_rate(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	u32 val;
+	s8 bw40_delta;
+	int i;
+
+	bw40_delta = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]);
+
+	for (i = 0; i < 5; i++) {
+		val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i));
+
+		mt7601u_save_power_rate(dev, bw40_delta, val, i);
+
+		if (~val)
+			mt7601u_wr(dev, MT_TX_PWR_CFG_0 + i * 4, val);
+	}
+
+	mt7601u_extra_power_over_mac(dev);
+}
+
+static void
+mt7601u_init_tssi_params(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	struct tssi_data *d = &dev->ee->tssi_data;
+
+	if (!dev->ee->tssi_enabled)
+		return;
+
+	d->slope = eeprom[MT_EE_TX_TSSI_SLOPE];
+	d->tx0_delta_offset = eeprom[MT_EE_TX_TSSI_OFFSET] * 1024;
+	d->offset[0] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP];
+	d->offset[1] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 1];
+	d->offset[2] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 2];
+}
+
+int
+mt7601u_eeprom_init(struct mt7601u_dev *dev)
+{
+	u8 *eeprom;
+	int i, ret;
+
+	ret = mt7601u_efuse_physical_size_check(dev);
+	if (ret)
+		return ret;
+
+	dev->ee = devm_kzalloc(dev->dev, sizeof(*dev->ee), GFP_KERNEL);
+	if (!dev->ee)
+		return -ENOMEM;
+
+	eeprom = kmalloc(MT7601U_EEPROM_SIZE, GFP_KERNEL);
+	if (!eeprom)
+		return -ENOMEM;
+
+	for (i = 0; i + 16 <= MT7601U_EEPROM_SIZE; i += 16) {
+		ret = mt7601u_efuse_read(dev, i, eeprom + i, MT_EE_READ);
+		if (ret)
+			goto out;
+	}
+
+	if (eeprom[MT_EE_VERSION_EE] > MT7601U_EE_MAX_VER)
+		dev_warn(dev->dev,
+			 "Warning: unsupported EEPROM version %02hhx\n",
+			 eeprom[MT_EE_VERSION_EE]);
+	dev_info(dev->dev, "EEPROM ver:%02hhx fae:%02hhx\n",
+		 eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]);
+
+	mt7601u_set_macaddr(dev, eeprom);
+	mt7601u_set_chip_cap(dev, eeprom);
+	mt7601u_set_channel_power(dev, eeprom);
+	mt7601u_set_country_reg(dev, eeprom);
+	mt7601u_set_rf_freq_off(dev, eeprom);
+	mt7601u_set_rssi_offset(dev, eeprom);
+	dev->ee->ref_temp = eeprom[MT_EE_REF_TEMP];
+	dev->ee->lna_gain = eeprom[MT_EE_LNA_GAIN];
+
+	mt7601u_config_tx_power_per_rate(dev, eeprom);
+
+	mt7601u_init_tssi_params(dev, eeprom);
+out:
+	kfree(eeprom);
+	return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.h b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
new file mode 100644
index 0000000..662d127
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_EEPROM_H
+#define __MT7601U_EEPROM_H
+
+struct mt7601u_dev;
+
+#define MT7601U_EE_MAX_VER			0x0c
+#define MT7601U_EEPROM_SIZE			256
+
+#define MT7601U_DEFAULT_TX_POWER		6
+
+enum mt76_eeprom_field {
+	MT_EE_CHIP_ID =				0x00,
+	MT_EE_VERSION_FAE =			0x02,
+	MT_EE_VERSION_EE =			0x03,
+	MT_EE_MAC_ADDR =			0x04,
+	MT_EE_NIC_CONF_0 =			0x34,
+	MT_EE_NIC_CONF_1 =			0x36,
+	MT_EE_COUNTRY_REGION =			0x39,
+	MT_EE_FREQ_OFFSET =			0x3a,
+	MT_EE_NIC_CONF_2 =			0x42,
+
+	MT_EE_LNA_GAIN =			0x44,
+	MT_EE_RSSI_OFFSET =			0x46,
+
+	MT_EE_TX_POWER_DELTA_BW40 =		0x50,
+	MT_EE_TX_POWER_OFFSET =			0x52,
+
+	MT_EE_TX_TSSI_SLOPE =			0x6e,
+	MT_EE_TX_TSSI_OFFSET_GROUP =		0x6f,
+	MT_EE_TX_TSSI_OFFSET =			0x76,
+
+	MT_EE_TX_TSSI_TARGET_POWER =		0xd0,
+	MT_EE_REF_TEMP =			0xd1,
+	MT_EE_FREQ_OFFSET_COMPENSATION =	0xdb,
+	MT_EE_TX_POWER_BYRATE_BASE =		0xde,
+
+	MT_EE_USAGE_MAP_START =			0x1e0,
+	MT_EE_USAGE_MAP_END =			0x1fc,
+};
+
+#define MT_EE_NIC_CONF_0_RX_PATH		GENMASK(3, 0)
+#define MT_EE_NIC_CONF_0_TX_PATH		GENMASK(7, 4)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE		GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_HW_RF_CTRL		BIT(0)
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC		BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G		BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G		BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN		BIT(13)
+
+#define MT_EE_NIC_CONF_2_RX_STREAM		GENMASK(3, 0)
+#define MT_EE_NIC_CONF_2_TX_STREAM		GENMASK(7, 4)
+#define MT_EE_NIC_CONF_2_HW_ANTDIV		BIT(8)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION		GENMASK(10, 9)
+#define MT_EE_NIC_CONF_2_TEMP_DISABLE		BIT(11)
+#define MT_EE_NIC_CONF_2_COEX_METHOD		GENMASK(15, 13)
+
+#define MT_EE_TX_POWER_BYRATE(i)		(MT_EE_TX_POWER_BYRATE_BASE + \
+						 (i) * 4)
+
+#define MT_EFUSE_USAGE_MAP_SIZE			(MT_EE_USAGE_MAP_END -	\
+						 MT_EE_USAGE_MAP_START + 1)
+
+enum mt7601u_eeprom_access_modes {
+	MT_EE_READ = 0,
+	MT_EE_PHYSICAL_READ = 1,
+};
+
+struct power_per_rate {
+	u8 raw;  /* validated s6 value */
+	s8 bw20; /* sign-extended int */
+	s8 bw40; /* sign-extended int */
+};
+
+/* Power per rate - one value per two rates */
+struct mt7601u_rate_power {
+	struct power_per_rate cck[2];
+	struct power_per_rate ofdm[4];
+	struct power_per_rate ht[4];
+};
+
+struct reg_channel_bounds {
+	u8 start;
+	u8 num;
+};
+
+struct mt7601u_eeprom_params {
+	bool tssi_enabled;
+	u8 rf_freq_off;
+	s8 rssi_offset[2];
+	s8 ref_temp;
+	s8 lna_gain;
+
+	u8 chan_pwr[14];
+	struct mt7601u_rate_power power_rate_table;
+	s8 real_cck_bw20[2];
+
+	/* TSSI stuff - only with internal TX ALC */
+	struct tssi_data {
+		int tx0_delta_offset;
+		u8 slope;
+		u8 offset[3];
+	} tssi_data;
+
+	struct reg_channel_bounds reg;
+};
+
+int mt7601u_eeprom_init(struct mt7601u_dev *dev);
+
+static inline u32 s6_validate(u32 reg)
+{
+	WARN_ON(reg & ~GENMASK(5, 0));
+	return reg & GENMASK(5, 0);
+}
+
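+/* Sign-extend a 6-bit two's complement value, e.g. 0x3f -> -1, 0x1f -> 31. */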
+static inline int s6_to_int(u32 reg)
+{
+	int s6;
+
+	s6 = s6_validate(reg);
+	if (s6 & BIT(5))
+		s6 -= BIT(6);
+
+	return s6;
+}
+
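+/* Clamp to the s6 range [-32, 31] and encode as 6-bit two's complement. */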
+static inline u32 int_to_s6(int val)
+{
+	if (val < -0x20)
+		return 0x20;
+	if (val > 0x1f)
+		return 0x1f;
+
+	return val & 0x3f;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
new file mode 100644
index 0000000..df3dd56
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -0,0 +1,628 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "mcu.h"
+
+#include "initvals.h"
+
+static void
+mt7601u_set_wlan_state(struct mt7601u_dev *dev, u32 val, bool enable)
+{
+	int i;
+
+	/* Note: we don't turn off WLAN_CLK because that makes the device
+	 *	 not respond properly on the probe path.
+	 *	 In case anyone (PSM?) wants to use this function we can
+	 *	 bring the clock stuff back and fixup the probe path.
+	 */
+
+	if (enable)
+		val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+			MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+	else
+		val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN);
+
+	mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
+	udelay(20);
+
+	if (enable) {
+		set_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state);
+	} else {
+		clear_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state);
+		return;
+	}
+
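+	/* Wait up to 200 * 20us for the crystal and PLL to become ready. */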
+	for (i = 200; i; i--) {
+		val = mt7601u_rr(dev, MT_CMB_CTRL);
+
+		if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD)
+			break;
+
+		udelay(20);
+	}
+
+	/* Note: vendor driver tries to disable/enable wlan here and retry
+	 *       but the code which does it is so buggy it must have never
+	 *       triggered, so don't bother.
+	 */
+	if (!i)
+		dev_err(dev->dev, "Error: PLL and XTAL check failed!\n");
+}
+
+static void mt7601u_chip_onoff(struct mt7601u_dev *dev, bool enable, bool reset)
+{
+	u32 val;
+
+	mutex_lock(&dev->hw_atomic_mutex);
+
+	val = mt7601u_rr(dev, MT_WLAN_FUN_CTRL);
+
+	if (reset) {
+		val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN;
+		val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+		if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+			val |= (MT_WLAN_FUN_CTRL_WLAN_RESET |
+				MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+			mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
+			udelay(20);
+
+			val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET |
+				 MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+		}
+	}
+
+	mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
+	udelay(20);
+
+	mt7601u_set_wlan_state(dev, val, enable);
+
+	mutex_unlock(&dev->hw_atomic_mutex);
+}
+
+static void mt7601u_reset_csr_bbp(struct mt7601u_dev *dev)
+{
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, (MT_MAC_SYS_CTRL_RESET_CSR |
+					  MT_MAC_SYS_CTRL_RESET_BBP));
+	mt7601u_wr(dev, MT_USB_DMA_CFG, 0);
+	msleep(1);
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
+}
+
+static void mt7601u_init_usb_dma(struct mt7601u_dev *dev)
+{
+	u32 val;
+
+	val = MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
+	      MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) |
+	      MT_USB_DMA_CFG_RX_BULK_EN |
+	      MT_USB_DMA_CFG_TX_BULK_EN;
+	if (dev->in_max_packet == 512)
+		val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+	mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+
+	val |= MT_USB_DMA_CFG_UDMA_RX_WL_DROP;
+	mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+	val &= ~MT_USB_DMA_CFG_UDMA_RX_WL_DROP;
+	mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+}
+
+static int mt7601u_init_bbp(struct mt7601u_dev *dev)
+{
+	int ret;
+
+	ret = mt7601u_wait_bbp_ready(dev);
+	if (ret)
+		return ret;
+
+	ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_common_vals,
+				      ARRAY_SIZE(bbp_common_vals));
+	if (ret)
+		return ret;
+
+	return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_chip_vals,
+				       ARRAY_SIZE(bbp_chip_vals));
+}
+
+static void
+mt76_init_beacon_offsets(struct mt7601u_dev *dev)
+{
+	u16 base = MT_BEACON_BASE;
+	u32 regs[4] = {};
+	int i;
+
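+	/* Each MT_BCN_OFFSET register packs four 8-bit beacon offsets,
+	 * expressed in units of 64 bytes from the beacon memory base.
+	 */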
+	for (i = 0; i < 16; i++) {
+		u16 addr = dev->beacon_offsets[i];
+
+		regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
+	}
+
+	for (i = 0; i < 4; i++)
+		mt7601u_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+static int mt7601u_write_mac_initvals(struct mt7601u_dev *dev)
+{
+	int ret;
+
+	ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, mac_common_vals,
+				      ARRAY_SIZE(mac_common_vals));
+	if (ret)
+		return ret;
+	ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN,
+				      mac_chip_vals, ARRAY_SIZE(mac_chip_vals));
+	if (ret)
+		return ret;
+
+	mt76_init_beacon_offsets(dev);
+
+	mt7601u_wr(dev, MT_AUX_CLK_CFG, 0);
+
+	return 0;
+}
+
+static int mt7601u_init_wcid_mem(struct mt7601u_dev *dev)
+{
+	u32 *vals;
+	int i, ret;
+
+	vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+	if (!vals)
+		return -ENOMEM;
+
+	for (i = 0; i < N_WCIDS; i++)  {
+		vals[i * 2] = 0xffffffff;
+		vals[i * 2 + 1] = 0x00ffffff;
+	}
+
+	ret = mt7601u_burst_write_regs(dev, MT_WCID_ADDR_BASE,
+				       vals, N_WCIDS * 2);
+	kfree(vals);
+
+	return ret;
+}
+
+static int mt7601u_init_key_mem(struct mt7601u_dev *dev)
+{
+	u32 vals[4] = {};
+
+	return mt7601u_burst_write_regs(dev, MT_SKEY_MODE_BASE_0,
+					vals, ARRAY_SIZE(vals));
+}
+
+static int mt7601u_init_wcid_attr_mem(struct mt7601u_dev *dev)
+{
+	u32 *vals;
+	int i, ret;
+
+	vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+	if (!vals)
+		return -ENOMEM;
+
+	for (i = 0; i < N_WCIDS * 2; i++)
+		vals[i] = 1;
+
+	ret = mt7601u_burst_write_regs(dev, MT_WCID_ATTR_BASE,
+				       vals, N_WCIDS * 2);
+	kfree(vals);
+
+	return ret;
+}
+
+static void mt7601u_reset_counters(struct mt7601u_dev *dev)
+{
+	mt7601u_rr(dev, MT_RX_STA_CNT0);
+	mt7601u_rr(dev, MT_RX_STA_CNT1);
+	mt7601u_rr(dev, MT_RX_STA_CNT2);
+	mt7601u_rr(dev, MT_TX_STA_CNT0);
+	mt7601u_rr(dev, MT_TX_STA_CNT1);
+	mt7601u_rr(dev, MT_TX_STA_CNT2);
+}
+
+int mt7601u_mac_start(struct mt7601u_dev *dev)
+{
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+
+	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+		       MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000))
+		return -ETIMEDOUT;
+
+	dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR |
+		MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC |
+		MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP |
+		MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND |
+		MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS |
+		MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL |
+		MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV;
+	mt7601u_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL,
+		   MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+
+	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+		       MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static void mt7601u_mac_stop_hw(struct mt7601u_dev *dev)
+{
+	int i, ok;
+
+	if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+		return;
+
+	mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
+		   MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
+		   MT_BEACON_TIME_CFG_BEACON_TX);
+
+	if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
+		dev_warn(dev->dev, "Warning: TX DMA did not stop!\n");
+
+	/* Page count on TxQ */
+	i = 200;
+	while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
+		       (mt76_rr(dev, 0x0a30) & 0x000000ff) ||
+		       (mt76_rr(dev, 0x0a34) & 0x00ff00ff)))
+		msleep(10);
+
+	if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000))
+		dev_warn(dev->dev, "Warning: MAC TX did not stop!\n");
+
+	mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX |
+					 MT_MAC_SYS_CTRL_ENABLE_TX);
+
+	/* Page count on RxQ */
+	ok = 0;
+	i = 200;
+	while (i--) {
+		if ((mt76_rr(dev, 0x0430) & 0x00ff0000) ||
+		    (mt76_rr(dev, 0x0a30) & 0xffffffff) ||
+		    (mt76_rr(dev, 0x0a34) & 0xffffffff))
+			ok++;
+		if (ok > 6)
+			break;
+
+		msleep(1);
+	}
+
+	if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000))
+		dev_warn(dev->dev, "Warning: MAC RX did not stop!\n");
+
+	if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
+		dev_warn(dev->dev, "Warning: RX DMA did not stop!\n");
+}
+
+void mt7601u_mac_stop(struct mt7601u_dev *dev)
+{
+	mt7601u_mac_stop_hw(dev);
+	flush_delayed_work(&dev->stat_work);
+	cancel_delayed_work_sync(&dev->stat_work);
+}
+
+static void mt7601u_stop_hardware(struct mt7601u_dev *dev)
+{
+	mt7601u_chip_onoff(dev, false, false);
+}
+
+int mt7601u_init_hardware(struct mt7601u_dev *dev)
+{
+	static const u16 beacon_offsets[16] = {
+		/* 512 bytes per beacon */
+		0xc000,	0xc200,	0xc400,	0xc600,
+		0xc800,	0xca00,	0xcc00,	0xce00,
+		0xd000,	0xd200,	0xd400,	0xd600,
+		0xd800,	0xda00,	0xdc00,	0xde00
+	};
+	int ret;
+
+	dev->beacon_offsets = beacon_offsets;
+
+	mt7601u_chip_onoff(dev, true, false);
+
+	ret = mt7601u_wait_asic_ready(dev);
+	if (ret)
+		goto err;
+	ret = mt7601u_mcu_init(dev);
+	if (ret)
+		goto err;
+
+	if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+			    MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+			    MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) {
+		ret = -EIO;
+		goto err;
+	}
+
+	/* Wait for ASIC ready after FW load. */
+	ret = mt7601u_wait_asic_ready(dev);
+	if (ret)
+		goto err;
+
+	mt7601u_reset_csr_bbp(dev);
+	mt7601u_init_usb_dma(dev);
+
+	ret = mt7601u_mcu_cmd_init(dev);
+	if (ret)
+		goto err;
+	ret = mt7601u_dma_init(dev);
+	if (ret)
+		goto err_mcu;
+	ret = mt7601u_write_mac_initvals(dev);
+	if (ret)
+		goto err_rx;
+
+	if (!mt76_poll_msec(dev, MT_MAC_STATUS,
+			    MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 100)) {
+		ret = -EIO;
+		goto err_rx;
+	}
+
+	ret = mt7601u_init_bbp(dev);
+	if (ret)
+		goto err_rx;
+	ret = mt7601u_init_wcid_mem(dev);
+	if (ret)
+		goto err_rx;
+	ret = mt7601u_init_key_mem(dev);
+	if (ret)
+		goto err_rx;
+	ret = mt7601u_init_wcid_attr_mem(dev);
+	if (ret)
+		goto err_rx;
+
+	mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
+					     MT_BEACON_TIME_CFG_SYNC_MODE |
+					     MT_BEACON_TIME_CFG_TBTT_EN |
+					     MT_BEACON_TIME_CFG_BEACON_TX));
+
+	mt7601u_reset_counters(dev);
+
+	mt7601u_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+
+	mt7601u_wr(dev, MT_TXOP_CTRL_CFG, MT76_SET(MT_TXOP_TRUN_EN, 0x3f) |
+					  MT76_SET(MT_TXOP_EXT_CCA_DLY, 0x58));
+
+	ret = mt7601u_eeprom_init(dev);
+	if (ret)
+		goto err_rx;
+
+	ret = mt7601u_phy_init(dev);
+	if (ret)
+		goto err_rx;
+
+	mt7601u_set_rx_path(dev, 0);
+	mt7601u_set_tx_dac(dev, 0);
+
+	mt7601u_mac_set_ctrlch(dev, false);
+	mt7601u_bbp_set_ctrlch(dev, false);
+	mt7601u_bbp_set_bw(dev, MT_BW_20);
+
+	return 0;
+
+err_rx:
+	mt7601u_dma_cleanup(dev);
+err_mcu:
+	mt7601u_mcu_cmd_deinit(dev);
+err:
+	mt7601u_chip_onoff(dev, false, false);
+	return ret;
+}
+
+void mt7601u_cleanup(struct mt7601u_dev *dev)
+{
+	if (!test_and_clear_bit(MT7601U_STATE_INITIALIZED, &dev->state))
+		return;
+
+	mt7601u_stop_hardware(dev);
+	mt7601u_dma_cleanup(dev);
+	mt7601u_mcu_cmd_deinit(dev);
+}
+
+struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev)
+{
+	struct ieee80211_hw *hw;
+	struct mt7601u_dev *dev;
+
+	hw = ieee80211_alloc_hw(sizeof(*dev), &mt7601u_ops);
+	if (!hw)
+		return NULL;
+
+	dev = hw->priv;
+	dev->dev = pdev;
+	dev->hw = hw;
+	mutex_init(&dev->vendor_req_mutex);
+	mutex_init(&dev->reg_atomic_mutex);
+	mutex_init(&dev->hw_atomic_mutex);
+	mutex_init(&dev->mutex);
+	spin_lock_init(&dev->tx_lock);
+	spin_lock_init(&dev->rx_lock);
+	spin_lock_init(&dev->lock);
+	spin_lock_init(&dev->con_mon_lock);
+	atomic_set(&dev->avg_ampdu_len, 1);
+
+	dev->stat_wq = alloc_workqueue("mt7601u", WQ_UNBOUND, 0);
+	if (!dev->stat_wq) {
+		ieee80211_free_hw(hw);
+		return NULL;
+	}
+
+	return dev;
+}
+
+#define CHAN2G(_idx, _freq) {			\
+	.band = IEEE80211_BAND_2GHZ,		\
+	.center_freq = (_freq),			\
+	.hw_value = (_idx),			\
+	.max_power = 30,			\
+}
+
+static const struct ieee80211_channel mt76_channels_2ghz[] = {
+	CHAN2G(1, 2412),
+	CHAN2G(2, 2417),
+	CHAN2G(3, 2422),
+	CHAN2G(4, 2427),
+	CHAN2G(5, 2432),
+	CHAN2G(6, 2437),
+	CHAN2G(7, 2442),
+	CHAN2G(8, 2447),
+	CHAN2G(9, 2452),
+	CHAN2G(10, 2457),
+	CHAN2G(11, 2462),
+	CHAN2G(12, 2467),
+	CHAN2G(13, 2472),
+	CHAN2G(14, 2484),
+};
+
+#define CCK_RATE(_idx, _rate) {					\
+	.bitrate = _rate,					\
+	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
+	.hw_value = (MT_PHY_TYPE_CCK << 8) | _idx,		\
+	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx),	\
+}
+
+#define OFDM_RATE(_idx, _rate) {				\
+	.bitrate = _rate,					\
+	.hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx,		\
+	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx,	\
+}
+
+static struct ieee80211_rate mt76_rates[] = {
+	CCK_RATE(0, 10),
+	CCK_RATE(1, 20),
+	CCK_RATE(2, 55),
+	CCK_RATE(3, 110),
+	OFDM_RATE(0, 60),
+	OFDM_RATE(1, 90),
+	OFDM_RATE(2, 120),
+	OFDM_RATE(3, 180),
+	OFDM_RATE(4, 240),
+	OFDM_RATE(5, 360),
+	OFDM_RATE(6, 480),
+	OFDM_RATE(7, 540),
+};
+
+static int
+mt76_init_sband(struct mt7601u_dev *dev, struct ieee80211_supported_band *sband,
+		const struct ieee80211_channel *chan, int n_chan,
+		struct ieee80211_rate *rates, int n_rates)
+{
+	struct ieee80211_sta_ht_cap *ht_cap;
+	void *chanlist;
+	int size;
+
+	size = n_chan * sizeof(*chan);
+	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
+	if (!chanlist)
+		return -ENOMEM;
+
+	sband->channels = chanlist;
+	sband->n_channels = n_chan;
+	sband->bitrates = rates;
+	sband->n_bitrates = n_rates;
+
+	ht_cap = &sband->ht_cap;
+	ht_cap->ht_supported = true;
+	ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+		      IEEE80211_HT_CAP_GRN_FLD |
+		      IEEE80211_HT_CAP_SGI_20 |
+		      IEEE80211_HT_CAP_SGI_40 |
+		      (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+	ht_cap->mcs.rx_mask[0] = 0xff;
+	ht_cap->mcs.rx_mask[4] = 0x1;
+	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+	ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2;
+
+	dev->chandef.chan = &sband->channels[0];
+
+	return 0;
+}
+
+static int
+mt76_init_sband_2g(struct mt7601u_dev *dev)
+{
+	dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g),
+				     GFP_KERNEL);
+	if (!dev->sband_2g)
+		return -ENOMEM;
+
+	dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = dev->sband_2g;
+
+	WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
+		ARRAY_SIZE(mt76_channels_2ghz));
+
+	return mt76_init_sband(dev, dev->sband_2g,
+			       &mt76_channels_2ghz[dev->ee->reg.start - 1],
+			       dev->ee->reg.num,
+			       mt76_rates, ARRAY_SIZE(mt76_rates));
+}
+
+int mt7601u_register_device(struct mt7601u_dev *dev)
+{
+	struct ieee80211_hw *hw = dev->hw;
+	struct wiphy *wiphy = hw->wiphy;
+	int ret;
+
+	/* Reserve WCID 0 for mcast - this way the AP's WCID will end up in
+	 * entry no. 1, the same as in the vendor driver.
+	 */
+	dev->wcid_mask[0] |= 1;
+
+	/* init fake wcid for monitor interfaces */
+	dev->mon_wcid = devm_kmalloc(dev->dev, sizeof(*dev->mon_wcid),
+				     GFP_KERNEL);
+	if (!dev->mon_wcid)
+		return -ENOMEM;
+	dev->mon_wcid->idx = 0xff;
+	dev->mon_wcid->hw_key_idx = -1;
+
+	SET_IEEE80211_DEV(hw, dev->dev);
+
+	hw->queues = 4;
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+	hw->max_rates = 1;
+	hw->max_report_rates = 7;
+	hw->max_rate_tries = 1;
+
+	hw->sta_data_size = sizeof(struct mt76_sta);
+	hw->vif_data_size = sizeof(struct mt76_vif);
+
+	SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+	ret = mt76_init_sband_2g(dev);
+	if (ret)
+		return ret;
+
+	INIT_DELAYED_WORK(&dev->mac_work, mt7601u_mac_work);
+	INIT_DELAYED_WORK(&dev->stat_work, mt7601u_tx_stat);
+
+	ret = ieee80211_register_hw(hw);
+	if (ret)
+		return ret;
+
+	mt7601u_init_debugfs(dev);
+
+	return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/initvals.h b/drivers/net/wireless/mediatek/mt7601u/initvals.h
new file mode 100644
index 0000000..ec11ff6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/initvals.h
@@ -0,0 +1,164 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_INITVALS_H
+#define __MT7601U_INITVALS_H
+
+static const struct mt76_reg_pair bbp_common_vals[] = {
+	{  65,	0x2c },
+	{  66,	0x38 },
+	{  68,	0x0b },
+	{  69,	0x12 },
+	{  70,	0x0a },
+	{  73,	0x10 },
+	{  81,	0x37 },
+	{  82,	0x62 },
+	{  83,	0x6a },
+	{  84,	0x99 },
+	{  86,	0x00 },
+	{  91,	0x04 },
+	{  92,	0x00 },
+	{ 103,	0x00 },
+	{ 105,	0x05 },
+	{ 106,	0x35 },
+};
+
+static const struct mt76_reg_pair bbp_chip_vals[] = {
+	{   1, 0x04 },	{   4, 0x40 },	{  20, 0x06 },	{  31, 0x08 },
+	/* CCK Tx Control */
+	{ 178, 0xff },
+	/* AGC/Sync controls */
+	{  66, 0x14 },	{  68, 0x8b },	{  69, 0x12 },	{  70, 0x09 },
+	{  73, 0x11 },	{  75, 0x60 },	{  76, 0x44 },	{  84, 0x9a },
+	{  86, 0x38 },	{  91, 0x07 },	{  92, 0x02 },
+	/* Rx Path Controls */
+	{  99, 0x50 },	{ 101, 0x00 },	{ 103, 0xc0 },	{ 104, 0x92 },
+	{ 105, 0x3c },	{ 106, 0x03 },	{ 128, 0x12 },
+	/* Change RXWI content: Gain Report */
+	{ 142, 0x04 },	{ 143, 0x37 },
+	/* Change RXWI content: Antenna Report */
+	{ 142, 0x03 },	{ 143, 0x99 },
+	/* Calibration Index Register */
+	/* CCK Receiver Control */
+	{ 160, 0xeb },	{ 161, 0xc4 },	{ 162, 0x77 },	{ 163, 0xf9 },
+	{ 164, 0x88 },	{ 165, 0x80 },	{ 166, 0xff },	{ 167, 0xe4 },
+	/* Added AGC controls - these AGC/GLRT registers are accessed
+	 * through R195 and R196.
+	 */
+	{ 195, 0x00 },	{ 196, 0x00 },
+	{ 195, 0x01 },	{ 196, 0x04 },
+	{ 195, 0x02 },	{ 196, 0x20 },
+	{ 195, 0x03 },	{ 196, 0x0a },
+	{ 195, 0x06 },	{ 196, 0x16 },
+	{ 195, 0x07 },	{ 196, 0x05 },
+	{ 195, 0x08 },	{ 196, 0x37 },
+	{ 195, 0x0a },	{ 196, 0x15 },
+	{ 195, 0x0b },	{ 196, 0x17 },
+	{ 195, 0x0c },	{ 196, 0x06 },
+	{ 195, 0x0d },	{ 196, 0x09 },
+	{ 195, 0x0e },	{ 196, 0x05 },
+	{ 195, 0x0f },	{ 196, 0x09 },
+	{ 195, 0x10 },	{ 196, 0x20 },
+	{ 195, 0x20 },	{ 196, 0x17 },
+	{ 195, 0x21 },	{ 196, 0x06 },
+	{ 195, 0x22 },	{ 196, 0x09 },
+	{ 195, 0x23 },	{ 196, 0x17 },
+	{ 195, 0x24 },	{ 196, 0x06 },
+	{ 195, 0x25 },	{ 196, 0x09 },
+	{ 195, 0x26 },	{ 196, 0x17 },
+	{ 195, 0x27 },	{ 196, 0x06 },
+	{ 195, 0x28 },	{ 196, 0x09 },
+	{ 195, 0x29 },	{ 196, 0x05 },
+	{ 195, 0x2a },	{ 196, 0x09 },
+	{ 195, 0x80 },	{ 196, 0x8b },
+	{ 195, 0x81 },	{ 196, 0x12 },
+	{ 195, 0x82 },	{ 196, 0x09 },
+	{ 195, 0x83 },	{ 196, 0x17 },
+	{ 195, 0x84 },	{ 196, 0x11 },
+	{ 195, 0x85 },	{ 196, 0x00 },
+	{ 195, 0x86 },	{ 196, 0x00 },
+	{ 195, 0x87 },	{ 196, 0x18 },
+	{ 195, 0x88 },	{ 196, 0x60 },
+	{ 195, 0x89 },	{ 196, 0x44 },
+	{ 195, 0x8a },	{ 196, 0x8b },
+	{ 195, 0x8b },	{ 196, 0x8b },
+	{ 195, 0x8c },	{ 196, 0x8b },
+	{ 195, 0x8d },	{ 196, 0x8b },
+	{ 195, 0x8e },	{ 196, 0x09 },
+	{ 195, 0x8f },	{ 196, 0x09 },
+	{ 195, 0x90 },	{ 196, 0x09 },
+	{ 195, 0x91 },	{ 196, 0x09 },
+	{ 195, 0x92 },	{ 196, 0x11 },
+	{ 195, 0x93 },	{ 196, 0x11 },
+	{ 195, 0x94 },	{ 196, 0x11 },
+	{ 195, 0x95 },	{ 196, 0x11 },
+	/* PPAD */
+	{  47, 0x80 },	{  60, 0x80 },	{ 150, 0xd2 },	{ 151, 0x32 },
+	{ 152, 0x23 },	{ 153, 0x41 },	{ 154, 0x00 },	{ 155, 0x4f },
+	{ 253, 0x7e },	{ 195, 0x30 },	{ 196, 0x32 },	{ 195, 0x31 },
+	{ 196, 0x23 },	{ 195, 0x32 },	{ 196, 0x45 },	{ 195, 0x35 },
+	{ 196, 0x4a },	{ 195, 0x36 },	{ 196, 0x5a },	{ 195, 0x37 },
+	{ 196, 0x5a },
+};
+
+static const struct mt76_reg_pair mac_common_vals[] = {
+	{ MT_LEGACY_BASIC_RATE,		0x0000013f },
+	{ MT_HT_BASIC_RATE,		0x00008003 },
+	{ MT_MAC_SYS_CTRL,		0x00000000 },
+	{ MT_RX_FILTR_CFG,		0x00017f97 },
+	{ MT_BKOFF_SLOT_CFG,		0x00000209 },
+	{ MT_TX_SW_CFG0,		0x00000000 },
+	{ MT_TX_SW_CFG1,		0x00080606 },
+	{ MT_TX_LINK_CFG,		0x00001020 },
+	{ MT_TX_TIMEOUT_CFG,		0x000a2090 },
+	{ MT_MAX_LEN_CFG,		0x00003fff },
+	{ MT_PBF_TX_MAX_PCNT,		0x1fbf1f1f },
+	{ MT_PBF_RX_MAX_PCNT,		0x0000009f },
+	{ MT_TX_RETRY_CFG,		0x47d01f0f },
+	{ MT_AUTO_RSP_CFG,		0x00000013 },
+	{ MT_CCK_PROT_CFG,		0x05740003 },
+	{ MT_OFDM_PROT_CFG,		0x05740003 },
+	{ MT_MM40_PROT_CFG,		0x03f44084 },
+	{ MT_GF20_PROT_CFG,		0x01744004 },
+	{ MT_GF40_PROT_CFG,		0x03f44084 },
+	{ MT_MM20_PROT_CFG,		0x01744004 },
+	{ MT_TXOP_CTRL_CFG,		0x0000583f },
+	{ MT_TX_RTS_CFG,		0x01092b20 },
+	{ MT_EXP_ACK_TIME,		0x002400ca },
+	{ MT_TXOP_HLDR_ET,		0x00000002 },
+	{ MT_XIFS_TIME_CFG,		0x33a41010 },
+	{ MT_PWR_PIN_CFG,		0x00000000 },
+};
+
+static const struct mt76_reg_pair mac_chip_vals[] = {
+	{ MT_TSO_CTRL,			0x00006050 },
+	{ MT_BCN_OFFSET(0),		0x18100800 },
+	{ MT_BCN_OFFSET(1),		0x38302820 },
+	{ MT_PBF_SYS_CTRL,		0x00080c00 },
+	{ MT_PBF_CFG,			0x7f723c1f },
+	{ MT_FCE_PSE_CTRL,		0x00000001 },
+	{ MT_PAUSE_ENABLE_CONTROL1,	0x00000000 },
+	{ MT_TX0_RF_GAIN_CORR,		0x003b0005 },
+	{ MT_TX0_RF_GAIN_ATTEN,		0x00006900 },
+	{ MT_TX0_BB_GAIN_ATTEN,		0x00000400 },
+	{ MT_TX_ALC_VGA3,		0x00060006 },
+	{ MT_TX_SW_CFG0,		0x00000402 },
+	{ MT_TX_SW_CFG1,		0x00000000 },
+	{ MT_TX_SW_CFG2,		0x00000000 },
+	{ MT_HEADER_TRANS_CTRL_REG,	0x00000000 },
+	{ MT_FCE_CSO,			0x0000030f },
+	{ MT_FCE_PARAMETERS,		0x00256f0f },
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h b/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h
new file mode 100644
index 0000000..a2bdc3e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h
@@ -0,0 +1,291 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_PHY_INITVALS_H
+#define __MT7601U_PHY_INITVALS_H
+
+#define RF_REG_PAIR(bank, reg, value)				\
+	{ MT_MCU_MEMMAP_RF | ((bank) << 16) | (reg), (value) }
+
+static const struct mt76_reg_pair rf_central[] = {
+	/* Bank 0 - for central blocks: BG, PLL, XTAL, LO, ADC/DAC */
+	RF_REG_PAIR(0,	 0, 0x02),
+	RF_REG_PAIR(0,	 1, 0x01),
+	RF_REG_PAIR(0,	 2, 0x11),
+	RF_REG_PAIR(0,	 3, 0xff),
+	RF_REG_PAIR(0,	 4, 0x0a),
+	RF_REG_PAIR(0,	 5, 0x20),
+	RF_REG_PAIR(0,	 6, 0x00),
+	/* B/G */
+	RF_REG_PAIR(0,	 7, 0x00),
+	RF_REG_PAIR(0,	 8, 0x00),
+	RF_REG_PAIR(0,	 9, 0x00),
+	RF_REG_PAIR(0,	10, 0x00),
+	RF_REG_PAIR(0,	11, 0x21),
+	/* XO */
+	RF_REG_PAIR(0,	13, 0x00),		/* 40mhz xtal */
+	/* RF_REG_PAIR(0,	13, 0x13), */	/* 20mhz xtal */
+	RF_REG_PAIR(0,	14, 0x7c),
+	RF_REG_PAIR(0,	15, 0x22),
+	RF_REG_PAIR(0,	16, 0x80),
+	/* PLL */
+	RF_REG_PAIR(0,	17, 0x99),
+	RF_REG_PAIR(0,	18, 0x99),
+	RF_REG_PAIR(0,	19, 0x09),
+	RF_REG_PAIR(0,	20, 0x50),
+	RF_REG_PAIR(0,	21, 0xb0),
+	RF_REG_PAIR(0,	22, 0x00),
+	RF_REG_PAIR(0,	23, 0xc5),
+	RF_REG_PAIR(0,	24, 0xfc),
+	RF_REG_PAIR(0,	25, 0x40),
+	RF_REG_PAIR(0,	26, 0x4d),
+	RF_REG_PAIR(0,	27, 0x02),
+	RF_REG_PAIR(0,	28, 0x72),
+	RF_REG_PAIR(0,	29, 0x01),
+	RF_REG_PAIR(0,	30, 0x00),
+	RF_REG_PAIR(0,	31, 0x00),
+	/* test ports */
+	RF_REG_PAIR(0,	32, 0x00),
+	RF_REG_PAIR(0,	33, 0x00),
+	RF_REG_PAIR(0,	34, 0x23),
+	RF_REG_PAIR(0,	35, 0x01), /* change setting to reduce spurs */
+	RF_REG_PAIR(0,	36, 0x00),
+	RF_REG_PAIR(0,	37, 0x00),
+	/* ADC/DAC */
+	RF_REG_PAIR(0,	38, 0x00),
+	RF_REG_PAIR(0,	39, 0x20),
+	RF_REG_PAIR(0,	40, 0x00),
+	RF_REG_PAIR(0,	41, 0xd0),
+	RF_REG_PAIR(0,	42, 0x1b),
+	RF_REG_PAIR(0,	43, 0x02),
+	RF_REG_PAIR(0,	44, 0x00),
+};
+
+static const struct mt76_reg_pair rf_channel[] = {
+	RF_REG_PAIR(4,	 0, 0x01),
+	RF_REG_PAIR(4,	 1, 0x00),
+	RF_REG_PAIR(4,	 2, 0x00),
+	RF_REG_PAIR(4,	 3, 0x00),
+	/* LDO */
+	RF_REG_PAIR(4,	 4, 0x00),
+	RF_REG_PAIR(4,	 5, 0x08),
+	RF_REG_PAIR(4,	 6, 0x00),
+	/* RX */
+	RF_REG_PAIR(4,	 7, 0x5b),
+	RF_REG_PAIR(4,	 8, 0x52),
+	RF_REG_PAIR(4,	 9, 0xb6),
+	RF_REG_PAIR(4,	10, 0x57),
+	RF_REG_PAIR(4,	11, 0x33),
+	RF_REG_PAIR(4,	12, 0x22),
+	RF_REG_PAIR(4,	13, 0x3d),
+	RF_REG_PAIR(4,	14, 0x3e),
+	RF_REG_PAIR(4,	15, 0x13),
+	RF_REG_PAIR(4,	16, 0x22),
+	RF_REG_PAIR(4,	17, 0x23),
+	RF_REG_PAIR(4,	18, 0x02),
+	RF_REG_PAIR(4,	19, 0xa4),
+	RF_REG_PAIR(4,	20, 0x01),
+	RF_REG_PAIR(4,	21, 0x12),
+	RF_REG_PAIR(4,	22, 0x80),
+	RF_REG_PAIR(4,	23, 0xb3),
+	RF_REG_PAIR(4,	24, 0x00), /* reserved */
+	RF_REG_PAIR(4,	25, 0x00), /* reserved */
+	RF_REG_PAIR(4,	26, 0x00), /* reserved */
+	RF_REG_PAIR(4,	27, 0x00), /* reserved */
+	/* LOGEN */
+	RF_REG_PAIR(4,	28, 0x18),
+	RF_REG_PAIR(4,	29, 0xee),
+	RF_REG_PAIR(4,	30, 0x6b),
+	RF_REG_PAIR(4,	31, 0x31),
+	RF_REG_PAIR(4,	32, 0x5d),
+	RF_REG_PAIR(4,	33, 0x00), /* reserved */
+	/* TX */
+	RF_REG_PAIR(4,	34, 0x96),
+	RF_REG_PAIR(4,	35, 0x55),
+	RF_REG_PAIR(4,	36, 0x08),
+	RF_REG_PAIR(4,	37, 0xbb),
+	RF_REG_PAIR(4,	38, 0xb3),
+	RF_REG_PAIR(4,	39, 0xb3),
+	RF_REG_PAIR(4,	40, 0x03),
+	RF_REG_PAIR(4,	41, 0x00), /* reserved */
+	RF_REG_PAIR(4,	42, 0x00), /* reserved */
+	RF_REG_PAIR(4,	43, 0xc5),
+	RF_REG_PAIR(4,	44, 0xc5),
+	RF_REG_PAIR(4,	45, 0xc5),
+	RF_REG_PAIR(4,	46, 0x07),
+	RF_REG_PAIR(4,	47, 0xa8),
+	RF_REG_PAIR(4,	48, 0xef),
+	RF_REG_PAIR(4,	49, 0x1a),
+	/* PA */
+	RF_REG_PAIR(4,	54, 0x07),
+	RF_REG_PAIR(4,	55, 0xa7),
+	RF_REG_PAIR(4,	56, 0xcc),
+	RF_REG_PAIR(4,	57, 0x14),
+	RF_REG_PAIR(4,	58, 0x07),
+	RF_REG_PAIR(4,	59, 0xa8),
+	RF_REG_PAIR(4,	60, 0xd7),
+	RF_REG_PAIR(4,	61, 0x10),
+	RF_REG_PAIR(4,	62, 0x1c),
+	RF_REG_PAIR(4,	63, 0x00), /* reserved */
+};
+
+static const struct mt76_reg_pair rf_vga[] = {
+	RF_REG_PAIR(5,	 0, 0x47),
+	RF_REG_PAIR(5,	 1, 0x00),
+	RF_REG_PAIR(5,	 2, 0x00),
+	RF_REG_PAIR(5,	 3, 0x08),
+	RF_REG_PAIR(5,	 4, 0x04),
+	RF_REG_PAIR(5,	 5, 0x20),
+	RF_REG_PAIR(5,	 6, 0x3a),
+	RF_REG_PAIR(5,	 7, 0x3a),
+	RF_REG_PAIR(5,	 8, 0x00),
+	RF_REG_PAIR(5,	 9, 0x00),
+	RF_REG_PAIR(5,	10, 0x10),
+	RF_REG_PAIR(5,	11, 0x10),
+	RF_REG_PAIR(5,	12, 0x10),
+	RF_REG_PAIR(5,	13, 0x10),
+	RF_REG_PAIR(5,	14, 0x10),
+	RF_REG_PAIR(5,	15, 0x20),
+	RF_REG_PAIR(5,	16, 0x22),
+	RF_REG_PAIR(5,	17, 0x7c),
+	RF_REG_PAIR(5,	18, 0x00),
+	RF_REG_PAIR(5,	19, 0x00),
+	RF_REG_PAIR(5,	20, 0x00),
+	RF_REG_PAIR(5,	21, 0xf1),
+	RF_REG_PAIR(5,	22, 0x11),
+	RF_REG_PAIR(5,	23, 0x02),
+	RF_REG_PAIR(5,	24, 0x41),
+	RF_REG_PAIR(5,	25, 0x20),
+	RF_REG_PAIR(5,	26, 0x00),
+	RF_REG_PAIR(5,	27, 0xd7),
+	RF_REG_PAIR(5,	28, 0xa2),
+	RF_REG_PAIR(5,	29, 0x20),
+	RF_REG_PAIR(5,	30, 0x49),
+	RF_REG_PAIR(5,	31, 0x20),
+	RF_REG_PAIR(5,	32, 0x04),
+	RF_REG_PAIR(5,	33, 0xf1),
+	RF_REG_PAIR(5,	34, 0xa1),
+	RF_REG_PAIR(5,	35, 0x01),
+	RF_REG_PAIR(5,	41, 0x00),
+	RF_REG_PAIR(5,	42, 0x00),
+	RF_REG_PAIR(5,	43, 0x00),
+	RF_REG_PAIR(5,	44, 0x00),
+	RF_REG_PAIR(5,	45, 0x00),
+	RF_REG_PAIR(5,	46, 0x00),
+	RF_REG_PAIR(5,	47, 0x00),
+	RF_REG_PAIR(5,	48, 0x00),
+	RF_REG_PAIR(5,	49, 0x00),
+	RF_REG_PAIR(5,	50, 0x00),
+	RF_REG_PAIR(5,	51, 0x00),
+	RF_REG_PAIR(5,	52, 0x00),
+	RF_REG_PAIR(5,	53, 0x00),
+	RF_REG_PAIR(5,	54, 0x00),
+	RF_REG_PAIR(5,	55, 0x00),
+	RF_REG_PAIR(5,	56, 0x00),
+	RF_REG_PAIR(5,	57, 0x00),
+	RF_REG_PAIR(5,	58, 0x31),
+	RF_REG_PAIR(5,	59, 0x31),
+	RF_REG_PAIR(5,	60, 0x0a),
+	RF_REG_PAIR(5,	61, 0x02),
+	RF_REG_PAIR(5,	62, 0x00),
+	RF_REG_PAIR(5,	63, 0x00),
+};
+
+/* TODO: BBP178 is set to 0xff for "CCK CH14 OBW" which overrides the settings
+ *	 from channel switching. Seems stupid at best.
+ */
+static const struct mt76_reg_pair bbp_high_temp[] = {
+	{  75, 0x60 },
+	{  92, 0x02 },
+	{ 178, 0xff }, /* For CCK CH14 OBW */
+	{ 195, 0x88 }, { 196, 0x60 },
+}, bbp_high_temp_bw20[] = {
+	{  69, 0x12 },
+	{  91, 0x07 },
+	{ 195, 0x23 }, { 196, 0x17 },
+	{ 195, 0x24 }, { 196, 0x06 },
+	{ 195, 0x81 }, { 196, 0x12 },
+	{ 195, 0x83 }, { 196, 0x17 },
+}, bbp_high_temp_bw40[] = {
+	{  69, 0x15 },
+	{  91, 0x04 },
+	{ 195, 0x23 }, { 196, 0x12 },
+	{ 195, 0x24 }, { 196, 0x08 },
+	{ 195, 0x81 }, { 196, 0x15 },
+	{ 195, 0x83 }, { 196, 0x16 },
+}, bbp_low_temp[] = {
+	{ 178, 0xff }, /* For CCK CH14 OBW */
+}, bbp_low_temp_bw20[] = {
+	{  69, 0x12 },
+	{  75, 0x5e },
+	{  91, 0x07 },
+	{  92, 0x02 },
+	{ 195, 0x23 }, { 196, 0x17 },
+	{ 195, 0x24 }, { 196, 0x06 },
+	{ 195, 0x81 }, { 196, 0x12 },
+	{ 195, 0x83 }, { 196, 0x17 },
+	{ 195, 0x88 }, { 196, 0x5e },
+}, bbp_low_temp_bw40[] = {
+	{  69, 0x15 },
+	{  75, 0x5c },
+	{  91, 0x04 },
+	{  92, 0x03 },
+	{ 195, 0x23 }, { 196, 0x10 },
+	{ 195, 0x24 }, { 196, 0x08 },
+	{ 195, 0x81 }, { 196, 0x15 },
+	{ 195, 0x83 }, { 196, 0x16 },
+	{ 195, 0x88 }, { 196, 0x5b },
+}, bbp_normal_temp[] = {
+	{  75, 0x60 },
+	{  92, 0x02 },
+	{ 178, 0xff }, /* For CCK CH14 OBW */
+	{ 195, 0x88 }, { 196, 0x60 },
+}, bbp_normal_temp_bw20[] = {
+	{  69, 0x12 },
+	{  91, 0x07 },
+	{ 195, 0x23 }, { 196, 0x17 },
+	{ 195, 0x24 }, { 196, 0x06 },
+	{ 195, 0x81 }, { 196, 0x12 },
+	{ 195, 0x83 }, { 196, 0x17 },
+}, bbp_normal_temp_bw40[] = {
+	{  69, 0x15 },
+	{  91, 0x04 },
+	{ 195, 0x23 }, { 196, 0x12 },
+	{ 195, 0x24 }, { 196, 0x08 },
+	{ 195, 0x81 }, { 196, 0x15 },
+	{ 195, 0x83 }, { 196, 0x16 },
+};
+
+#define BBP_TABLE(arr) { arr, ARRAY_SIZE(arr), }
+
+static const struct reg_table {
+	const struct mt76_reg_pair *regs;
+	size_t n;
+} bbp_mode_table[3][3] = {
+	{
+		BBP_TABLE(bbp_normal_temp_bw20),
+		BBP_TABLE(bbp_normal_temp_bw40),
+		BBP_TABLE(bbp_normal_temp),
+	}, {
+		BBP_TABLE(bbp_high_temp_bw20),
+		BBP_TABLE(bbp_high_temp_bw40),
+		BBP_TABLE(bbp_high_temp),
+	}, {
+		BBP_TABLE(bbp_low_temp_bw20),
+		BBP_TABLE(bbp_low_temp_bw40),
+		BBP_TABLE(bbp_low_temp),
+	}
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c
new file mode 100644
index 0000000..7514bce
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.c
@@ -0,0 +1,573 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "trace.h"
+#include <linux/etherdevice.h>
+
+static void
+mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate)
+{
+	u8 idx = MT76_GET(MT_TXWI_RATE_MCS, rate);
+
+	txrate->idx = 0;
+	txrate->flags = 0;
+	txrate->count = 1;
+
+	switch (MT76_GET(MT_TXWI_RATE_PHY_MODE, rate)) {
+	case MT_PHY_TYPE_OFDM:
+		txrate->idx = idx + 4;
+		return;
+	case MT_PHY_TYPE_CCK:
+		if (idx >= 8)
+			idx -= 8;
+
+		txrate->idx = idx;
+		return;
+	case MT_PHY_TYPE_HT_GF:
+		txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+		/* fall through */
+	case MT_PHY_TYPE_HT:
+		txrate->flags |= IEEE80211_TX_RC_MCS;
+		txrate->idx = idx;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	if (MT76_GET(MT_TXWI_RATE_BW, rate) == MT_PHY_BW_40)
+		txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+
+	if (rate & MT_TXWI_RATE_SGI)
+		txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+}
+
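+/* The hardware reports only the final rate and the total retry count, so the
+ * intermediate entries of the rate table are reconstructed here assuming one
+ * rate-index step down per retry.  This is an approximation, not something
+ * the hardware reports directly.
+ */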
+static void
+mt76_mac_fill_tx_status(struct mt7601u_dev *dev, struct ieee80211_tx_info *info,
+			struct mt76_tx_status *st)
+{
+	struct ieee80211_tx_rate *rate = info->status.rates;
+	int cur_idx, last_rate;
+	int i;
+
+	last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+	mt76_mac_process_tx_rate(&rate[last_rate], st->rate);
+	if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+		rate[last_rate + 1].idx = -1;
+
+	cur_idx = rate[last_rate].idx + st->retry;
+	for (i = 0; i <= last_rate; i++) {
+		rate[i].flags = rate[last_rate].flags;
+		rate[i].idx = max_t(int, 0, cur_idx - i);
+		rate[i].count = 1;
+	}
+
+	if (last_rate > 0)
+		rate[last_rate - 1].count = st->retry + 1 - last_rate;
+
+	info->status.ampdu_len = 1;
+	info->status.ampdu_ack_len = st->success;
+
+	if (st->is_probe)
+		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+	if (st->aggr)
+		info->flags |= IEEE80211_TX_CTL_AMPDU |
+			       IEEE80211_TX_STAT_AMPDU;
+
+	if (!st->ack_req)
+		info->flags |= IEEE80211_TX_CTL_NO_ACK;
+	else if (st->success)
+		info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
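+/* Convert a mac80211 TX rate to the on-chip rate field encoding.  For legacy
+ * rates this relies on hw_value being pre-packed as (phy type << 8) | rate
+ * index, presumably set up when the band was registered.
+ */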
+u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev,
+			 const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+	u16 rateval;
+	u8 phy, rate_idx;
+	u8 nss = 1;
+	u8 bw = 0;
+
+	if (rate->flags & IEEE80211_TX_RC_MCS) {
+		rate_idx = rate->idx;
+		nss = 1 + (rate->idx >> 3);
+		phy = MT_PHY_TYPE_HT;
+		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+			phy = MT_PHY_TYPE_HT_GF;
+		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			bw = 1;
+	} else {
+		const struct ieee80211_rate *r;
+		int band = dev->chandef.chan->band;
+		u16 val;
+
+		r = &dev->hw->wiphy->bands[band]->bitrates[rate->idx];
+		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+			val = r->hw_value_short;
+		else
+			val = r->hw_value;
+
+		phy = val >> 8;
+		rate_idx = val & 0xff;
+		bw = 0;
+	}
+
+	rateval = MT76_SET(MT_RXWI_RATE_MCS, rate_idx);
+	rateval |= MT76_SET(MT_RXWI_RATE_PHY, phy);
+	rateval |= MT76_SET(MT_RXWI_RATE_BW, bw);
+	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+		rateval |= MT_RXWI_RATE_SGI;
+
+	*nss_val = nss;
+	return rateval;
+}
+
+void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid,
+			    const struct ieee80211_tx_rate *rate)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	wcid->tx_rate = mt76_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+	wcid->tx_rate_set = true;
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+struct mt76_tx_status mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev)
+{
+	struct mt76_tx_status stat = {};
+	u32 val;
+
+	val = mt7601u_rr(dev, MT_TX_STAT_FIFO);
+	stat.valid = !!(val & MT_TX_STAT_FIFO_VALID);
+	stat.success = !!(val & MT_TX_STAT_FIFO_SUCCESS);
+	stat.aggr = !!(val & MT_TX_STAT_FIFO_AGGR);
+	stat.ack_req = !!(val & MT_TX_STAT_FIFO_ACKREQ);
+	stat.pktid = MT76_GET(MT_TX_STAT_FIFO_PID_TYPE, val);
+	stat.wcid = MT76_GET(MT_TX_STAT_FIFO_WCID, val);
+	stat.rate = MT76_GET(MT_TX_STAT_FIFO_RATE, val);
+
+	return stat;
+}
+
+void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
+{
+	struct ieee80211_tx_info info = {};
+	struct ieee80211_sta *sta = NULL;
+	struct mt76_wcid *wcid = NULL;
+	void *msta;
+
+	rcu_read_lock();
+	if (stat->wcid < ARRAY_SIZE(dev->wcid))
+		wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+	if (wcid) {
+		msta = container_of(wcid, struct mt76_sta, wcid);
+		sta = container_of(msta, struct ieee80211_sta,
+				   drv_priv);
+	}
+
+	mt76_mac_fill_tx_status(dev, &info, stat);
+	ieee80211_tx_status_noskb(dev->hw, sta, &info);
+	rcu_read_unlock();
+}
+
+void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot,
+				int ht_mode)
+{
+	int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+	bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+	u32 prot[6];
+	bool ht_rts[4] = {};
+	int i;
+
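+	/* prot[0..5] map onto the six consecutive protection config registers
+	 * starting at MT_CCK_PROT_CFG; judging by the rt2x00 layout these
+	 * should be CCK, OFDM, MM20, MM40, GF20 and GF40 respectively.
+	 */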
+	prot[0] = MT_PROT_NAV_SHORT |
+		  MT_PROT_TXOP_ALLOW_ALL |
+		  MT_PROT_RTS_THR_EN;
+	prot[1] = prot[0];
+	if (legacy_prot)
+		prot[1] |= MT_PROT_CTRL_CTS2SELF;
+
+	prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20;
+	prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL;
+
+	if (legacy_prot) {
+		prot[2] |= MT_PROT_RATE_CCK_11;
+		prot[3] |= MT_PROT_RATE_CCK_11;
+		prot[4] |= MT_PROT_RATE_CCK_11;
+		prot[5] |= MT_PROT_RATE_CCK_11;
+	} else {
+		prot[2] |= MT_PROT_RATE_OFDM_24;
+		prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
+		prot[4] |= MT_PROT_RATE_OFDM_24;
+		prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
+	}
+
+	switch (mode) {
+	case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+		break;
+
+	case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+		ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+		break;
+
+	case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+		ht_rts[1] = ht_rts[3] = true;
+		break;
+
+	case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+		ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+		break;
+	}
+
+	if (non_gf)
+		ht_rts[2] = ht_rts[3] = true;
+
+	for (i = 0; i < 4; i++)
+		if (ht_rts[i])
+			prot[i + 2] |= MT_PROT_CTRL_RTS_CTS;
+
+	for (i = 0; i < 6; i++)
+		mt7601u_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
+}
+
+void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb)
+{
+	if (short_preamb)
+		mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+	else
+		mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+}
+
+void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval)
+{
+	u32 val = mt7601u_rr(dev, MT_BEACON_TIME_CFG);
+
+	val &= ~(MT_BEACON_TIME_CFG_TIMER_EN |
+		 MT_BEACON_TIME_CFG_SYNC_MODE |
+		 MT_BEACON_TIME_CFG_TBTT_EN);
+
+	if (!enable) {
+		mt7601u_wr(dev, MT_BEACON_TIME_CFG, val);
+		return;
+	}
+
+	val &= ~MT_BEACON_TIME_CFG_INTVAL;
+	val |= MT76_SET(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
+		MT_BEACON_TIME_CFG_TIMER_EN |
+		MT_BEACON_TIME_CFG_SYNC_MODE |
+		MT_BEACON_TIME_CFG_TBTT_EN;
+
+	/* Commit the new interval and enable bits; without this write the
+	 * enable path would only ever compute val and never program the
+	 * hardware.
+	 */
+	mt7601u_wr(dev, MT_BEACON_TIME_CFG, val);
+}
+
+static void mt7601u_check_mac_err(struct mt7601u_dev *dev)
+{
+	u32 val = mt7601u_rr(dev, 0x10f4);
+
+	if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
+		return;
+
+	dev_err(dev->dev, "Error: MAC specific condition occurred\n");
+
+	mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+	udelay(10);
+	mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+}
+
+void mt7601u_mac_work(struct work_struct *work)
+{
+	struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+					       mac_work.work);
+	struct {
+		u32 addr_base;
+		u32 span;
+		u64 *stat_base;
+	} spans[] = {
+		{ MT_RX_STA_CNT0,	3,	dev->stats.rx_stat },
+		{ MT_TX_STA_CNT0,	3,	dev->stats.tx_stat },
+		{ MT_TX_AGG_STAT,	1,	dev->stats.aggr_stat },
+		{ MT_MPDU_DENSITY_CNT,	1,	dev->stats.zero_len_del },
+		{ MT_TX_AGG_CNT_BASE0,	8,	&dev->stats.aggr_n[0] },
+		{ MT_TX_AGG_CNT_BASE1,	8,	&dev->stats.aggr_n[16] },
+	};
+	u32 sum, n;
+	int i, j, k;
+
+	/* Note: using MCU_RANDOM_READ is actually slower than reading all the
+	 *	 registers by hand.  The MCU takes ca. 20ms to complete a read
+	 *	 of 24 registers, while reading them one by one takes roughly
+	 *	 24 * 200us =~ 5ms.
+	 */
+
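+	/* Each status register packs two 16-bit counters.  The aggregation
+	 * count registers count A-MPDUs of increasing size, so n accumulates
+	 * the number of aggregates seen and sum the total number of MPDUs
+	 * they contained.
+	 */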
+	k = 0;
+	n = 0;
+	sum = 0;
+	for (i = 0; i < ARRAY_SIZE(spans); i++)
+		for (j = 0; j < spans[i].span; j++) {
+			u32 val = mt7601u_rr(dev, spans[i].addr_base + j * 4);
+
+			spans[i].stat_base[j * 2] += val & 0xffff;
+			spans[i].stat_base[j * 2 + 1] += val >> 16;
+
+			/* Calculate average AMPDU length */
+			if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 &&
+			    spans[i].addr_base != MT_TX_AGG_CNT_BASE1)
+				continue;
+
+			n += (val >> 16) + (val & 0xffff);
+			sum += (val & 0xffff) * (1 + k * 2) +
+				(val >> 16) * (2 + k * 2);
+			k++;
+		}
+
+	atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1);
+
+	mt7601u_check_mac_err(dev);
+
+	ieee80211_queue_delayed_work(dev->hw, &dev->mac_work, 10 * HZ);
+}
+
+void
+mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+	u8 zmac[ETH_ALEN] = {};
+	u32 attr;
+
+	attr = MT76_SET(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+	       MT76_SET(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+	mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+	if (mac)
+		memcpy(zmac, mac, sizeof(zmac));
+
+	mt7601u_addr_wr(dev, MT_WCID_ADDR(idx), zmac);
+}
+
+void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev)
+{
+	struct ieee80211_sta *sta;
+	struct mt76_wcid *wcid;
+	void *msta;
+	u8 min_factor = 3;
+	int i;
+
+	rcu_read_lock();
+	for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
+		wcid = rcu_dereference(dev->wcid[i]);
+		if (!wcid)
+			continue;
+
+		msta = container_of(wcid, struct mt76_sta, wcid);
+		sta = container_of(msta, struct ieee80211_sta, drv_priv);
+
+		min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
+	}
+	rcu_read_unlock();
+
+	mt7601u_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
+		   MT76_SET(MT_MAX_LEN_CFG_AMPDU, min_factor));
+}
+
+static void
+mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
+{
+	u8 idx = MT76_GET(MT_RXWI_RATE_MCS, rate);
+
+	switch (MT76_GET(MT_RXWI_RATE_PHY, rate)) {
+	case MT_PHY_TYPE_OFDM:
+		if (WARN_ON(idx >= 8))
+			idx = 0;
+		idx += 4;
+
+		status->rate_idx = idx;
+		return;
+	case MT_PHY_TYPE_CCK:
+		if (idx >= 8) {
+			idx -= 8;
+			status->flag |= RX_FLAG_SHORTPRE;
+		}
+
+		if (WARN_ON(idx >= 4))
+			idx = 0;
+
+		status->rate_idx = idx;
+		return;
+	case MT_PHY_TYPE_HT_GF:
+		status->flag |= RX_FLAG_HT_GF;
+		/* fall through */
+	case MT_PHY_TYPE_HT:
+		status->flag |= RX_FLAG_HT;
+		status->rate_idx = idx;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	if (rate & MT_RXWI_RATE_SGI)
+		status->flag |= RX_FLAG_SHORT_GI;
+
+	if (rate & MT_RXWI_RATE_STBC)
+		status->flag |= 1 << RX_FLAG_STBC_SHIFT;
+
+	if (rate & MT_RXWI_RATE_BW)
+		status->flag |= RX_FLAG_40MHZ;
+}
+
+static void
+mt7601u_rx_monitor_beacon(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
+			  u16 rate, int rssi)
+{
+	dev->bcn_freq_off = rxwi->freq_off;
+	dev->bcn_phy_mode = MT76_GET(MT_RXWI_RATE_PHY, rate);
+	dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8);
+}
+
+static int
+mt7601u_rx_is_our_beacon(struct mt7601u_dev *dev, u8 *data)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
+
+	return ieee80211_is_beacon(hdr->frame_control) &&
+		ether_addr_equal(hdr->addr2, dev->ap_bssid);
+}
+
+u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
+			u8 *data, void *rxi)
+{
+	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+	struct mt7601u_rxwi *rxwi = rxi;
+	u32 len, ctl = le32_to_cpu(rxwi->ctl);
+	u16 rate = le16_to_cpu(rxwi->rate);
+	int rssi;
+
+	len = MT76_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+	if (len < 10)
+		return 0;
+
+	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
+		status->flag |= RX_FLAG_DECRYPTED;
+		status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
+	}
+
+	status->chains = BIT(0);
+	rssi = mt7601u_phy_get_rssi(dev, rxwi, rate);
+	status->chain_signal[0] = status->signal = rssi;
+	status->freq = dev->chandef.chan->center_freq;
+	status->band = dev->chandef.chan->band;
+
+	mt76_mac_process_rate(status, rate);
+
+	spin_lock_bh(&dev->con_mon_lock);
+	if (mt7601u_rx_is_our_beacon(dev, data))
+		mt7601u_rx_monitor_beacon(dev, rxwi, rate, rssi);
+	else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M))
+		dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8);
+	spin_unlock_bh(&dev->con_mon_lock);
+
+	return len;
+}
+
+static enum mt76_cipher_type
+mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+	memset(key_data, 0, 32);
+	if (!key)
+		return MT_CIPHER_NONE;
+
+	if (key->keylen > 32)
+		return MT_CIPHER_NONE;
+
+	memcpy(key_data, key->key, key->keylen);
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		return MT_CIPHER_WEP40;
+	case WLAN_CIPHER_SUITE_WEP104:
+		return MT_CIPHER_WEP104;
+	case WLAN_CIPHER_SUITE_TKIP:
+		return MT_CIPHER_TKIP;
+	case WLAN_CIPHER_SUITE_CCMP:
+		return MT_CIPHER_AES_CCMP;
+	default:
+		return MT_CIPHER_NONE;
+	}
+}
+
+int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx,
+			  struct ieee80211_key_conf *key)
+{
+	enum mt76_cipher_type cipher;
+	u8 key_data[32];
+	u8 iv_data[8];
+	u32 val;
+
+	cipher = mt76_mac_get_key_info(key, key_data);
+	if (cipher == MT_CIPHER_NONE && key)
+		return -EINVAL;
+
+	trace_set_key(dev, idx);
+
+	mt7601u_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+	memset(iv_data, 0, sizeof(iv_data));
+	if (key) {
+		iv_data[3] = key->keyidx << 6;
+		if (cipher >= MT_CIPHER_TKIP) {
+			/* Note: start with 1 to comply with spec,
+			 *	 (see comment on common/cmm_wpa.c:4291).
+			 */
+			iv_data[0] |= 1;
+			iv_data[3] |= 0x20;
+		}
+	}
+	mt7601u_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+	val = mt7601u_rr(dev, MT_WCID_ATTR(idx));
+	val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT;
+	val |= MT76_SET(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
+	       MT76_SET(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
+	val &= ~MT_WCID_ATTR_PAIRWISE;
+	val |= MT_WCID_ATTR_PAIRWISE *
+		!!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+	mt7601u_wr(dev, MT_WCID_ATTR(idx), val);
+
+	return 0;
+}
+
+int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx,
+			      struct ieee80211_key_conf *key)
+{
+	enum mt76_cipher_type cipher;
+	u8 key_data[32];
+	u32 val;
+
+	cipher = mt76_mac_get_key_info(key, key_data);
+	if (cipher == MT_CIPHER_NONE && key)
+		return -EINVAL;
+
+	trace_set_shared_key(dev, vif_idx, key_idx);
+
+	mt7601u_wr_copy(dev, MT_SKEY(vif_idx, key_idx),
+			key_data, sizeof(key_data));
+
+	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+	val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+	val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+	mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+	return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.h b/drivers/net/wireless/mediatek/mt7601u/mac.h
new file mode 100644
index 0000000..2c22d63
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_MAC_H
+#define __MT76_MAC_H
+
+struct mt76_tx_status {
+	u8 valid:1;
+	u8 success:1;
+	u8 aggr:1;
+	u8 ack_req:1;
+	u8 is_probe:1;
+	u8 wcid;
+	u8 pktid;
+	u8 retry;
+	u16 rate;
+} __packed __aligned(2);
+
+/* Note: on MT7601U the values in the original "RSSI" and "SNR" fields are
+ *	 not what those names suggest; the names used by this driver are
+ *	 educated guesses (see vendor mac/ral_omac.c).
+ */
+struct mt7601u_rxwi {
+	__le32 rxinfo;
+
+	__le32 ctl;
+
+	__le16 frag_sn;
+	__le16 rate;
+
+	u8 unknown;
+	u8 zero[3];
+
+	u8 snr;
+	u8 ant;
+	u8 gain;
+	u8 freq_off;
+
+	__le32 resv2;
+	__le32 expert_ant;
+} __packed __aligned(4);
+
+#define MT_RXINFO_BA			BIT(0)
+#define MT_RXINFO_DATA			BIT(1)
+#define MT_RXINFO_NULL			BIT(2)
+#define MT_RXINFO_FRAG			BIT(3)
+#define MT_RXINFO_U2M			BIT(4)
+#define MT_RXINFO_MULTICAST		BIT(5)
+#define MT_RXINFO_BROADCAST		BIT(6)
+#define MT_RXINFO_MYBSS			BIT(7)
+#define MT_RXINFO_CRCERR		BIT(8)
+#define MT_RXINFO_ICVERR		BIT(9)
+#define MT_RXINFO_MICERR		BIT(10)
+#define MT_RXINFO_AMSDU			BIT(11)
+#define MT_RXINFO_HTC			BIT(12)
+#define MT_RXINFO_RSSI			BIT(13)
+#define MT_RXINFO_L2PAD			BIT(14)
+#define MT_RXINFO_AMPDU			BIT(15)
+#define MT_RXINFO_DECRYPT		BIT(16)
+#define MT_RXINFO_BSSIDX3		BIT(17)
+#define MT_RXINFO_WAPI_KEY		BIT(18)
+#define MT_RXINFO_PN_LEN		GENMASK(21, 19)
+#define MT_RXINFO_SW_PKT_80211		BIT(22)
+#define MT_RXINFO_TCP_SUM_BYPASS	BIT(28)
+#define MT_RXINFO_IP_SUM_BYPASS		BIT(29)
+#define MT_RXINFO_TCP_SUM_ERR		BIT(30)
+#define MT_RXINFO_IP_SUM_ERR		BIT(31)
+
+#define MT_RXWI_CTL_WCID		GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX		GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX		GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF			GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN		GENMASK(27, 16)
+#define MT_RXWI_CTL_TID			GENMASK(31, 28)
+
+#define MT_RXWI_FRAG			GENMASK(3, 0)
+#define MT_RXWI_SN			GENMASK(15, 4)
+
+#define MT_RXWI_RATE_MCS		GENMASK(6, 0)
+#define MT_RXWI_RATE_BW			BIT(7)
+#define MT_RXWI_RATE_SGI		BIT(8)
+#define MT_RXWI_RATE_STBC		GENMASK(10, 9)
+#define MT_RXWI_RATE_ETXBF		BIT(11)
+#define MT_RXWI_RATE_SND		BIT(12)
+#define MT_RXWI_RATE_ITXBF		BIT(13)
+#define MT_RXWI_RATE_PHY		GENMASK(15, 14)
+
+#define MT_RXWI_GAIN_RSSI_VAL		GENMASK(5, 0)
+#define MT_RXWI_GAIN_RSSI_LNA_ID	GENMASK(7, 6)
+#define MT_RXWI_ANT_AUX_LNA		BIT(7)
+
+#define MT_RXWI_EANT_ENC_ANT_ID		GENMASK(7, 0)
+
+enum mt76_phy_type {
+	MT_PHY_TYPE_CCK,
+	MT_PHY_TYPE_OFDM,
+	MT_PHY_TYPE_HT,
+	MT_PHY_TYPE_HT_GF,
+};
+
+enum mt76_phy_bandwidth {
+	MT_PHY_BW_20,
+	MT_PHY_BW_40,
+};
+
+struct mt76_txwi {
+	__le16 flags;
+	__le16 rate_ctl;
+
+	u8 ack_ctl;
+	u8 wcid;
+	__le16 len_ctl;
+
+	__le32 iv;
+
+	__le32 eiv;
+
+	u8 aid;
+	u8 txstream;
+	__le16 ctl;
+} __packed __aligned(4);
+
+#define MT_TXWI_FLAGS_FRAG		BIT(0)
+#define MT_TXWI_FLAGS_MMPS		BIT(1)
+#define MT_TXWI_FLAGS_CFACK		BIT(2)
+#define MT_TXWI_FLAGS_TS		BIT(3)
+#define MT_TXWI_FLAGS_AMPDU		BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY	GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP		GENMASK(9, 8)
+#define MT_TXWI_FLAGS_CWMIN		GENMASK(12, 10)
+#define MT_TXWI_FLAGS_NO_RATE_FALLBACK	BIT(13)
+#define MT_TXWI_FLAGS_TX_RPT		BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT	BIT(15)
+
+#define MT_TXWI_RATE_MCS		GENMASK(6, 0)
+#define MT_TXWI_RATE_BW			BIT(7)
+#define MT_TXWI_RATE_SGI		BIT(8)
+#define MT_TXWI_RATE_STBC		GENMASK(10, 9)
+#define MT_TXWI_RATE_PHY_MODE		GENMASK(15, 14)
+
+#define MT_TXWI_ACK_CTL_REQ		BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ		BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW	GENMASK(7, 2)
+
+#define MT_TXWI_LEN_BYTE_CNT		GENMASK(11, 0)
+#define MT_TXWI_LEN_PKTID		GENMASK(15, 12)
+
+#define MT_TXWI_CTL_TX_POWER_ADJ	GENMASK(3, 0)
+#define MT_TXWI_CTL_CHAN_CHECK_PKT	BIT(4)
+#define MT_TXWI_CTL_PIFS_REV		BIT(6)
+
+u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
+			u8 *data, void *rxi);
+int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx,
+			  struct ieee80211_key_conf *key);
+void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid,
+			    const struct ieee80211_tx_rate *rate);
+
+int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx,
+			      struct ieee80211_key_conf *key);
+u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev,
+			 const struct ieee80211_tx_rate *rate, u8 *nss_val);
+struct mt76_tx_status
+mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev);
+void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
new file mode 100644
index 0000000..169384b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -0,0 +1,413 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "mac.h"
+#include <linux/etherdevice.h>
+#include <linux/version.h>
+
+static int mt7601u_start(struct ieee80211_hw *hw)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	int ret;
+
+	mutex_lock(&dev->mutex);
+
+	ret = mt7601u_mac_start(dev);
+	if (ret)
+		goto out;
+
+	ieee80211_queue_delayed_work(dev->hw, &dev->mac_work,
+				     MT_CALIBRATE_INTERVAL);
+	ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+out:
+	mutex_unlock(&dev->mutex);
+	return ret;
+}
+
+static void mt7601u_stop(struct ieee80211_hw *hw)
+{
+	struct mt7601u_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mutex);
+
+	cancel_delayed_work_sync(&dev->cal_work);
+	cancel_delayed_work_sync(&dev->mac_work);
+	mt7601u_mac_stop(dev);
+
+	mutex_unlock(&dev->mutex);
+}
+
+static int mt7601u_add_interface(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+	unsigned int idx = 0;
+	unsigned int wcid = GROUP_WCID(idx);
+
+	/* Note: for AP do the AP-STA things mt76 does:
+	 *	- beacon offsets
+	 *	- do mac address tricks
+	 *	- shift vif idx
+	 */
+	mvif->idx = idx;
+
+	if (dev->wcid_mask[wcid / BITS_PER_LONG] & BIT(wcid % BITS_PER_LONG))
+		return -ENOSPC;
+	dev->wcid_mask[wcid / BITS_PER_LONG] |= BIT(wcid % BITS_PER_LONG);
+	mvif->group_wcid.idx = wcid;
+	mvif->group_wcid.hw_key_idx = -1;
+
+	return 0;
+}
+
+static void mt7601u_remove_interface(struct ieee80211_hw *hw,
+				     struct ieee80211_vif *vif)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+	unsigned int wcid = mvif->group_wcid.idx;
+
+	dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
+}
+
+static int mt7601u_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	int ret = 0;
+
+	mutex_lock(&dev->mutex);
+
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+		ieee80211_stop_queues(hw);
+		ret = mt7601u_phy_set_channel(dev, &hw->conf.chandef);
+		ieee80211_wake_queues(hw);
+	}
+
+	mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+
+static void
+mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+		      unsigned int *total_flags, u64 multicast)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+		flags |= *total_flags & FIF_##_flag;			\
+		dev->rxfilter &= ~(_hw);				\
+		dev->rxfilter |= !(flags & FIF_##_flag) * (_hw);	\
+	} while (0)
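+	/* The hardware filter bits mean "drop this frame type" while the
+	 * FIF_* flags mean "pass it up", hence the negation above: a hardware
+	 * bit is set only when the corresponding FIF_* flag is not requested.
+	 */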
+
+	mutex_lock(&dev->mutex);
+
+	dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+	MT76_FILTER(OTHER_BSS, MT_RX_FILTR_CFG_PROMISC);
+	MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+	MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+	MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+			     MT_RX_FILTR_CFG_CTS |
+			     MT_RX_FILTR_CFG_CFEND |
+			     MT_RX_FILTR_CFG_CFACK |
+			     MT_RX_FILTR_CFG_BA |
+			     MT_RX_FILTR_CFG_CTRL_RSV);
+	MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+	*total_flags = flags;
+	mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+	mutex_unlock(&dev->mutex);
+}
+
+static void
+mt7601u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			 struct ieee80211_bss_conf *info, u32 changed)
+{
+	struct mt7601u_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mutex);
+
+	if (changed & BSS_CHANGED_ASSOC)
+		mt7601u_phy_con_cal_onoff(dev, info);
+
+	if (changed & BSS_CHANGED_BSSID) {
+		mt7601u_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);
+
+		/* Note: this is a hack because beacon_int is not changed
+		 *	 when leaving a BSS, nor is any more appropriate
+		 *	 event generated.  rt2x00 doesn't seem to be
+		 *	 bothered by this either.
+		 */
+		if (is_zero_ether_addr(info->bssid))
+			mt7601u_mac_config_tsf(dev, false, 0);
+	}
+
+	if (changed & BSS_CHANGED_BASIC_RATES) {
+		mt7601u_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates);
+		mt7601u_wr(dev, MT_HT_FBK_CFG0, 0x65432100);
+		mt7601u_wr(dev, MT_HT_FBK_CFG1, 0xedcba980);
+		mt7601u_wr(dev, MT_LG_FBK_CFG0, 0xedcba988);
+		mt7601u_wr(dev, MT_LG_FBK_CFG1, 0x00002100);
+	}
+
+	if (changed & BSS_CHANGED_BEACON_INT)
+		mt7601u_mac_config_tsf(dev, true, info->beacon_int);
+
+	if (changed & (BSS_CHANGED_HT | BSS_CHANGED_ERP_CTS_PROT))
+		mt7601u_mac_set_protection(dev, info->use_cts_prot,
+					   info->ht_operation_mode);
+
+	if (changed & BSS_CHANGED_ERP_PREAMBLE)
+		mt7601u_mac_set_short_preamble(dev, info->use_short_preamble);
+
+	if (changed & BSS_CHANGED_ERP_SLOT) {
+		int slottime = info->use_short_slot ? 9 : 20;
+
+		mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+			       MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+	}
+
+	if (changed & BSS_CHANGED_ASSOC)
+		mt7601u_phy_recalibrate_after_assoc(dev);
+
+	mutex_unlock(&dev->mutex);
+}
+
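+/* Allocate the first free entry in the WCID bitmap.  Indices past 119 appear
+ * to be reserved for the per-vif group WCIDs at the top of the table (see
+ * GROUP_WCID), so allocation fails once the station range is exhausted.
+ */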
+static int
+mt76_wcid_alloc(struct mt7601u_dev *dev)
+{
+	int i, idx = 0;
+
+	for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
+		idx = ffs(~dev->wcid_mask[i]);
+		if (!idx)
+			continue;
+
+		idx--;
+		dev->wcid_mask[i] |= BIT(idx);
+		break;
+	}
+
+	idx = i * BITS_PER_LONG + idx;
+	if (idx > 119)
+		return -1;
+
+	return idx;
+}
+
+static int
+mt7601u_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		struct ieee80211_sta *sta)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+	struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+	int ret = 0;
+	int idx = 0;
+
+	mutex_lock(&dev->mutex);
+
+	idx = mt76_wcid_alloc(dev);
+	if (idx < 0) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	msta->wcid.idx = idx;
+	msta->wcid.hw_key_idx = -1;
+	mt7601u_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+	mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+	rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+	mt7601u_mac_set_ampdu_factor(dev);
+
+out:
+	mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+
+static int
+mt7601u_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+	int idx = msta->wcid.idx;
+
+	mutex_lock(&dev->mutex);
+	rcu_assign_pointer(dev->wcid[idx], NULL);
+	mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+	dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
+	mt7601u_mac_wcid_setup(dev, idx, 0, NULL);
+	mt7601u_mac_set_ampdu_factor(dev);
+	mutex_unlock(&dev->mutex);
+
+	return 0;
+}
+
+static void
+mt7601u_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+}
+
+static void
+mt7601u_sw_scan(struct ieee80211_hw *hw,
+		struct ieee80211_vif *vif,
+		const u8 *mac_addr)
+{
+	struct mt7601u_dev *dev = hw->priv;
+
+	mt7601u_agc_save(dev);
+	set_bit(MT7601U_STATE_SCANNING, &dev->state);
+}
+
+static void
+mt7601u_sw_scan_complete(struct ieee80211_hw *hw,
+			 struct ieee80211_vif *vif)
+{
+	struct mt7601u_dev *dev = hw->priv;
+
+	mt7601u_agc_restore(dev);
+	clear_bit(MT7601U_STATE_SCANNING, &dev->state);
+}
+
+static int
+mt7601u_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		struct ieee80211_key_conf *key)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+	struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL;
+	struct mt76_wcid *wcid = msta ? &msta->wcid : &mvif->group_wcid;
+	int idx = key->keyidx;
+	int ret;
+
+	if (cmd == SET_KEY) {
+		key->hw_key_idx = wcid->idx;
+		wcid->hw_key_idx = idx;
+	} else {
+		if (idx == wcid->hw_key_idx)
+			wcid->hw_key_idx = -1;
+
+		key = NULL;
+	}
+
+	if (!msta) {
+		if (key || wcid->hw_key_idx == idx) {
+			ret = mt76_mac_wcid_set_key(dev, wcid->idx, key);
+			if (ret)
+				return ret;
+		}
+
+		return mt76_mac_shared_key_setup(dev, mvif->idx, idx, key);
+	}
+
+	return mt76_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+
+static int mt7601u_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+	struct mt7601u_dev *dev = hw->priv;
+
+	mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);
+
+	return 0;
+}
+
+static int
+mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  enum ieee80211_ampdu_mlme_action action,
+		  struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+
+	WARN_ON(msta->wcid.idx > GROUP_WCID(0));
+
+	switch (action) {
+	case IEEE80211_AMPDU_RX_START:
+		mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+			   BIT(16 + tid));
+		break;
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+		break;
+	case IEEE80211_AMPDU_TX_START:
+		msta->agg_ssn[tid] = *ssn << 4;
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	}
+
+	return 0;
+}
+
+static void
+mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			 struct ieee80211_sta *sta)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+	struct ieee80211_sta_rates *rates;
+	struct ieee80211_tx_rate rate = {};
+
+	rcu_read_lock();
+	rates = rcu_dereference(sta->rates);
+
+	if (!rates)
+		goto out;
+
+	rate.idx = rates->rate[0].idx;
+	rate.flags = rates->rate[0].flags;
+	mt76_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+
+out:
+	rcu_read_unlock();
+}
+
+const struct ieee80211_ops mt7601u_ops = {
+	.tx = mt7601u_tx,
+	.start = mt7601u_start,
+	.stop = mt7601u_stop,
+	.add_interface = mt7601u_add_interface,
+	.remove_interface = mt7601u_remove_interface,
+	.config = mt7601u_config,
+	.configure_filter = mt76_configure_filter,
+	.bss_info_changed = mt7601u_bss_info_changed,
+	.sta_add = mt7601u_sta_add,
+	.sta_remove = mt7601u_sta_remove,
+	.sta_notify = mt7601u_sta_notify,
+	.set_key = mt7601u_set_key,
+	.conf_tx = mt7601u_conf_tx,
+	.sw_scan_start = mt7601u_sw_scan,
+	.sw_scan_complete = mt7601u_sw_scan_complete,
+	.ampdu_action = mt76_ampdu_action,
+	.sta_rate_tbl_update = mt76_sta_rate_tbl_update,
+	.set_rts_threshold = mt7601u_set_rts_threshold,
+};
diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c
new file mode 100644
index 0000000..fbb1986
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c
@@ -0,0 +1,534 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#include "mt7601u.h"
+#include "dma.h"
+#include "mcu.h"
+#include "usb.h"
+#include "trace.h"
+
+#define MCU_FW_URB_MAX_PAYLOAD		0x3800
+#define MCU_FW_URB_SIZE			(MCU_FW_URB_MAX_PAYLOAD + 12)
+#define MCU_RESP_URB_SIZE		1024
+
+static inline int firmware_running(struct mt7601u_dev *dev)
+{
+	return mt7601u_rr(dev, MT_MCU_COM_REG0) == 1;
+}
+
+static inline void skb_put_le32(struct sk_buff *skb, u32 val)
+{
+	put_unaligned_le32(val, skb_put(skb, 4));
+}
+
+static inline void mt7601u_dma_skb_wrap_cmd(struct sk_buff *skb,
+					    u8 seq, enum mcu_cmd cmd)
+{
+	WARN_ON(mt7601u_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
+				     MT76_SET(MT_TXD_CMD_INFO_SEQ, seq) |
+				     MT76_SET(MT_TXD_CMD_INFO_TYPE, cmd)));
+}
+
+static inline void trace_mt_mcu_msg_send_cs(struct mt7601u_dev *dev,
+					    struct sk_buff *skb, bool need_resp)
+{
+	u32 i, csum = 0;
+
+	for (i = 0; i < skb->len / 4; i++)
+		csum ^= get_unaligned_le32(skb->data + i * 4);
+
+	trace_mt_mcu_msg_send(dev, skb, csum, need_resp);
+}
+
+static struct sk_buff *
+mt7601u_mcu_msg_alloc(struct mt7601u_dev *dev, const void *data, int len)
+{
+	struct sk_buff *skb;
+
+	WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
+
+	skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+	if (!skb)
+		return NULL;
+	skb_reserve(skb, MT_DMA_HDR_LEN);
+	memcpy(skb_put(skb, len), data, len);
+
+	return skb;
+}
+
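+/* Wait for a response to command @seq.  Unrelated or errored responses are
+ * logged and skipped, and the response buffer URB is resubmitted after every
+ * completion so the endpoint always has a buffer posted.
+ */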
+static int mt7601u_mcu_wait_resp(struct mt7601u_dev *dev, u8 seq)
+{
+	struct urb *urb = dev->mcu.resp.urb;
+	u32 rxfce;
+	int urb_status, ret, i = 5;
+
+	while (i--) {
+		if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
+						 msecs_to_jiffies(300))) {
+			dev_warn(dev->dev, "Warning: %s retrying\n", __func__);
+			continue;
+		}
+
+		/* Make copies of important data before reusing the urb */
+		rxfce = get_unaligned_le32(dev->mcu.resp.buf);
+		urb_status = urb->status * mt7601u_urb_has_error(urb);
+
+		ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+					     &dev->mcu.resp, GFP_KERNEL,
+					     mt7601u_complete_urb,
+					     &dev->mcu.resp_cmpl);
+		if (ret)
+			return ret;
+
+		if (urb_status)
+			dev_err(dev->dev, "Error: MCU resp urb failed:%d\n",
+				urb_status);
+
+		if (MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
+		    MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
+			return 0;
+
+		dev_err(dev->dev, "Error: MCU resp evt:%hhx seq:%hhx-%hhx!\n",
+			MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
+			seq, MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
+	}
+
+	dev_err(dev->dev, "Error: %s timed out\n", __func__);
+	return -ETIMEDOUT;
+}
+
+static int
+mt7601u_mcu_msg_send(struct mt7601u_dev *dev, struct sk_buff *skb,
+		     enum mcu_cmd cmd, bool wait_resp)
+{
+	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+	unsigned cmd_pipe = usb_sndbulkpipe(usb_dev,
+					    dev->out_eps[MT_EP_OUT_INBAND_CMD]);
+	int sent, ret;
+	u8 seq = 0;
+
+	if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+		return 0;
+
+	mutex_lock(&dev->mcu.mutex);
+
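+	/* A sequence number of zero is never handed out; it marks commands
+	 * which do not expect a response.
+	 */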
+	if (wait_resp)
+		while (!seq)
+			seq = ++dev->mcu.msg_seq & 0xf;
+
+	mt7601u_dma_skb_wrap_cmd(skb, seq, cmd);
+
+	if (dev->mcu.resp_cmpl.done)
+		dev_err(dev->dev, "Error: MCU response pre-completed!\n");
+
+	trace_mt_mcu_msg_send_cs(dev, skb, wait_resp);
+	trace_mt_submit_urb_sync(dev, cmd_pipe, skb->len);
+	ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
+	if (ret) {
+		dev_err(dev->dev, "Error: send MCU cmd failed:%d\n", ret);
+		goto out;
+	}
+	if (sent != skb->len)
+		dev_err(dev->dev, "Error: %s sent != skb->len\n", __func__);
+
+	if (wait_resp)
+		ret = mt7601u_mcu_wait_resp(dev, seq);
+out:
+	mutex_unlock(&dev->mcu.mutex);
+
+	consume_skb(skb);
+
+	return ret;
+}
+
+static int mt7601u_mcu_function_select(struct mt7601u_dev *dev,
+				       enum mcu_function func, u32 val)
+{
+	struct sk_buff *skb;
+	struct {
+		__le32 id;
+		__le32 value;
+	} __packed __aligned(4) msg = {
+		.id = cpu_to_le32(func),
+		.value = cpu_to_le32(val),
+	};
+
+	skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
+	if (!skb)
+		return -ENOMEM;
+	return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
+}
+
+int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga)
+{
+	int ret;
+
+	if (!test_bit(MT7601U_STATE_MCU_RUNNING, &dev->state))
+		return 0;
+
+	ret = mt7601u_mcu_function_select(dev, ATOMIC_TSSI_SETTING,
+					  use_hvga);
+	if (ret) {
+		dev_warn(dev->dev, "Warning: MCU TSSI read kick failed\n");
+		return ret;
+	}
+
+	dev->tssi_read_trig = true;
+
+	return 0;
+}
+
+int
+mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val)
+{
+	struct sk_buff *skb;
+	struct {
+		__le32 id;
+		__le32 value;
+	} __packed __aligned(4) msg = {
+		.id = cpu_to_le32(cal),
+		.value = cpu_to_le32(val),
+	};
+
+	skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
+	if (!skb)
+		return -ENOMEM;
+	return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
+}
+
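+/* Write register/value pairs through the MCU.  The list is sent in chunks
+ * small enough for a single in-band command packet (8 bytes per pair),
+ * recursing for the remainder; only the final chunk asks the MCU for a
+ * confirmation.
+ */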
+int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base,
+			    const struct mt76_reg_pair *data, int n)
+{
+	const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
+	struct sk_buff *skb;
+	int cnt, i, ret;
+
+	if (!n)
+		return 0;
+
+	cnt = min(max_vals_per_cmd, n);
+
+	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+	skb_reserve(skb, MT_DMA_HDR_LEN);
+
+	for (i = 0; i < cnt; i++) {
+		skb_put_le32(skb, base + data[i].reg);
+		skb_put_le32(skb, data[i].value);
+	}
+
+	ret = mt7601u_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+	if (ret)
+		return ret;
+
+	return mt7601u_write_reg_pairs(dev, base, data + cnt, n - cnt);
+}
+
+int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset,
+			     const u32 *data, int n)
+{
+	const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
+	struct sk_buff *skb;
+	int cnt, i, ret;
+
+	if (!n)
+		return 0;
+
+	cnt = min(max_regs_per_cmd, n);
+
+	skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+	skb_reserve(skb, MT_DMA_HDR_LEN);
+
+	skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
+	for (i = 0; i < cnt; i++)
+		skb_put_le32(skb, data[i]);
+
+	ret = mt7601u_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
+	if (ret)
+		return ret;
+
+	return mt7601u_burst_write_regs(dev, offset + cnt * 4,
+					data + cnt, n - cnt);
+}
+
+struct mt76_fw_header {
+	__le32 ilm_len;
+	__le32 dlm_len;
+	__le16 build_ver;
+	__le16 fw_ver;
+	u8 pad[4];
+	char build_time[16];
+};
+
+struct mt76_fw {
+	struct mt76_fw_header hdr;
+	u8 ivb[MT_MCU_IVB_SIZE];
+	u8 ilm[];
+};
+
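+/* Push one chunk of firmware through the FCE: prepend an in-band DMA
+ * descriptor, program the destination address and (padded) length into the
+ * FCE registers, then send the buffer and bump the CPU descriptor index.
+ */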
+static int __mt7601u_dma_fw(struct mt7601u_dev *dev,
+			    const struct mt7601u_dma_buf *dma_buf,
+			    const void *data, u32 len, u32 dst_addr)
+{
+	DECLARE_COMPLETION_ONSTACK(cmpl);
+	struct mt7601u_dma_buf buf = *dma_buf; /* we need to fake length */
+	__le32 reg;
+	u32 val;
+	int ret;
+
+	reg = cpu_to_le32(MT76_SET(MT_TXD_INFO_TYPE, DMA_PACKET) |
+			  MT76_SET(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
+			  MT76_SET(MT_TXD_INFO_LEN, len));
+	memcpy(buf.buf, &reg, sizeof(reg));
+	memcpy(buf.buf + sizeof(reg), data, len);
+	memset(buf.buf + sizeof(reg) + len, 0, 8);
+
+	ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+				       MT_FCE_DMA_ADDR, dst_addr);
+	if (ret)
+		return ret;
+	len = roundup(len, 4);
+	ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+				       MT_FCE_DMA_LEN, len << 16);
+	if (ret)
+		return ret;
+
+	buf.len = MT_DMA_HDR_LEN + len + 4;
+	ret = mt7601u_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
+				     &buf, GFP_KERNEL,
+				     mt7601u_complete_urb, &cmpl);
+	if (ret)
+		return ret;
+
+	if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
+		dev_err(dev->dev, "Error: firmware upload timed out\n");
+		usb_kill_urb(buf.urb);
+		return -ETIMEDOUT;
+	}
+	if (mt7601u_urb_has_error(buf.urb)) {
+		dev_err(dev->dev, "Error: firmware upload urb failed:%d\n",
+			buf.urb->status);
+		return buf.urb->status;
+	}
+
+	val = mt7601u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+	val++;
+	mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+	return 0;
+}
+
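+/* Upload @data in MCU_FW_URB_MAX_PAYLOAD-sized chunks, waiting after each
+ * chunk for what appears to be an acknowledgment from the MCU (bit 31 of
+ * MT_MCU_COM_REG1) before sending the next one.
+ */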
+static int
+mt7601u_dma_fw(struct mt7601u_dev *dev, struct mt7601u_dma_buf *dma_buf,
+	       const void *data, int len, u32 dst_addr)
+{
+	int n, ret;
+
+	if (len == 0)
+		return 0;
+
+	n = min(MCU_FW_URB_MAX_PAYLOAD, len);
+	ret = __mt7601u_dma_fw(dev, dma_buf, data, n, dst_addr);
+	if (ret)
+		return ret;
+
+	if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
+		return -ETIMEDOUT;
+
+	return mt7601u_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
+}
+
+static int
+mt7601u_upload_firmware(struct mt7601u_dev *dev, const struct mt76_fw *fw)
+{
+	struct mt7601u_dma_buf dma_buf;
+	void *ivb;
+	u32 ilm_len, dlm_len;
+	int i, ret;
+
+	ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
+	if (!ivb || mt7601u_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
+	dev_dbg(dev->dev, "loading FW - ILM %u + IVB %zu\n",
+		ilm_len, sizeof(fw->ivb));
+	ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
+	if (ret)
+		goto error;
+
+	dlm_len = le32_to_cpu(fw->hdr.dlm_len);
+	dev_dbg(dev->dev, "loading FW - DLM %u\n", dlm_len);
+	ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
+			     dlm_len, MT_MCU_DLM_OFFSET);
+	if (ret)
+		goto error;
+
+	ret = mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+				     0x12, 0, ivb, sizeof(fw->ivb));
+	if (ret < 0)
+		goto error;
+	ret = 0;
+
+	for (i = 100; i && !firmware_running(dev); i--)
+		msleep(10);
+	if (!i) {
+		ret = -ETIMEDOUT;
+		goto error;
+	}
+
+	dev_dbg(dev->dev, "Firmware running!\n");
+error:
+	kfree(ivb);
+	mt7601u_usb_free_buf(dev, &dma_buf);
+
+	return ret;
+}
+
+static int mt7601u_load_firmware(struct mt7601u_dev *dev)
+{
+	const struct firmware *fw;
+	const struct mt76_fw_header *hdr;
+	int len, ret;
+	u32 val;
+
+	mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+					 MT_USB_DMA_CFG_TX_BULK_EN));
+
+	if (firmware_running(dev))
+		return 0;
+
+	ret = request_firmware(&fw, MT7601U_FIRMWARE, dev->dev);
+	if (ret)
+		return ret;
+
+	if (!fw || !fw->data || fw->size < sizeof(*hdr))
+		goto err_inv_fw;
+
+	hdr = (const struct mt76_fw_header *) fw->data;
+
+	if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
+		goto err_inv_fw;
+
+	len = sizeof(*hdr);
+	len += le32_to_cpu(hdr->ilm_len);
+	len += le32_to_cpu(hdr->dlm_len);
+
+	if (fw->size != len)
+		goto err_inv_fw;
+
+	val = le16_to_cpu(hdr->fw_ver);
+	dev_info(dev->dev,
+		 "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
+		 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
+		 le16_to_cpu(hdr->build_ver), hdr->build_time);
+
+	len = le32_to_cpu(hdr->ilm_len);
+
+	mt7601u_wr(dev, 0x94c, 0);
+	mt7601u_wr(dev, MT_FCE_PSE_CTRL, 0);
+
+	mt7601u_vendor_reset(dev);
+	msleep(5);
+
+	mt7601u_wr(dev, 0xa44, 0);
+	mt7601u_wr(dev, 0x230, 0x84210);
+	mt7601u_wr(dev, 0x400, 0x80c00);
+	mt7601u_wr(dev, 0x800, 1);
+
+	mt7601u_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
+					 MT_PBF_CFG_TX1Q_EN |
+					 MT_PBF_CFG_TX2Q_EN |
+					 MT_PBF_CFG_TX3Q_EN));
+
+	mt7601u_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+	mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+					 MT_USB_DMA_CFG_TX_BULK_EN));
+	val = mt76_set(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_CLR);
+	val &= ~MT_USB_DMA_CFG_TX_CLR;
+	mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+
+	/* FCE tx_fs_base_ptr */
+	mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+	/* FCE tx_fs_max_cnt */
+	mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
+	/* FCE pdma enable */
+	mt7601u_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+	/* FCE skip_fs_en */
+	mt7601u_wr(dev, MT_FCE_SKIP_FS, 3);
+
+	ret = mt7601u_upload_firmware(dev, (const struct mt76_fw *)fw->data);
+
+	release_firmware(fw);
+
+	return ret;
+
+err_inv_fw:
+	dev_err(dev->dev, "Invalid firmware image\n");
+	release_firmware(fw);
+	return -ENOENT;
+}
+
+int mt7601u_mcu_init(struct mt7601u_dev *dev)
+{
+	int ret;
+
+	mutex_init(&dev->mcu.mutex);
+
+	ret = mt7601u_load_firmware(dev);
+	if (ret)
+		return ret;
+
+	set_bit(MT7601U_STATE_MCU_RUNNING, &dev->state);
+
+	return 0;
+}
+
+int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev)
+{
+	int ret;
+
+	ret = mt7601u_mcu_function_select(dev, Q_SELECT, 1);
+	if (ret)
+		return ret;
+
+	init_completion(&dev->mcu.resp_cmpl);
+	if (mt7601u_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) {
+		mt7601u_usb_free_buf(dev, &dev->mcu.resp);
+		return -ENOMEM;
+	}
+
+	ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+				     &dev->mcu.resp, GFP_KERNEL,
+				     mt7601u_complete_urb, &dev->mcu.resp_cmpl);
+	if (ret) {
+		mt7601u_usb_free_buf(dev, &dev->mcu.resp);
+		return ret;
+	}
+
+	return 0;
+}
+
+void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev)
+{
+	usb_kill_urb(dev->mcu.resp.urb);
+	mt7601u_usb_free_buf(dev, &dev->mcu.resp);
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.h b/drivers/net/wireless/mediatek/mt7601u/mcu.h
new file mode 100644
index 0000000..4a66d10
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mcu.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_MCU_H
+#define __MT7601U_MCU_H
+
+struct mt7601u_dev;
+
+/* Register definitions */
+#define MT_MCU_RESET_CTL		0x070C
+#define MT_MCU_INT_LEVEL		0x0718
+#define MT_MCU_COM_REG0			0x0730
+#define MT_MCU_COM_REG1			0x0734
+#define MT_MCU_COM_REG2			0x0738
+#define MT_MCU_COM_REG3			0x073C
+
+#define MT_MCU_IVB_SIZE			0x40
+#define MT_MCU_DLM_OFFSET		0x80000
+
+#define MT_MCU_MEMMAP_WLAN		0x00410000
+#define MT_MCU_MEMMAP_BBP		0x40000000
+#define MT_MCU_MEMMAP_RF		0x80000000
+
+#define INBAND_PACKET_MAX_LEN		192
+
+enum mcu_cmd {
+	CMD_FUN_SET_OP = 1,
+	CMD_LOAD_CR = 2,
+	CMD_INIT_GAIN_OP = 3,
+	CMD_DYNC_VGA_OP = 6,
+	CMD_TDLS_CH_SW = 7,
+	CMD_BURST_WRITE = 8,
+	CMD_READ_MODIFY_WRITE = 9,
+	CMD_RANDOM_READ = 10,
+	CMD_BURST_READ = 11,
+	CMD_RANDOM_WRITE = 12,
+	CMD_LED_MODE_OP = 16,
+	CMD_POWER_SAVING_OP = 20,
+	CMD_WOW_CONFIG = 21,
+	CMD_WOW_QUERY = 22,
+	CMD_WOW_FEATURE = 24,
+	CMD_CARRIER_DETECT_OP = 28,
+	CMD_RADOR_DETECT_OP = 29,
+	CMD_SWITCH_CHANNEL_OP = 30,
+	CMD_CALIBRATION_OP = 31,
+	CMD_BEACON_OP = 32,
+	CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_function {
+	Q_SELECT = 1,
+	ATOMIC_TSSI_SETTING = 5,
+};
+
+enum mcu_power_mode {
+	RADIO_OFF = 0x30,
+	RADIO_ON = 0x31,
+	RADIO_OFF_AUTO_WAKEUP = 0x32,
+	RADIO_OFF_ADVANCE = 0x33,
+	RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_calibrate {
+	MCU_CAL_R = 1,
+	MCU_CAL_DCOC,
+	MCU_CAL_LC,
+	MCU_CAL_LOFT,
+	MCU_CAL_TXIQ,
+	MCU_CAL_BW,
+	MCU_CAL_DPD,
+	MCU_CAL_RXIQ,
+	MCU_CAL_TXDCOC,
+};
+
+int mt7601u_mcu_init(struct mt7601u_dev *dev);
+int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev);
+void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev);
+
+int
+mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val);
+int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
new file mode 100644
index 0000000..9102be6b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
@@ -0,0 +1,390 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MT7601U_H
+#define MT7601U_H
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/completion.h>
+#include <net/mac80211.h>
+#include <linux/debugfs.h>
+
+#include "regs.h"
+#include "util.h"
+
+#define MT_CALIBRATE_INTERVAL		(4 * HZ)
+
+#define MT_FREQ_CAL_INIT_DELAY		(30 * HZ)
+#define MT_FREQ_CAL_CHECK_INTERVAL	(10 * HZ)
+#define MT_FREQ_CAL_ADJ_INTERVAL	(HZ / 2)
+
+#define MT_BBP_REG_VERSION		0x00
+
+#define MT_USB_AGGR_SIZE_LIMIT		28 /* * 1024B */
+#define MT_USB_AGGR_TIMEOUT		0x80 /* * 33ns */
+#define MT_RX_ORDER			3
+#define MT_RX_URB_SIZE			(PAGE_SIZE << MT_RX_ORDER)
+
+struct mt7601u_dma_buf {
+	struct urb *urb;
+	void *buf;
+	dma_addr_t dma;
+	size_t len;
+};
+
+struct mt7601u_mcu {
+	struct mutex mutex;
+
+	u8 msg_seq;
+
+	struct mt7601u_dma_buf resp;
+	struct completion resp_cmpl;
+};
+
+struct mt7601u_freq_cal {
+	struct delayed_work work;
+	u8 freq;
+	bool enabled;
+	bool adjusting;
+};
+
+struct mac_stats {
+	u64 rx_stat[6];
+	u64 tx_stat[6];
+	u64 aggr_stat[2];
+	u64 aggr_n[32];
+	u64 zero_len_del[2];
+};
+
+#define N_RX_ENTRIES	16
+struct mt7601u_rx_queue {
+	struct mt7601u_dev *dev;
+
+	struct mt7601u_dma_buf_rx {
+		struct urb *urb;
+		struct page *p;
+	} e[N_RX_ENTRIES];
+
+	unsigned int start;
+	unsigned int end;
+	unsigned int entries;
+	unsigned int pending;
+};
+
+#define N_TX_ENTRIES	64
+
+struct mt7601u_tx_queue {
+	struct mt7601u_dev *dev;
+
+	struct mt7601u_dma_buf_tx {
+		struct urb *urb;
+		struct sk_buff *skb;
+	} e[N_TX_ENTRIES];
+
+	unsigned int start;
+	unsigned int end;
+	unsigned int entries;
+	unsigned int used;
+	unsigned int fifo_seq;
+};
+
+/* WCID allocation:
+ *     0: mcast wcid
+ *     1: bssid wcid
+ *  1...: STAs
+ * ...7e: group wcids
+ *    7f: reserved
+ */
+#define N_WCIDS		128
+#define GROUP_WCID(idx)	(N_WCIDS - 2 - (idx))
+
+struct mt7601u_eeprom_params;
+
+#define MT_EE_TEMPERATURE_SLOPE		39
+#define MT_FREQ_OFFSET_INVALID		-128
+
+enum mt_temp_mode {
+	MT_TEMP_MODE_NORMAL,
+	MT_TEMP_MODE_HIGH,
+	MT_TEMP_MODE_LOW,
+};
+
+enum mt_bw {
+	MT_BW_20,
+	MT_BW_40,
+};
+
+enum {
+	MT7601U_STATE_INITIALIZED,
+	MT7601U_STATE_REMOVED,
+	MT7601U_STATE_WLAN_RUNNING,
+	MT7601U_STATE_MCU_RUNNING,
+	MT7601U_STATE_SCANNING,
+	MT7601U_STATE_READING_STATS,
+	MT7601U_STATE_MORE_STATS,
+};
+
+/**
+ * struct mt7601u_dev - adapter structure
+ * @lock:		protects @wcid->tx_rate.
+ * @tx_lock:		protects @tx_q and changes of MT7601U_STATE_*_STATS
+			flags in @state.
+ * @rx_lock:		protects @rx_q.
+ * @con_mon_lock:	protects @ap_bssid, @bcn_*, @avg_rssi.
+ * @mutex:		ensures exclusive access from mac80211 callbacks.
+ * @vendor_req_mutex:	ensures atomicity of vendor requests.
+ * @reg_atomic_mutex:	ensures atomicity of indirect register accesses
+ *			(accesses to RF and BBP).
+ * @hw_atomic_mutex:	ensures exclusive access to HW during critical
+ *			operations (power management, channel switch).
+ */
+struct mt7601u_dev {
+	struct ieee80211_hw *hw;
+	struct device *dev;
+
+	unsigned long state;
+
+	struct mutex mutex;
+
+	unsigned long wcid_mask[N_WCIDS / BITS_PER_LONG];
+
+	struct cfg80211_chan_def chandef;
+	struct ieee80211_supported_band *sband_2g;
+
+	struct mt7601u_mcu mcu;
+
+	struct delayed_work cal_work;
+	struct delayed_work mac_work;
+
+	struct workqueue_struct *stat_wq;
+	struct delayed_work stat_work;
+
+	struct mt76_wcid *mon_wcid;
+	struct mt76_wcid __rcu *wcid[N_WCIDS];
+
+	spinlock_t lock;
+
+	const u16 *beacon_offsets;
+
+	u8 macaddr[ETH_ALEN];
+	struct mt7601u_eeprom_params *ee;
+
+	struct mutex vendor_req_mutex;
+	struct mutex reg_atomic_mutex;
+	struct mutex hw_atomic_mutex;
+
+	u32 rxfilter;
+	u32 debugfs_reg;
+
+	u8 out_eps[8];
+	u8 in_eps[8];
+	u16 out_max_packet;
+	u16 in_max_packet;
+
+	/* TX */
+	spinlock_t tx_lock;
+	struct mt7601u_tx_queue *tx_q;
+
+	atomic_t avg_ampdu_len;
+
+	/* RX */
+	spinlock_t rx_lock;
+	struct tasklet_struct rx_tasklet;
+	struct mt7601u_rx_queue rx_q;
+
+	/* Connection monitoring things */
+	spinlock_t con_mon_lock;
+	u8 ap_bssid[ETH_ALEN];
+
+	s8 bcn_freq_off;
+	u8 bcn_phy_mode;
+
+	int avg_rssi; /* starts at 0 and converges */
+
+	u8 agc_save;
+
+	struct mt7601u_freq_cal freq_cal;
+
+	bool tssi_read_trig;
+
+	s8 tssi_init;
+	s8 tssi_init_hvga;
+	s16 tssi_init_hvga_offset_db;
+
+	int prev_pwr_diff;
+
+	enum mt_temp_mode temp_mode;
+	int curr_temp;
+	int dpd_temp;
+	s8 raw_temp;
+	bool pll_lock_protect;
+
+	u8 bw;
+	bool chan_ext_below;
+
+	/* PA mode */
+	u32 rf_pa_mode[2];
+
+	struct mac_stats stats;
+};
+
+struct mt7601u_tssi_params {
+	char tssi0;
+	int trgt_power;
+};
+
+struct mt76_wcid {
+	u8 idx;
+	u8 hw_key_idx;
+
+	u16 tx_rate;
+	bool tx_rate_set;
+	u8 tx_rate_nss;
+};
+
+struct mt76_vif {
+	u8 idx;
+
+	struct mt76_wcid group_wcid;
+};
+
+struct mt76_sta {
+	struct mt76_wcid wcid;
+	u16 agg_ssn[IEEE80211_NUM_TIDS];
+};
+
+struct mt76_reg_pair {
+	u32 reg;
+	u32 value;
+};
+
+struct mt7601u_rxwi;
+
+extern const struct ieee80211_ops mt7601u_ops;
+
+void mt7601u_init_debugfs(struct mt7601u_dev *dev);
+
+u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset);
+void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val);
+u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val);
+u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val);
+void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset,
+		     const void *data, int len);
+
+int mt7601u_wait_asic_ready(struct mt7601u_dev *dev);
+bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+	       int timeout);
+bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+		    int timeout);
+
+/* Compatibility with mt76 */
+#define mt76_rmw_field(_dev, _reg, _field, _val)	\
+	mt76_rmw(_dev, _reg, _field, MT76_SET(_field, _val))
+
+static inline u32 mt76_rr(struct mt7601u_dev *dev, u32 offset)
+{
+	return mt7601u_rr(dev, offset);
+}
+
+static inline void mt76_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+	return mt7601u_wr(dev, offset, val);
+}
+
+static inline u32
+mt76_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
+{
+	return mt7601u_rmw(dev, offset, mask, val);
+}
+
+static inline u32 mt76_set(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+	return mt76_rmw(dev, offset, 0, val);
+}
+
+static inline u32 mt76_clear(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+	return mt76_rmw(dev, offset, val, 0);
+}
+
+int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base,
+			    const struct mt76_reg_pair *data, int len);
+int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset,
+			     const u32 *data, int n);
+void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr);
+
+/* Init */
+struct mt7601u_dev *mt7601u_alloc_device(struct device *dev);
+int mt7601u_init_hardware(struct mt7601u_dev *dev);
+int mt7601u_register_device(struct mt7601u_dev *dev);
+void mt7601u_cleanup(struct mt7601u_dev *dev);
+
+int mt7601u_mac_start(struct mt7601u_dev *dev);
+void mt7601u_mac_stop(struct mt7601u_dev *dev);
+
+/* PHY */
+int mt7601u_phy_init(struct mt7601u_dev *dev);
+int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev);
+void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path);
+void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 path);
+int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw);
+void mt7601u_agc_save(struct mt7601u_dev *dev);
+void mt7601u_agc_restore(struct mt7601u_dev *dev);
+int mt7601u_phy_set_channel(struct mt7601u_dev *dev,
+			    struct cfg80211_chan_def *chandef);
+void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev);
+int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
+			 struct mt7601u_rxwi *rxwi, u16 rate);
+void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
+			       struct ieee80211_bss_conf *info);
+
+/* MAC */
+void mt7601u_mac_work(struct work_struct *work);
+void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot,
+				int ht_mode);
+void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb);
+void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval);
+void
+mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
+void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev);
+
+/* TX */
+void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+		struct sk_buff *skb);
+int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		    u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb);
+void mt7601u_tx_stat(struct work_struct *work);
+
+/* util */
+void mt76_remove_hdr_pad(struct sk_buff *skb);
+int mt76_insert_hdr_pad(struct sk_buff *skb);
+
+u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below);
+
+static inline u32 mt7601u_mac_set_ctrlch(struct mt7601u_dev *dev, bool below)
+{
+	return mt7601u_rmc(dev, MT_TX_BAND_CFG, 1, below);
+}
+
+int mt7601u_dma_init(struct mt7601u_dev *dev);
+void mt7601u_dma_cleanup(struct mt7601u_dev *dev);
+
+int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
+			   struct mt76_wcid *wcid, int hw_q);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
new file mode 100644
index 0000000..1908af6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -0,0 +1,1251 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "mcu.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "initvals_phy.h"
+
+#include <linux/etherdevice.h>
+
+static void mt7601u_agc_reset(struct mt7601u_dev *dev);
+
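+/* RF registers are accessed through the RF_CSR window: wait for any
+ * previous KICK to clear, then latch bank/offset/value with the WR and
+ * KICK bits set and let the hardware complete the write on its own.
+ */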
+static int
+mt7601u_rf_wr(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 value)
+{
+	int ret = 0;
+
+	if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) ||
+	    WARN_ON(offset > 63))
+		return -EINVAL;
+	if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+		return 0;
+
+	mutex_lock(&dev->reg_atomic_mutex);
+
+	if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	mt7601u_wr(dev, MT_RF_CSR_CFG, MT76_SET(MT_RF_CSR_CFG_DATA, value) |
+				       MT76_SET(MT_RF_CSR_CFG_REG_BANK, bank) |
+				       MT76_SET(MT_RF_CSR_CFG_REG_ID, offset) |
+				       MT_RF_CSR_CFG_WR |
+				       MT_RF_CSR_CFG_KICK);
+	trace_rf_write(dev, bank, offset, value);
+out:
+	mutex_unlock(&dev->reg_atomic_mutex);
+
+	if (ret < 0)
+		dev_err(dev->dev, "Error: RF write %02hhx:%02hhx failed:%d!!\n",
+			bank, offset, ret);
+
+	return ret;
+}
+
+static int
+mt7601u_rf_rr(struct mt7601u_dev *dev, u8 bank, u8 offset)
+{
+	int ret = -ETIMEDOUT;
+	u32 val;
+
+	if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) ||
+	    WARN_ON(offset > 63))
+		return -EINVAL;
+	if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+		return 0xff;
+
+	mutex_lock(&dev->reg_atomic_mutex);
+
+	if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+		goto out;
+
+	mt7601u_wr(dev, MT_RF_CSR_CFG, MT76_SET(MT_RF_CSR_CFG_REG_BANK, bank) |
+				       MT76_SET(MT_RF_CSR_CFG_REG_ID, offset) |
+				       MT_RF_CSR_CFG_KICK);
+
+	if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+		goto out;
+
+	val = mt7601u_rr(dev, MT_RF_CSR_CFG);
+	if (MT76_GET(MT_RF_CSR_CFG_REG_ID, val) == offset &&
+	    MT76_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
+		ret = MT76_GET(MT_RF_CSR_CFG_DATA, val);
+		trace_rf_read(dev, bank, offset, ret);
+	}
+out:
+	mutex_unlock(&dev->reg_atomic_mutex);
+
+	if (ret < 0)
+		dev_err(dev->dev, "Error: RF read %02hhx:%02hhx failed:%d!!\n",
+			bank, offset, ret);
+
+	return ret;
+}
+
+static int
+mt7601u_rf_rmw(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 mask, u8 val)
+{
+	int ret;
+
+	ret = mt7601u_rf_rr(dev, bank, offset);
+	if (ret < 0)
+		return ret;
+	val |= ret & ~mask;
+	ret = mt7601u_rf_wr(dev, bank, offset, val);
+	if (ret)
+		return ret;
+
+	return val;
+}
+
+static int
+mt7601u_rf_set(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 val)
+{
+	return mt7601u_rf_rmw(dev, bank, offset, 0, val);
+}
+
+static int
+mt7601u_rf_clear(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 mask)
+{
+	return mt7601u_rf_rmw(dev, bank, offset, mask, 0);
+}
+
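+/* BBP registers use the same kick-and-poll scheme as the RF ones,
+ * only through the BBP_CSR window with a BUSY flag instead of KICK.
+ */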
+static void mt7601u_bbp_wr(struct mt7601u_dev *dev, u8 offset, u8 val)
+{
+	if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) ||
+	    test_bit(MT7601U_STATE_REMOVED, &dev->state))
+		return;
+
+	mutex_lock(&dev->reg_atomic_mutex);
+
+	if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000)) {
+		dev_err(dev->dev, "Error: BBP write %02hhx failed!!\n", offset);
+		goto out;
+	}
+
+	mt7601u_wr(dev, MT_BBP_CSR_CFG,
+		   MT76_SET(MT_BBP_CSR_CFG_VAL, val) |
+		   MT76_SET(MT_BBP_CSR_CFG_REG_NUM, offset) |
+		   MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY);
+	trace_bbp_write(dev, offset, val);
+out:
+	mutex_unlock(&dev->reg_atomic_mutex);
+}
+
+static int mt7601u_bbp_rr(struct mt7601u_dev *dev, u8 offset)
+{
+	u32 val;
+	int ret = -ETIMEDOUT;
+
+	if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)))
+		return -EINVAL;
+	if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+		return 0xff;
+
+	mutex_lock(&dev->reg_atomic_mutex);
+
+	if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000))
+		goto out;
+
+	mt7601u_wr(dev, MT_BBP_CSR_CFG,
+		   MT76_SET(MT_BBP_CSR_CFG_REG_NUM, offset) |
+		   MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY |
+		   MT_BBP_CSR_CFG_READ);
+
+	if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000))
+		goto out;
+
+	val = mt7601u_rr(dev, MT_BBP_CSR_CFG);
+	if (MT76_GET(MT_BBP_CSR_CFG_REG_NUM, val) == offset) {
+		ret = MT76_GET(MT_BBP_CSR_CFG_VAL, val);
+		trace_bbp_read(dev, offset, ret);
+	}
+out:
+	mutex_unlock(&dev->reg_atomic_mutex);
+
+	if (ret < 0)
+		dev_err(dev->dev, "Error: BBP read %02hhx failed:%d!!\n",
+			offset, ret);
+
+	return ret;
+}
+
+static int mt7601u_bbp_rmw(struct mt7601u_dev *dev, u8 offset, u8 mask, u8 val)
+{
+	int ret;
+
+	ret = mt7601u_bbp_rr(dev, offset);
+	if (ret < 0)
+		return ret;
+	val |= ret & ~mask;
+	mt7601u_bbp_wr(dev, offset, val);
+
+	return val;
+}
+
+static u8 mt7601u_bbp_rmc(struct mt7601u_dev *dev, u8 offset, u8 mask, u8 val)
+{
+	int ret;
+
+	ret = mt7601u_bbp_rr(dev, offset);
+	if (ret < 0)
+		return ret;
+	val |= ret & ~mask;
+	if (ret != val)
+		mt7601u_bbp_wr(dev, offset, val);
+
+	return val;
+}
+
+int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev)
+{
+	int i = 20;
+	u8 val;
+
+	do {
+		val = mt7601u_bbp_rr(dev, MT_BBP_REG_VERSION);
+		if (val && val != 0xff) /* 0x00 and 0xff mean not ready */
+			break;
+	} while (--i);
+
+	if (!i) {
+		dev_err(dev->dev, "Error: BBP is not ready\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below)
+{
+	return mt7601u_bbp_rmc(dev, 3, 0x20, below ? 0x20 : 0);
+}
+
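+/* RSSI is reconstructed from the RXWI: start from a fixed base of 8
+ * (seemingly a calibration constant), then subtract the LNA step for
+ * the reporting antenna/bandwidth, the raw gain value, and the EEPROM
+ * LNA gain and RSSI offset.
+ */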
+int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
+			 struct mt7601u_rxwi *rxwi, u16 rate)
+{
+	static const s8 lna[2][2][3] = {
+		/* main LNA */ {
+			/* bw20 */ { -2, 15, 33 },
+			/* bw40 */ {  0, 16, 34 }
+		},
+		/*  aux LNA */ {
+			/* bw20 */ { -2, 15, 33 },
+			/* bw40 */ { -2, 16, 34 }
+		}
+	};
+	int bw = MT76_GET(MT_RXWI_RATE_BW, rate);
+	int aux_lna = MT76_GET(MT_RXWI_ANT_AUX_LNA, rxwi->ant);
+	int lna_id = MT76_GET(MT_RXWI_GAIN_RSSI_LNA_ID, rxwi->gain);
+	int val;
+
+	if (lna_id) /* LNA id can be 0, 2, 3. */
+		lna_id--;
+
+	val = 8;
+	val -= lna[aux_lna][bw][lna_id];
+	val -= MT76_GET(MT_RXWI_GAIN_RSSI_VAL, rxwi->gain);
+	val -= dev->ee->lna_gain;
+	val -= dev->ee->rssi_offset[0];
+
+	return val;
+}
+
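+/* Kick a VCO calibration: RF bank 0 regs 4/5 appear to select the cal
+ * mode and setting bit 7 of reg 4 starts it; 2 ms seems to be enough
+ * for it to settle.
+ */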
+static void mt7601u_vco_cal(struct mt7601u_dev *dev)
+{
+	mt7601u_rf_wr(dev, 0, 4, 0x0a);
+	mt7601u_rf_wr(dev, 0, 5, 0x20);
+	mt7601u_rf_set(dev, 0, 4, BIT(7));
+	msleep(2);
+}
+
+static int mt7601u_set_bw_filter(struct mt7601u_dev *dev, bool cal)
+{
+	u32 filter = 0;
+	int ret;
+
+	if (!cal)
+		filter |= 0x10000;
+	if (dev->bw != MT_BW_20)
+		filter |= 0x00100;
+
+	/* TX */
+	ret = mt7601u_mcu_calibrate(dev, MCU_CAL_BW, filter | 1);
+	if (ret)
+		return ret;
+	/* RX */
+	return mt7601u_mcu_calibrate(dev, MCU_CAL_BW, filter);
+}
+
+static int mt7601u_load_bbp_temp_table_bw(struct mt7601u_dev *dev)
+{
+	const struct reg_table *t;
+
+	if (WARN_ON(dev->temp_mode > MT_TEMP_MODE_LOW))
+		return -EINVAL;
+
+	t = &bbp_mode_table[dev->temp_mode][dev->bw];
+
+	return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, t->regs, t->n);
+}
+
+static int mt7601u_bbp_temp(struct mt7601u_dev *dev, int mode, const char *name)
+{
+	const struct reg_table *t;
+	int ret;
+
+	if (dev->temp_mode == mode)
+		return 0;
+
+	dev->temp_mode = mode;
+	trace_temp_mode(dev, mode);
+
+	t = bbp_mode_table[dev->temp_mode];
+	ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+				      t[2].regs, t[2].n);
+	if (ret)
+		return ret;
+
+	return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+				       t[dev->bw].regs, t[dev->bw].n);
+}
+
+static void mt7601u_apply_ch14_fixup(struct mt7601u_dev *dev, int hw_chan)
+{
+	struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+
+	if (hw_chan != 14 || dev->bw != MT_BW_20) {
+		mt7601u_bbp_rmw(dev, 4, 0x20, 0);
+		mt7601u_bbp_wr(dev, 178, 0xff);
+
+		t->cck[0].bw20 = dev->ee->real_cck_bw20[0];
+		t->cck[1].bw20 = dev->ee->real_cck_bw20[1];
+	} else { /* Apply CH14 OBW fixup */
+		mt7601u_bbp_wr(dev, 4, 0x60);
+		mt7601u_bbp_wr(dev, 178, 0);
+
+		/* Note: vendor code is buggy here for negative values */
+		t->cck[0].bw20 = dev->ee->real_cck_bw20[0] - 2;
+		t->cck[1].bw20 = dev->ee->real_cck_bw20[1] - 2;
+	}
+}
+
+static int __mt7601u_phy_set_channel(struct mt7601u_dev *dev,
+				     struct cfg80211_chan_def *chandef)
+{
+#define FREQ_PLAN_REGS	4
+	static const u8 freq_plan[14][FREQ_PLAN_REGS] = {
+		{ 0x99,	0x99,	0x09,	0x50 },
+		{ 0x46,	0x44,	0x0a,	0x50 },
+		{ 0xec,	0xee,	0x0a,	0x50 },
+		{ 0x99,	0x99,	0x0b,	0x50 },
+		{ 0x46,	0x44,	0x08,	0x51 },
+		{ 0xec,	0xee,	0x08,	0x51 },
+		{ 0x99,	0x99,	0x09,	0x51 },
+		{ 0x46,	0x44,	0x0a,	0x51 },
+		{ 0xec,	0xee,	0x0a,	0x51 },
+		{ 0x99,	0x99,	0x0b,	0x51 },
+		{ 0x46,	0x44,	0x08,	0x52 },
+		{ 0xec,	0xee,	0x08,	0x52 },
+		{ 0x99,	0x99,	0x09,	0x52 },
+		{ 0x33,	0x33,	0x0b,	0x52 },
+	};
+	struct mt76_reg_pair channel_freq_plan[FREQ_PLAN_REGS] = {
+		{ 17, 0 }, { 18, 0 }, { 19, 0 }, { 20, 0 },
+	};
+	struct mt76_reg_pair bbp_settings[3] = {
+		{ 62, 0x37 - dev->ee->lna_gain },
+		{ 63, 0x37 - dev->ee->lna_gain },
+		{ 64, 0x37 - dev->ee->lna_gain },
+	};
+
+	struct ieee80211_channel *chan = chandef->chan;
+	enum nl80211_channel_type chan_type =
+		cfg80211_get_chandef_type(chandef);
+	struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+	int chan_idx;
+	bool chan_ext_below;
+	u8 bw;
+	int i, ret;
+
+	bw = MT_BW_20;
+	chan_ext_below = (chan_type == NL80211_CHAN_HT40MINUS);
+	chan_idx = chan->hw_value - 1;
+
+	if (chandef->width == NL80211_CHAN_WIDTH_40) {
+		bw = MT_BW_40;
+
+		if (chan_idx > 1 && chan_type == NL80211_CHAN_HT40MINUS)
+			chan_idx -= 2;
+		else if (chan_idx < 12 && chan_type == NL80211_CHAN_HT40PLUS)
+			chan_idx += 2;
+		else
+			dev_err(dev->dev, "Error: invalid 40MHz channel!!\n");
+	}
+
+	if (bw != dev->bw || chan_ext_below != dev->chan_ext_below) {
+		dev_dbg(dev->dev, "Info: switching HT mode bw:%d below:%d\n",
+			bw, chan_ext_below);
+
+		mt7601u_bbp_set_bw(dev, bw);
+
+		mt7601u_bbp_set_ctrlch(dev, chan_ext_below);
+		mt7601u_mac_set_ctrlch(dev, chan_ext_below);
+		dev->chan_ext_below = chan_ext_below;
+	}
+
+	for (i = 0; i < FREQ_PLAN_REGS; i++)
+		channel_freq_plan[i].value = freq_plan[chan_idx][i];
+
+	ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_RF,
+				      channel_freq_plan, FREQ_PLAN_REGS);
+	if (ret)
+		return ret;
+
+	mt7601u_rmw(dev, MT_TX_ALC_CFG_0, 0x3f3f,
+		    dev->ee->chan_pwr[chan_idx] & 0x3f);
+
+	ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+				      bbp_settings, ARRAY_SIZE(bbp_settings));
+	if (ret)
+		return ret;
+
+	mt7601u_vco_cal(dev);
+	mt7601u_bbp_set_bw(dev, bw);
+	ret = mt7601u_set_bw_filter(dev, false);
+	if (ret)
+		return ret;
+
+	mt7601u_apply_ch14_fixup(dev, chan->hw_value);
+	mt7601u_wr(dev, MT_TX_PWR_CFG_0, int_to_s6(t->ofdm[1].bw20) << 24 |
+					 int_to_s6(t->ofdm[0].bw20) << 16 |
+					 int_to_s6(t->cck[1].bw20) << 8 |
+					 int_to_s6(t->cck[0].bw20));
+
+	if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
+		mt7601u_agc_reset(dev);
+
+	dev->chandef = *chandef;
+
+	return 0;
+}
+
+int mt7601u_phy_set_channel(struct mt7601u_dev *dev,
+			    struct cfg80211_chan_def *chandef)
+{
+	int ret;
+
+	cancel_delayed_work_sync(&dev->cal_work);
+	cancel_delayed_work_sync(&dev->freq_cal.work);
+
+	mutex_lock(&dev->hw_atomic_mutex);
+	ret = __mt7601u_phy_set_channel(dev, chandef);
+	mutex_unlock(&dev->hw_atomic_mutex);
+	if (ret)
+		return ret;
+
+	if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
+		return 0;
+
+	ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+	if (dev->freq_cal.enabled)
+		ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
+					     MT_FREQ_CAL_INIT_DELAY);
+	return 0;
+}
+
+#define BBP_R47_FLAG		GENMASK(2, 0)
+#define BBP_R47_F_TSSI		0
+#define BBP_R47_F_PKT_T		1
+#define BBP_R47_F_TX_RATE	2
+#define BBP_R47_F_TEMP		4
+/**
+ * mt7601u_bbp_r47_get - read value through BBP R47/R49 pair
+ * @dev:	pointer to adapter structure
+ * @reg:	value of BBP R47 before the operation
+ * @flag:	one of the BBP_R47_F_* flags
+ *
+ * Convenience helper for reading values through BBP R47/R49 pair.
+ * Takes old value of BBP R47 as @reg, because callers usually have it
+ * cached already.
+ *
+ * Return: value of BBP R49.
+ */
+static u8 mt7601u_bbp_r47_get(struct mt7601u_dev *dev, u8 reg, u8 flag)
+{
+	flag |= reg & ~BBP_R47_FLAG;
+	mt7601u_bbp_wr(dev, 47, flag);
+	usleep_range(500, 700);
+	return mt7601u_bbp_rr(dev, 49);
+}
+
+static s8 mt7601u_read_bootup_temp(struct mt7601u_dev *dev)
+{
+	u8 bbp_val, temp;
+	u32 rf_bp, rf_set;
+	int i;
+
+	rf_set = mt7601u_rr(dev, MT_RF_SETTING_0);
+	rf_bp = mt7601u_rr(dev, MT_RF_BYPASS_0);
+
+	mt7601u_wr(dev, MT_RF_BYPASS_0, 0);
+	mt7601u_wr(dev, MT_RF_SETTING_0, 0x00000010);
+	mt7601u_wr(dev, MT_RF_BYPASS_0, 0x00000010);
+
+	bbp_val = mt7601u_bbp_rmw(dev, 47, 0, 0x10);
+
+	mt7601u_bbp_wr(dev, 22, 0x40);
+
+	for (i = 100; i && (bbp_val & 0x10); i--)
+		bbp_val = mt7601u_bbp_rr(dev, 47);
+
+	temp = mt7601u_bbp_r47_get(dev, bbp_val, BBP_R47_F_TEMP);
+
+	mt7601u_bbp_wr(dev, 22, 0);
+
+	bbp_val = mt7601u_bbp_rr(dev, 21);
+	bbp_val |= 0x02;
+	mt7601u_bbp_wr(dev, 21, bbp_val);
+	bbp_val &= ~0x02;
+	mt7601u_bbp_wr(dev, 21, bbp_val);
+
+	mt7601u_wr(dev, MT_RF_BYPASS_0, 0);
+	mt7601u_wr(dev, MT_RF_SETTING_0, rf_set);
+	mt7601u_wr(dev, MT_RF_BYPASS_0, rf_bp);
+
+	trace_read_temp(dev, temp);
+	return temp;
+}
+
+static s8 mt7601u_read_temp(struct mt7601u_dev *dev)
+{
+	int i;
+	u8 val;
+	s8 temp;
+
+	val = mt7601u_bbp_rmw(dev, 47, 0x7f, 0x10);
+
+	/* Note: this rarely succeeds; the temp can update even if it fails. */
+	for (i = 100; i && (val & 0x10); i--)
+		val = mt7601u_bbp_rr(dev, 47);
+
+	temp = mt7601u_bbp_r47_get(dev, val, BBP_R47_F_TEMP);
+
+	trace_read_temp(dev, temp);
+	return temp;
+}
+
+static void mt7601u_rxdc_cal(struct mt7601u_dev *dev)
+{
+	static const struct mt76_reg_pair intro[] = {
+		{ 158, 0x8d }, { 159, 0xfc },
+		{ 158, 0x8c }, { 159, 0x4c },
+	}, outro[] = {
+		{ 158, 0x8d }, { 159, 0xe0 },
+	};
+	u32 mac_ctrl;
+	int i, ret;
+
+	mac_ctrl = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX);
+
+	ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+				      intro, ARRAY_SIZE(intro));
+	if (ret)
+		dev_err(dev->dev, "%s intro failed:%d\n", __func__, ret);
+
+	for (i = 20; i; i--) {
+		usleep_range(300, 500);
+
+		mt7601u_bbp_wr(dev, 158, 0x8c);
+		if (mt7601u_bbp_rr(dev, 159) == 0x0c)
+			break;
+	}
+	if (!i)
+		dev_err(dev->dev, "%s timed out\n", __func__);
+
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+	ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+				      outro, ARRAY_SIZE(outro));
+	if (ret)
+		dev_err(dev->dev, "%s outro failed:%d\n", __func__, ret);
+
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, mac_ctrl);
+}
+
+void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev)
+{
+	mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->curr_temp);
+
+	mt7601u_rxdc_cal(dev);
+}
+
+/* Note: function copied from vendor driver */
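+/* It approximates log2() piecewise-linearly on a normalized 16-bit
+ * mantissa; judging by the final ~6.02/1024 scaling the result appears
+ * to be a power ratio in roughly 1/32 dB units.
+ */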
+static s16 lin2dBd(u16 linear)
+{
+	short exp = 0;
+	unsigned int mantissa;
+	int app, dBd;
+
+	if (WARN_ON(!linear))
+		return -10000;
+
+	mantissa = linear;
+
+	exp = fls(mantissa) - 16;
+	if (exp > 0)
+		mantissa >>= exp;
+	else
+		mantissa <<= abs(exp);
+
+	if (mantissa <= 0xb800)
+		app = (mantissa + (mantissa >> 3) + (mantissa >> 4) - 0x9600);
+	else
+		app = (mantissa - (mantissa >> 3) - (mantissa >> 6) - 0x5a00);
+	if (app < 0)
+		app = 0;
+
+	dBd = ((15 + exp) << 15) + app;
+	dBd = (dBd << 2) + (dBd << 1) + (dBd >> 6) + (dBd >> 7);
+	dBd = (dBd >> 10);
+
+	return dBd;
+}
+
+static void
+mt7601u_set_initial_tssi(struct mt7601u_dev *dev, s16 tssi_db, s16 tssi_hvga_db)
+{
+	struct tssi_data *d = &dev->ee->tssi_data;
+	int init_offset;
+
+	init_offset = -((tssi_db * d->slope + d->offset[1]) / 4096) + 10;
+
+	mt76_rmw(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+		 int_to_s6(init_offset) & MT_TX_ALC_CFG_1_TEMP_COMP);
+}
+
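+/* Measure the TSSI DC level in four configurations (mixer off/on, at
+ * normal and high VGA gain) and derive both the DC offsets and the dB
+ * gain difference between the two VGA settings, which
+ * mt7601u_tssi_cal() later subtracts out for HVGA reads.
+ */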
+static void mt7601u_tssi_dc_gain_cal(struct mt7601u_dev *dev)
+{
+	u8 rf_vga, rf_mixer, bbp_r47;
+	int i, j;
+	s8 res[4];
+	s16 tssi_init_db, tssi_init_hvga_db;
+
+	mt7601u_wr(dev, MT_RF_SETTING_0, 0x00000030);
+	mt7601u_wr(dev, MT_RF_BYPASS_0, 0x000c0030);
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+	mt7601u_bbp_wr(dev, 58, 0);
+	mt7601u_bbp_wr(dev, 241, 0x2);
+	mt7601u_bbp_wr(dev, 23, 0x8);
+	bbp_r47 = mt7601u_bbp_rr(dev, 47);
+
+	/* Set VGA gain */
+	rf_vga = mt7601u_rf_rr(dev, 5, 3);
+	mt7601u_rf_wr(dev, 5, 3, 8);
+
+	/* Mixer disable */
+	rf_mixer = mt7601u_rf_rr(dev, 4, 39);
+	mt7601u_rf_wr(dev, 4, 39, 0);
+
+	for (i = 0; i < 4; i++) {
+		mt7601u_rf_wr(dev, 4, 39, (i & 1) ? rf_mixer : 0);
+
+		mt7601u_bbp_wr(dev, 23, (i < 2) ? 0x08 : 0x02);
+		mt7601u_rf_wr(dev, 5, 3, (i < 2) ? 0x08 : 0x11);
+
+		/* BBP TSSI initial and soft reset */
+		mt7601u_bbp_wr(dev, 22, 0);
+		mt7601u_bbp_wr(dev, 244, 0);
+
+		mt7601u_bbp_wr(dev, 21, 1);
+		udelay(1);
+		mt7601u_bbp_wr(dev, 21, 0);
+
+		/* TSSI measurement */
+		mt7601u_bbp_wr(dev, 47, 0x50);
+		mt7601u_bbp_wr(dev, (i & 1) ? 244 : 22, (i & 1) ? 0x31 : 0x40);
+
+		for (j = 20; j; j--)
+			if (!(mt7601u_bbp_rr(dev, 47) & 0x10))
+				break;
+		if (!j)
+			dev_err(dev->dev, "%s timed out\n", __func__);
+
+		/* TSSI read */
+		mt7601u_bbp_wr(dev, 47, 0x40);
+		res[i] = mt7601u_bbp_rr(dev, 49);
+	}
+
+	tssi_init_db = lin2dBd((short)res[1] - res[0]);
+	tssi_init_hvga_db = lin2dBd(((short)res[3] - res[2]) * 4);
+	dev->tssi_init = res[0];
+	dev->tssi_init_hvga = res[2];
+	dev->tssi_init_hvga_offset_db = tssi_init_hvga_db - tssi_init_db;
+
+	dev_dbg(dev->dev,
+		"TSSI_init:%hhx db:%hx hvga:%hhx hvga_db:%hx off_db:%hx\n",
+		dev->tssi_init, tssi_init_db, dev->tssi_init_hvga,
+		tssi_init_hvga_db, dev->tssi_init_hvga_offset_db);
+
+	mt7601u_bbp_wr(dev, 22, 0);
+	mt7601u_bbp_wr(dev, 244, 0);
+
+	mt7601u_bbp_wr(dev, 21, 1);
+	udelay(1);
+	mt7601u_bbp_wr(dev, 21, 0);
+
+	mt7601u_wr(dev, MT_RF_BYPASS_0, 0);
+	mt7601u_wr(dev, MT_RF_SETTING_0, 0);
+
+	mt7601u_rf_wr(dev, 5, 3, rf_vga);
+	mt7601u_rf_wr(dev, 4, 39, rf_mixer);
+	mt7601u_bbp_wr(dev, 47, bbp_r47);
+
+	mt7601u_set_initial_tssi(dev, tssi_init_db, tssi_init_hvga_db);
+}
+
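+/* Temperature compensation: the thresholds below are in the same
+ * scaled units as curr_temp (raw delta times MT_EE_TEMPERATURE_SLOPE).
+ * A swing of more than +/-450 triggers a DPD recalibration, the PLL
+ * lock protect toggles with hysteresis around 20-30C, and the final
+ * step picks the high/normal/low temperature BBP register table.
+ */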
+static int mt7601u_temp_comp(struct mt7601u_dev *dev, bool on)
+{
+	int ret, temp, hi_temp = 400, lo_temp = -200;
+
+	temp = (dev->raw_temp - dev->ee->ref_temp) * MT_EE_TEMPERATURE_SLOPE;
+	dev->curr_temp = temp;
+
+	/* DPD Calibration */
+	if (temp - dev->dpd_temp > 450 || temp - dev->dpd_temp < -450) {
+		dev->dpd_temp = temp;
+
+		ret = mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->dpd_temp);
+		if (ret)
+			return ret;
+
+		mt7601u_vco_cal(dev);
+
+		dev_dbg(dev->dev, "Recalibrate DPD\n");
+	}
+
+	/* PLL Lock Protect */
+	if (temp < -50 && !dev->pll_lock_protect) { /* < 20C */
+		dev->pll_lock_protect = true;
+
+		mt7601u_rf_wr(dev, 4, 4, 6);
+		mt7601u_rf_clear(dev, 4, 10, 0x30);
+
+		dev_dbg(dev->dev, "PLL lock protect on - too cold\n");
+	} else if (temp > 50 && dev->pll_lock_protect) { /* > 30C */
+		dev->pll_lock_protect = false;
+
+		mt7601u_rf_wr(dev, 4, 4, 0);
+		mt7601u_rf_rmw(dev, 4, 10, 0x30, 0x10);
+
+		dev_dbg(dev->dev, "PLL lock protect off\n");
+	}
+
+	if (on) {
+		hi_temp -= 50;
+		lo_temp -= 50;
+	}
+
+	/* BBP CR for H, L, N temperature */
+	if (temp > hi_temp)
+		return mt7601u_bbp_temp(dev, MT_TEMP_MODE_HIGH, "high");
+	else if (temp > lo_temp)
+		return mt7601u_bbp_temp(dev, MT_TEMP_MODE_NORMAL, "normal");
+	else
+		return mt7601u_bbp_temp(dev, MT_TEMP_MODE_LOW, "low");
+}
+
+/* Note: this is used only with TSSI; we can just use trgt_pwr from EEPROM. */
+static int mt7601u_current_tx_power(struct mt7601u_dev *dev)
+{
+	return dev->ee->chan_pwr[dev->chandef.chan->hw_value - 1];
+}
+
+static bool mt7601u_use_hvga(struct mt7601u_dev *dev)
+{
+	return mt7601u_current_tx_power(dev) <= 20;
+}
+
+static s16
+mt7601u_phy_rf_pa_mode_val(struct mt7601u_dev *dev, int phy_mode, int tx_rate)
+{
+	static const s16 decode_tb[] = { 0, 8847, -5734, -5734 };
+	u32 reg;
+
+	switch (phy_mode) {
+	case MT_PHY_TYPE_OFDM:
+		tx_rate += 4;
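+		/* fall through - OFDM shares the CCK PA mode register */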
+	case MT_PHY_TYPE_CCK:
+		reg = dev->rf_pa_mode[0];
+		break;
+	default:
+		reg = dev->rf_pa_mode[1];
+		break;
+	}
+
+	return decode_tb[(reg >> (tx_rate * 2)) & 0x3];
+}
+
+static struct mt7601u_tssi_params
+mt7601u_tssi_params_get(struct mt7601u_dev *dev)
+{
+	static const u8 ofdm_pkt2rate[8] = { 6, 4, 2, 0, 7, 5, 3, 1 };
+	static const int static_power[4] = { 0, -49152, -98304, 49152 };
+	struct mt7601u_tssi_params p;
+	u8 bbp_r47, pkt_type, tx_rate;
+	struct power_per_rate *rate_table;
+
+	bbp_r47 = mt7601u_bbp_rr(dev, 47);
+
+	p.tssi0 = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TSSI);
+	dev->raw_temp = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TEMP);
+	pkt_type = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_PKT_T);
+
+	p.trgt_power = mt7601u_current_tx_power(dev);
+
+	switch (pkt_type & 0x03) {
+	case MT_PHY_TYPE_CCK:
+		tx_rate = (pkt_type >> 4) & 0x03;
+		rate_table = dev->ee->power_rate_table.cck;
+		break;
+
+	case MT_PHY_TYPE_OFDM:
+		tx_rate = ofdm_pkt2rate[(pkt_type >> 4) & 0x07];
+		rate_table = dev->ee->power_rate_table.ofdm;
+		break;
+
+	default:
+		tx_rate = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TX_RATE);
+		tx_rate &= 0x7f;
+		rate_table = dev->ee->power_rate_table.ht;
+		break;
+	}
+
+	if (dev->bw == MT_BW_20)
+		p.trgt_power += rate_table[tx_rate / 2].bw20;
+	else
+		p.trgt_power += rate_table[tx_rate / 2].bw40;
+
+	p.trgt_power <<= 12;
+
+	dev_dbg(dev->dev, "tx_rate:%02hhx pwr:%08x\n", tx_rate, p.trgt_power);
+
+	p.trgt_power += mt7601u_phy_rf_pa_mode_val(dev, pkt_type & 0x03,
+						   tx_rate);
+
+	/* Channel 14, cck, bw20 */
+	if ((pkt_type & 0x03) == MT_PHY_TYPE_CCK) {
+		if (mt7601u_bbp_rr(dev, 4) & 0x20)
+			p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 18022 : 9830;
+		else
+			p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 819 : 24576;
+	}
+
+	p.trgt_power += static_power[mt7601u_bbp_rr(dev, 1) & 0x03];
+
+	p.trgt_power += dev->ee->tssi_data.tx0_delta_offset;
+
+	dev_dbg(dev->dev,
+		"tssi:%02hhx t_power:%08x temp:%02hhx pkt_type:%02hhx\n",
+		p.tssi0, p.trgt_power, dev->raw_temp, pkt_type);
+
+	return p;
+}
+
+static bool mt7601u_tssi_read_ready(struct mt7601u_dev *dev)
+{
+	return !(mt7601u_bbp_rr(dev, 47) & 0x10);
+}
+
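+/* Closed-loop TX power control: convert the latest TSSI sample to dB,
+ * estimate the currently transmitted power from the EEPROM slope and
+ * per-channel offset, and nudge the TEMP_COMP field of TX_ALC_CFG_1
+ * by the (rounded, saturation-checked) difference to the target.
+ */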
+static int mt7601u_tssi_cal(struct mt7601u_dev *dev)
+{
+	struct mt7601u_tssi_params params;
+	int curr_pwr, diff_pwr;
+	s8 tssi_offset; /* explicitly signed: used in signed shifts below */
+	s8 tssi_init;
+	s16 tssi_m_dc, tssi_db;
+	bool hvga;
+	u32 val;
+
+	if (!dev->ee->tssi_enabled)
+		return 0;
+
+	hvga = mt7601u_use_hvga(dev);
+	if (!dev->tssi_read_trig)
+		return mt7601u_mcu_tssi_read_kick(dev, hvga);
+
+	if (!mt7601u_tssi_read_ready(dev))
+		return 0;
+
+	params = mt7601u_tssi_params_get(dev);
+
+	tssi_init = (hvga ? dev->tssi_init_hvga : dev->tssi_init);
+	tssi_m_dc = params.tssi0 - tssi_init;
+	tssi_db = lin2dBd(tssi_m_dc);
+	dev_dbg(dev->dev, "tssi dc:%04hx db:%04hx hvga:%d\n",
+		tssi_m_dc, tssi_db, hvga);
+
+	if (dev->chandef.chan->hw_value < 5)
+		tssi_offset = dev->ee->tssi_data.offset[0];
+	else if (dev->chandef.chan->hw_value < 9)
+		tssi_offset = dev->ee->tssi_data.offset[1];
+	else
+		tssi_offset = dev->ee->tssi_data.offset[2];
+
+	if (hvga)
+		tssi_db -= dev->tssi_init_hvga_offset_db;
+
+	curr_pwr = tssi_db * dev->ee->tssi_data.slope + (tssi_offset << 9);
+	diff_pwr = params.trgt_power - curr_pwr;
+	dev_dbg(dev->dev, "Power curr:%08x diff:%08x\n", curr_pwr, diff_pwr);
+
+	if (params.tssi0 > 126 && diff_pwr > 0) {
+		dev_err(dev->dev, "Error: TSSI upper saturation\n");
+		diff_pwr = 0;
+	}
+	if (params.tssi0 - tssi_init < 1 && diff_pwr < 0) {
+		dev_err(dev->dev, "Error: TSSI lower saturation\n");
+		diff_pwr = 0;
+	}
+
+	if ((dev->prev_pwr_diff ^ diff_pwr) < 0 && abs(diff_pwr) < 4096 &&
+	    (abs(diff_pwr) > abs(dev->prev_pwr_diff) ||
+	     (diff_pwr > 0 && diff_pwr == -dev->prev_pwr_diff)))
+		diff_pwr = 0;
+	else
+		dev->prev_pwr_diff = diff_pwr;
+
+	diff_pwr += (diff_pwr > 0) ? 2048 : -2048;
+	diff_pwr /= 4096;
+
+	dev_dbg(dev->dev, "final diff: %08x\n", diff_pwr);
+
+	val = mt7601u_rr(dev, MT_TX_ALC_CFG_1);
+	curr_pwr = s6_to_int(MT76_GET(MT_TX_ALC_CFG_1_TEMP_COMP, val));
+	diff_pwr += curr_pwr;
+	val = (val & ~MT_TX_ALC_CFG_1_TEMP_COMP) | int_to_s6(diff_pwr);
+	mt7601u_wr(dev, MT_TX_ALC_CFG_1, val);
+
+	return mt7601u_mcu_tssi_read_kick(dev, hvga);
+}
+
+static u8 mt7601u_agc_default(struct mt7601u_dev *dev)
+{
+	return (dev->ee->lna_gain - 8) * 2 + 0x34;
+}
+
+static void mt7601u_agc_reset(struct mt7601u_dev *dev)
+{
+	u8 agc = mt7601u_agc_default(dev);
+
+	mt7601u_bbp_wr(dev, 66, agc);
+}
+
+void mt7601u_agc_save(struct mt7601u_dev *dev)
+{
+	dev->agc_save = mt7601u_bbp_rr(dev, 66);
+}
+
+void mt7601u_agc_restore(struct mt7601u_dev *dev)
+{
+	mt7601u_bbp_wr(dev, 66, dev->agc_save);
+}
+
+static void mt7601u_agc_tune(struct mt7601u_dev *dev)
+{
+	u8 val = mt7601u_agc_default(dev);
+
+	if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
+		return;
+
+	/* Note: only in STA mode and not dozing; perhaps do this only if
+	 *	 there are enough RSSI updates since the last run?
+	 *	 RSSI updates only arrive on beacons and U2M frames, so it
+	 *	 should work...
+	 */
+	spin_lock_bh(&dev->con_mon_lock);
+	if (dev->avg_rssi <= -70)
+		val -= 0x20;
+	else if (dev->avg_rssi <= -60)
+		val -= 0x10;
+	spin_unlock_bh(&dev->con_mon_lock);
+
+	if (val != mt7601u_bbp_rr(dev, 66))
+		mt7601u_bbp_wr(dev, 66, val);
+
+	/* TODO: also try resetting if a lot of beacons were lost
+	 *       (see RTMPSetAGCInitValue() call in mlme.c).
+	 */
+}
+
+static void mt7601u_phy_calibrate(struct work_struct *work)
+{
+	struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+					    cal_work.work);
+
+	mt7601u_agc_tune(dev);
+	mt7601u_tssi_cal(dev);
+	/* If TSSI calibration was run, it has already updated the temp. */
+	if (!dev->ee->tssi_enabled)
+		dev->raw_temp = mt7601u_read_temp(dev);
+	mt7601u_temp_comp(dev, true); /* TODO: find right value for @on */
+
+	ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+}
+
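+/* Frequency calibration tracks the beacon frequency offset with
+ * per-PHY-mode hysteresis: adjustment starts above the activate
+ * threshold and stops below the deactivate one; while active, RF bank
+ * 0 register 12 is stepped by one per run followed by a VCO cal.
+ */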
+static unsigned long
+__mt7601u_phy_freq_cal(struct mt7601u_dev *dev, s8 last_offset, u8 phy_mode)
+{
+	u8 activate_threshold, deactivate_threshold;
+
+	trace_freq_cal_offset(dev, phy_mode, last_offset);
+
+	/* No beacons received - reschedule soon */
+	if (last_offset == MT_FREQ_OFFSET_INVALID)
+		return MT_FREQ_CAL_ADJ_INTERVAL;
+
+	switch (phy_mode) {
+	case MT_PHY_TYPE_CCK:
+		activate_threshold = 19;
+		deactivate_threshold = 5;
+		break;
+	case MT_PHY_TYPE_OFDM:
+		activate_threshold = 102;
+		deactivate_threshold = 32;
+		break;
+	case MT_PHY_TYPE_HT:
+	case MT_PHY_TYPE_HT_GF:
+		activate_threshold = 82;
+		deactivate_threshold = 20;
+		break;
+	default:
+		WARN_ON(1);
+		return MT_FREQ_CAL_CHECK_INTERVAL;
+	}
+
+	if (abs(last_offset) >= activate_threshold)
+		dev->freq_cal.adjusting = true;
+	else if (abs(last_offset) <= deactivate_threshold)
+		dev->freq_cal.adjusting = false;
+
+	if (!dev->freq_cal.adjusting)
+		return MT_FREQ_CAL_CHECK_INTERVAL;
+
+	if (last_offset > deactivate_threshold) {
+		if (dev->freq_cal.freq > 0)
+			dev->freq_cal.freq--;
+		else
+			dev->freq_cal.adjusting = false;
+	} else if (last_offset < -deactivate_threshold) {
+		if (dev->freq_cal.freq < 0xbf)
+			dev->freq_cal.freq++;
+		else
+			dev->freq_cal.adjusting = false;
+	}
+
+	trace_freq_cal_adjust(dev, dev->freq_cal.freq);
+	mt7601u_rf_wr(dev, 0, 12, dev->freq_cal.freq);
+	mt7601u_vco_cal(dev);
+
+	return dev->freq_cal.adjusting ? MT_FREQ_CAL_ADJ_INTERVAL :
+					 MT_FREQ_CAL_CHECK_INTERVAL;
+}
+
+static void mt7601u_phy_freq_cal(struct work_struct *work)
+{
+	struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+					       freq_cal.work.work);
+	s8 last_offset;
+	u8 phy_mode;
+	unsigned long delay;
+
+	spin_lock_bh(&dev->con_mon_lock);
+	last_offset = dev->bcn_freq_off;
+	phy_mode = dev->bcn_phy_mode;
+	spin_unlock_bh(&dev->con_mon_lock);
+
+	delay = __mt7601u_phy_freq_cal(dev, last_offset, phy_mode);
+	ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, delay);
+
+	spin_lock_bh(&dev->con_mon_lock);
+	dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
+	spin_unlock_bh(&dev->con_mon_lock);
+}
+
+void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
+			       struct ieee80211_bss_conf *info)
+{
+	if (!info->assoc)
+		cancel_delayed_work_sync(&dev->freq_cal.work);
+
+	/* Start/stop collecting beacon data */
+	spin_lock_bh(&dev->con_mon_lock);
+	ether_addr_copy(dev->ap_bssid, info->bssid);
+	dev->avg_rssi = 0;
+	dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
+	spin_unlock_bh(&dev->con_mon_lock);
+
+	dev->freq_cal.freq = dev->ee->rf_freq_off;
+	dev->freq_cal.enabled = info->assoc;
+	dev->freq_cal.adjusting = false;
+
+	if (info->assoc)
+		ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
+					     MT_FREQ_CAL_INIT_DELAY);
+}
+
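+/* One-time calibration sequence run at init: read the bootup
+ * temperature, run the MCU R/TXDCOC/LOFT/TXIQ/RXIQ/DPD calibrations
+ * interleaved with RX DC offset and BW filter calibration, then do the
+ * TSSI DC/gain measurement and apply temperature compensation.
+ */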
+static int mt7601u_init_cal(struct mt7601u_dev *dev)
+{
+	u32 mac_ctrl;
+	int ret;
+
+	dev->raw_temp = mt7601u_read_bootup_temp(dev);
+	dev->curr_temp = (dev->raw_temp - dev->ee->ref_temp) *
+		MT_EE_TEMPERATURE_SLOPE;
+	dev->dpd_temp = dev->curr_temp;
+
+	mac_ctrl = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+
+	ret = mt7601u_mcu_calibrate(dev, MCU_CAL_R, 0);
+	if (ret)
+		return ret;
+
+	ret = mt7601u_rf_rr(dev, 0, 4);
+	if (ret < 0)
+		return ret;
+	ret |= 0x80;
+	ret = mt7601u_rf_wr(dev, 0, 4, ret);
+	if (ret)
+		return ret;
+	msleep(2);
+
+	ret = mt7601u_mcu_calibrate(dev, MCU_CAL_TXDCOC, 0);
+	if (ret)
+		return ret;
+
+	mt7601u_rxdc_cal(dev);
+
+	ret = mt7601u_set_bw_filter(dev, true);
+	if (ret)
+		return ret;
+	ret = mt7601u_mcu_calibrate(dev, MCU_CAL_LOFT, 0);
+	if (ret)
+		return ret;
+	ret = mt7601u_mcu_calibrate(dev, MCU_CAL_TXIQ, 0);
+	if (ret)
+		return ret;
+	ret = mt7601u_mcu_calibrate(dev, MCU_CAL_RXIQ, 0);
+	if (ret)
+		return ret;
+	ret = mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->dpd_temp);
+	if (ret)
+		return ret;
+
+	mt7601u_rxdc_cal(dev);
+
+	mt7601u_tssi_dc_gain_cal(dev);
+
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, mac_ctrl);
+
+	mt7601u_temp_comp(dev, true);
+
+	return 0;
+}
+
+int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw)
+{
+	u32 val, old;
+
+	if (bw == dev->bw) {
+		/* Vendor driver does the rmc even when no change is needed. */
+		mt7601u_bbp_rmc(dev, 4, 0x18, bw == MT_BW_20 ? 0 : 0x10);
+
+		return 0;
+	}
+	dev->bw = bw;
+
+	/* Stop MAC for the time of bw change */
+	old = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+	val = old & ~(MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, val);
+	mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
+		  0, 500000);
+
+	mt7601u_bbp_rmc(dev, 4, 0x18, bw == MT_BW_20 ? 0 : 0x10);
+
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, old);
+
+	return mt7601u_load_bbp_temp_table_bw(dev);
+}
+
+/**
+ * mt7601u_set_rx_path - set rx path in BBP
+ * @dev:	pointer to adapter structure
+ * @path:	rx path to set, values are 0-based
+ */
+void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path)
+{
+	mt7601u_bbp_rmw(dev, 3, 0x18, path << 3);
+}
+
+/**
+ * mt7601u_set_tx_dac - set which tx DAC to use
+ * @dev:	pointer to adapter structure
+ * @dac:	DAC index, values are 0-based
+ */
+void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 dac)
+{
+	mt7601u_bbp_rmc(dev, 1, 0x18, dac << 3);
+}
+
+int mt7601u_phy_init(struct mt7601u_dev *dev)
+{
+	int ret;
+
+	dev->rf_pa_mode[0] = mt7601u_rr(dev, MT_RF_PA_MODE_CFG0);
+	dev->rf_pa_mode[1] = mt7601u_rr(dev, MT_RF_PA_MODE_CFG1);
+
+	ret = mt7601u_rf_wr(dev, 0, 12, dev->ee->rf_freq_off);
+	if (ret)
+		return ret;
+	ret = mt7601u_write_reg_pairs(dev, 0, rf_central,
+				      ARRAY_SIZE(rf_central));
+	if (ret)
+		return ret;
+	ret = mt7601u_write_reg_pairs(dev, 0, rf_channel,
+				      ARRAY_SIZE(rf_channel));
+	if (ret)
+		return ret;
+	ret = mt7601u_write_reg_pairs(dev, 0, rf_vga, ARRAY_SIZE(rf_vga));
+	if (ret)
+		return ret;
+
+	ret = mt7601u_init_cal(dev);
+	if (ret)
+		return ret;
+
+	dev->prev_pwr_diff = 100;
+
+	INIT_DELAYED_WORK(&dev->cal_work, mt7601u_phy_calibrate);
+	INIT_DELAYED_WORK(&dev->freq_cal.work, mt7601u_phy_freq_cal);
+
+	return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/regs.h b/drivers/net/wireless/mediatek/mt7601u/regs.h
new file mode 100644
index 0000000..afd8978
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/regs.h
@@ -0,0 +1,636 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_REGS_H
+#define __MT76_REGS_H
+
+#include <linux/bitops.h>
+
+#ifndef GENMASK
+#define GENMASK(h, l)       (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#endif
+
+#define MT_ASIC_VERSION			0x0000
+
+#define MT76XX_REV_E3		0x22
+#define MT76XX_REV_E4		0x33
+
+#define MT_CMB_CTRL			0x0020
+#define MT_CMB_CTRL_XTAL_RDY		BIT(22)
+#define MT_CMB_CTRL_PLL_LD		BIT(23)
+
+#define MT_EFUSE_CTRL			0x0024
+#define MT_EFUSE_CTRL_AOUT		GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE		GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME	GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME	GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN		GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK		BIT(30)
+#define MT_EFUSE_CTRL_SEL		BIT(31)
+
+#define MT_EFUSE_DATA_BASE		0x0028
+#define MT_EFUSE_DATA(_n)		(MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0			0x0040
+#define MT_COEXCFG0_COEX_EN		BIT(0)
+
+#define MT_WLAN_FUN_CTRL		0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN	BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN	BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF	BIT(2)
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET	BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN	BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ	BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL	BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL	BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST	BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST	BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN	BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN	GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT	GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN	GENMASK(31, 24) /* MT76x0 */
+
+#define MT_XO_CTRL0			0x0100
+#define MT_XO_CTRL1			0x0104
+#define MT_XO_CTRL2			0x0108
+#define MT_XO_CTRL3			0x010c
+#define MT_XO_CTRL4			0x0110
+
+#define MT_XO_CTRL5			0x0114
+#define MT_XO_CTRL5_C2_VAL		GENMASK(14, 8)
+
+#define MT_XO_CTRL6			0x0118
+#define MT_XO_CTRL6_C2_CTRL		GENMASK(14, 8)
+
+#define MT_XO_CTRL7			0x011c
+
+#define MT_WLAN_MTC_CTRL		0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP	BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK	BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S	BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD	GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD	BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD	BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD	BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB	BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB	BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB	BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB	BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP	BIT(28)
+
+#define MT_INT_SOURCE_CSR		0x0200
+#define MT_INT_MASK_CSR			0x0204
+
+#define MT_INT_RX_DONE(_n)		BIT(_n)
+#define MT_INT_RX_DONE_ALL		GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL		GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n)		BIT(_n + 4)
+#define MT_INT_RX_COHERENT		BIT(16)
+#define MT_INT_TX_COHERENT		BIT(17)
+#define MT_INT_ANY_COHERENT		BIT(18)
+#define MT_INT_MCU_CMD			BIT(19)
+#define MT_INT_TBTT			BIT(20)
+#define MT_INT_PRE_TBTT			BIT(21)
+#define MT_INT_TX_STAT			BIT(22)
+#define MT_INT_AUTO_WAKEUP		BIT(23)
+#define MT_INT_GPTIMER			BIT(24)
+#define MT_INT_RXDELAYINT		BIT(26)
+#define MT_INT_TXDELAYINT		BIT(27)
+
+#define MT_WPDMA_GLO_CFG		0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN	BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY	BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN	BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY	BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE	GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE	BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN	BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN	GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS	BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET	BIT(31)
+
+#define MT_WPDMA_RST_IDX		0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG		0x0210
+
+#define MT_WMM_AIFSN		0x0214
+#define MT_WMM_AIFSN_MASK		GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_CWMIN		0x0218
+#define MT_WMM_CWMIN_MASK		GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_CWMAX		0x021c
+#define MT_WMM_CWMAX_MASK		GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_TXOP_BASE		0x0220
+#define MT_WMM_TXOP(_n)			(MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+#define MT_WMM_TXOP_SHIFT(_n)		(((_n) & 1) * 16)
+#define MT_WMM_TXOP_MASK		GENMASK(15, 0)
+
+#define MT_FCE_DMA_ADDR			0x0230
+#define MT_FCE_DMA_LEN			0x0234
+
+#define MT_USB_DMA_CFG			0x0238
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT	GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT	GENMASK(15, 8)
+#define MT_USB_DMA_CFG_PHY_CLR		BIT(16)
+#define MT_USB_DMA_CFG_TX_CLR		BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT	BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN	BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN	BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN	BIT(23)
+#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP	BIT(25)
+#define MT_USB_DMA_CFG_EP_OUT_VALID	GENMASK(29, 27)
+#define MT_USB_DMA_CFG_RX_BUSY		BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY		BIT(31)
+
+#define MT_TSO_CTRL			0x0250
+#define MT_HEADER_TRANS_CTRL_REG	0x0260
+
+#define MT_US_CYC_CFG			0x02a4
+#define MT_US_CYC_CNT			GENMASK(7, 0)
+
+#define MT_TX_RING_BASE			0x0300
+#define MT_RX_RING_BASE			0x03c0
+#define MT_RING_SIZE			0x10
+
+#define MT_TX_HW_QUEUE_MCU		8
+#define MT_TX_HW_QUEUE_MGMT		9
+
+#define MT_PBF_SYS_CTRL			0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET	BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET	BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET	BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET	BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET	BIT(4)
+
+#define MT_PBF_CFG			0x0404
+#define MT_PBF_CFG_TX0Q_EN		BIT(0)
+#define MT_PBF_CFG_TX1Q_EN		BIT(1)
+#define MT_PBF_CFG_TX2Q_EN		BIT(2)
+#define MT_PBF_CFG_TX3Q_EN		BIT(3)
+#define MT_PBF_CFG_RX0Q_EN		BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN		BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT		0x0408
+#define MT_PBF_RX_MAX_PCNT		0x040c
+
+#define MT_BCN_OFFSET_BASE		0x041c
+#define MT_BCN_OFFSET(_n)		(MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define	MT_RF_CSR_CFG			0x0500
+#define MT_RF_CSR_CFG_DATA		GENMASK(7, 0)
+#define MT_RF_CSR_CFG_REG_ID		GENMASK(13, 8)
+#define MT_RF_CSR_CFG_REG_BANK		GENMASK(17, 14)
+#define MT_RF_CSR_CFG_WR		BIT(30)
+#define MT_RF_CSR_CFG_KICK		BIT(31)
+
+#define MT_RF_BYPASS_0			0x0504
+#define MT_RF_BYPASS_1			0x0508
+#define MT_RF_SETTING_0			0x050c
+
+#define MT_RF_DATA_WRITE		0x0524
+
+#define MT_RF_CTRL			0x0528
+#define MT_RF_CTRL_ADDR			GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE		BIT(12)
+#define MT_RF_CTRL_BUSY			BIT(13)
+#define MT_RF_CTRL_IDX			BIT(16)
+
+#define MT_RF_DATA_READ			0x052c
+
+#define MT_FCE_PSE_CTRL			0x0800
+#define MT_FCE_PARAMETERS		0x0804
+#define MT_FCE_CSO			0x0808
+
+#define MT_FCE_L2_STUFF			0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN	BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN	BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN	BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN	BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN	BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP	BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN	GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN	GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT	GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1	0x0824
+
+#define MT_TX_CPU_FROM_FCE_BASE_PTR	0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT	0x09a4
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX	0x09a8
+
+#define MT_FCE_PDMA_GLOBAL_CONF		0x09c4
+
+#define MT_PAUSE_ENABLE_CONTROL1	0x0a38
+
+#define MT_FCE_SKIP_FS			0x0a6c
+
+#define MT_MAC_CSR0			0x1000
+
+#define MT_MAC_SYS_CTRL			0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR	BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP	BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX	BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX	BIT(3)
+
+#define MT_MAC_ADDR_DW0			0x1008
+#define MT_MAC_ADDR_DW1			0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK	GENMASK(23, 16)
+
+#define MT_MAC_BSSID_DW0		0x1010
+#define MT_MAC_BSSID_DW1		0x1014
+#define MT_MAC_BSSID_DW1_ADDR		GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE	GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N	GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT	BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2	BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3	BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE	GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG			0x1018
+#define MT_MAX_LEN_CFG_AMPDU		GENMASK(13, 12)
+
+#define MT_BBP_CSR_CFG			0x101c
+#define MT_BBP_CSR_CFG_VAL		GENMASK(7, 0)
+#define MT_BBP_CSR_CFG_REG_NUM		GENMASK(15, 8)
+#define MT_BBP_CSR_CFG_READ		BIT(16)
+#define MT_BBP_CSR_CFG_BUSY		BIT(17)
+#define MT_BBP_CSR_CFG_PAR_DUR		BIT(18)
+#define MT_BBP_CSR_CFG_RW_MODE		BIT(19)
+
+#define MT_AMPDU_MAX_LEN_20M1S		0x1030
+#define MT_AMPDU_MAX_LEN_20M2S		0x1034
+#define MT_AMPDU_MAX_LEN_40M1S		0x1038
+#define MT_AMPDU_MAX_LEN_40M2S		0x103c
+#define MT_AMPDU_MAX_LEN		0x1040
+
+#define MT_WCID_DROP_BASE		0x106c
+#define MT_WCID_DROP(_n)		(MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n)		BIT((_n) % 32)
+
+#define MT_BCN_BYPASS_MASK		0x108c
+
+#define MT_MAC_APC_BSSID_BASE		0x1090
+#define MT_MAC_APC_BSSID_L(_n)		(MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n)		(MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR		GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN		BIT(16)
+
+#define MT_XIFS_TIME_CFG		0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS	GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS	GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS	GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS		GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN	BIT(29)
+
+#define MT_BKOFF_SLOT_CFG		0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME	GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY	GENMASK(11, 8)
+
+#define MT_BEACON_TIME_CFG		0x1114
+#define MT_BEACON_TIME_CFG_INTVAL	GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN	BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE	GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN	BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX	BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP	GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG		0x1118
+#define MT_TBTT_TIMER_CFG		0x1124
+
+#define MT_INT_TIMER_CFG		0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT	GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER	GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN			0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN	BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN	BIT(1)
+
+#define MT_MAC_STATUS			0x1200
+#define MT_MAC_STATUS_TX		BIT(0)
+#define MT_MAC_STATUS_RX		BIT(1)
+
+#define MT_PWR_PIN_CFG			0x1204
+#define MT_AUX_CLK_CFG			0x120c
+
+#define MT_BB_PA_MODE_CFG0		0x1214
+#define MT_BB_PA_MODE_CFG1		0x1218
+#define MT_RF_PA_MODE_CFG0		0x121c
+#define MT_RF_PA_MODE_CFG1		0x1220
+
+#define MT_RF_PA_MODE_ADJ0		0x1228
+#define MT_RF_PA_MODE_ADJ1		0x122c
+
+#define MT_DACCLK_EN_DLY_CFG		0x1264
+
+#define MT_EDCA_CFG_BASE		0x1300
+#define MT_EDCA_CFG_AC(_n)		(MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP		GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN		GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN		GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX		GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0			0x1314
+#define MT_TX_PWR_CFG_1			0x1318
+#define MT_TX_PWR_CFG_2			0x131c
+#define MT_TX_PWR_CFG_3			0x1320
+#define MT_TX_PWR_CFG_4			0x1324
+
+#define MT_TX_BAND_CFG			0x132c
+#define MT_TX_BAND_CFG_UPPER_40M	BIT(0)
+#define MT_TX_BAND_CFG_5G		BIT(1)
+#define MT_TX_BAND_CFG_2G		BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY		0x1384
+#define MT_TX_MPDU_ADJ_INT		0x1388
+
+#define MT_TX_PWR_CFG_7			0x13d4
+#define MT_TX_PWR_CFG_8			0x13d8
+#define MT_TX_PWR_CFG_9			0x13dc
+
+#define MT_TX_SW_CFG0			0x1330
+#define MT_TX_SW_CFG1			0x1334
+#define MT_TX_SW_CFG2			0x1338
+
+#define MT_TXOP_CTRL_CFG		0x1340
+#define MT_TXOP_TRUN_EN			GENMASK(5, 0)
+#define MT_TXOP_EXT_CCA_DLY		GENMASK(15, 8)
+#define MT_TXOP_CTRL
+
+#define MT_TX_RTS_CFG			0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT	GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH		GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK		BIT(24)
+
+#define MT_TX_TIMEOUT_CFG		0x1348
+#define MT_TX_RETRY_CFG			0x134c
+#define MT_TX_LINK_CFG			0x1350
+#define MT_HT_FBK_CFG0			0x1354
+#define MT_HT_FBK_CFG1			0x1358
+#define MT_LG_FBK_CFG0			0x135c
+#define MT_LG_FBK_CFG1			0x1360
+
+#define MT_CCK_PROT_CFG			0x1364
+#define MT_OFDM_PROT_CFG		0x1368
+#define MT_MM20_PROT_CFG		0x136c
+#define MT_MM40_PROT_CFG		0x1370
+#define MT_GF20_PROT_CFG		0x1374
+#define MT_GF40_PROT_CFG		0x1378
+
+#define MT_PROT_RATE			GENMASK(15, 0)
+#define MT_PROT_CTRL_RTS_CTS		BIT(16)
+#define MT_PROT_CTRL_CTS2SELF		BIT(17)
+#define MT_PROT_NAV_SHORT		BIT(18)
+#define MT_PROT_NAV_LONG		BIT(19)
+#define MT_PROT_TXOP_ALLOW_CCK		BIT(20)
+#define MT_PROT_TXOP_ALLOW_OFDM		BIT(21)
+#define MT_PROT_TXOP_ALLOW_MM20		BIT(22)
+#define MT_PROT_TXOP_ALLOW_MM40		BIT(23)
+#define MT_PROT_TXOP_ALLOW_GF20		BIT(24)
+#define MT_PROT_TXOP_ALLOW_GF40		BIT(25)
+#define MT_PROT_RTS_THR_EN		BIT(26)
+#define MT_PROT_RATE_CCK_11		0x0003
+#define MT_PROT_RATE_OFDM_6		0x4000
+#define MT_PROT_RATE_OFDM_24		0x4004
+#define MT_PROT_RATE_DUP_OFDM_24	0x4084
+#define MT_PROT_TXOP_ALLOW_ALL		GENMASK(25, 20)
+#define MT_PROT_TXOP_ALLOW_BW20		(MT_PROT_TXOP_ALLOW_ALL &	\
+					 ~MT_PROT_TXOP_ALLOW_MM40 &	\
+					 ~MT_PROT_TXOP_ALLOW_GF40)
+
+#define MT_EXP_ACK_TIME			0x1380
+
+#define MT_TX_PWR_CFG_0_EXT		0x1390
+#define MT_TX_PWR_CFG_1_EXT		0x1394
+
+#define MT_TX_FBK_LIMIT			0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK	GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK	GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR	BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR	BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT	BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR		0x13a0
+#define MT_TX1_RF_GAIN_CORR		0x13a4
+#define MT_TX0_RF_GAIN_ATTEN		0x13a8
+
+#define MT_TX_ALC_CFG_0			0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0	GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1	GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0		GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1		GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1			0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP	GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2			0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP	GENMASK(5, 0)
+
+#define MT_TX0_BB_GAIN_ATTEN		0x13c0
+
+#define MT_TX_ALC_VGA3			0x13c8
+
+#define MT_TX_PROT_CFG6			0x13e0
+#define MT_TX_PROT_CFG7			0x13e4
+#define MT_TX_PROT_CFG8			0x13e8
+
+#define MT_PIFS_TX_CFG			0x13ec
+
+#define MT_RX_FILTR_CFG			0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR		BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR		BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC		BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS	BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR		BIT(4)
+#define MT_RX_FILTR_CFG_MCAST		BIT(5)
+#define MT_RX_FILTR_CFG_BCAST		BIT(6)
+#define MT_RX_FILTR_CFG_DUP		BIT(7)
+#define MT_RX_FILTR_CFG_CFACK		BIT(8)
+#define MT_RX_FILTR_CFG_CFEND		BIT(9)
+#define MT_RX_FILTR_CFG_ACK		BIT(10)
+#define MT_RX_FILTR_CFG_CTS		BIT(11)
+#define MT_RX_FILTR_CFG_RTS		BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL		BIT(13)
+#define MT_RX_FILTR_CFG_BA		BIT(14)
+#define MT_RX_FILTR_CFG_BAR		BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV	BIT(16)
+
+#define MT_AUTO_RSP_CFG			0x1404
+
+#define MT_AUTO_RSP_PREAMB_SHORT	BIT(4)
+
+#define MT_LEGACY_BASIC_RATE		0x1408
+#define MT_HT_BASIC_RATE		0x140c
+
+#define MT_RX_PARSER_CFG		0x1418
+#define MT_RX_PARSER_RX_SET_NAV_ALL	BIT(0)
+
+#define MT_EXT_CCA_CFG			0x141c
+#define MT_EXT_CCA_CFG_CCA0		GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1		GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2		GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3		GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK		GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK	GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3			0x1478
+
+#define MT_PN_PAD_MODE			0x150c
+
+#define MT_TXOP_HLDR_ET			0x1608
+
+#define MT_PROT_AUTO_TX_CFG		0x1648
+
+#define MT_RX_STA_CNT0			0x1700
+#define MT_RX_STA_CNT1			0x1704
+#define MT_RX_STA_CNT2			0x1708
+#define MT_TX_STA_CNT0			0x170c
+#define MT_TX_STA_CNT1			0x1710
+#define MT_TX_STA_CNT2			0x1714
+
+/* Vendor driver defines content of the second word of STAT_FIFO as follows:
+ *	MT_TX_STAT_FIFO_RATE		GENMASK(26, 16)
+ *	MT_TX_STAT_FIFO_ETXBF		BIT(27)
+ *	MT_TX_STAT_FIFO_SND		BIT(28)
+ *	MT_TX_STAT_FIFO_ITXBF		BIT(29)
+ * However, tests show that b16-31 have the same layout as TXWI rate_ctl
+ * with rate set to rate at which frame was acked.
+ */
+#define MT_TX_STAT_FIFO			0x1718
+#define MT_TX_STAT_FIFO_VALID		BIT(0)
+#define MT_TX_STAT_FIFO_PID_TYPE	GENMASK(4, 1)
+#define MT_TX_STAT_FIFO_SUCCESS		BIT(5)
+#define MT_TX_STAT_FIFO_AGGR		BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ		BIT(7)
+#define MT_TX_STAT_FIFO_WCID		GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE		GENMASK(31, 16)
+
+#define MT_TX_AGG_STAT			0x171c
+
+#define MT_TX_AGG_CNT_BASE0		0x1720
+
+#define MT_MPDU_DENSITY_CNT		0x1740
+
+#define MT_TX_AGG_CNT_BASE1		0x174c
+
+#define MT_TX_AGG_CNT(_id)		((_id) < 8 ?			\
+					 MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+					 MT_TX_AGG_CNT_BASE1 + (((_id) - 8) << 2))
+
+#define MT_TX_STAT_FIFO_EXT		0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY	GENMASK(7, 0)
+
+#define MT_BBP_CORE_BASE		0x2000
+#define MT_BBP_IBI_BASE			0x2100
+#define MT_BBP_AGC_BASE			0x2300
+#define MT_BBP_TXC_BASE			0x2400
+#define MT_BBP_RXC_BASE			0x2500
+#define MT_BBP_TXO_BASE			0x2600
+#define MT_BBP_TXBE_BASE		0x2700
+#define MT_BBP_RXFE_BASE		0x2800
+#define MT_BBP_RXO_BASE			0x2900
+#define MT_BBP_DFS_BASE			0x2a00
+#define MT_BBP_TR_BASE			0x2b00
+#define MT_BBP_CAL_BASE			0x2c00
+#define MT_BBP_DSC_BASE			0x2e00
+#define MT_BBP_PFMU_BASE		0x2f00
+
+#define MT_BBP(_type, _n)		(MT_BBP_##_type##_BASE + ((_n) << 2))
+
+#define MT_BBP_CORE_R1_BW		GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN		GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW		GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_GAIN		GENMASK(21, 16)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_GAIN			GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0		GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1		GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN	GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE		0x1800
+#define MT_WCID_ADDR(_n)		(MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE			0x4000
+
+#define MT_WCID_KEY_BASE		0x8000
+#define MT_WCID_KEY(_n)			(MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE			0xa000
+#define MT_WCID_IV(_n)			(MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE		0xa800
+#define MT_WCID_ATTR(_n)		(MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE		BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE		GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX		GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF		GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT	BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT	BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC		BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID		GENMASK(31, 24)
+
+#define MT_SKEY_BASE_0			0xac00
+#define MT_SKEY_BASE_1			0xb400
+#define MT_SKEY_0(_bss, _idx)		\
+	(MT_SKEY_BASE_0 + (4 * (_bss) + (_idx)) * 32)
+#define MT_SKEY_1(_bss, _idx)		\
+	(MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + (_idx)) * 32)
+#define MT_SKEY(_bss, _idx)		\
+	(((_bss) & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
+
+#define MT_SKEY_MODE_BASE_0		0xb000
+#define MT_SKEY_MODE_BASE_1		0xb3f0
+#define MT_SKEY_MODE_0(_bss)		\
+	(MT_SKEY_MODE_BASE_0 + (((_bss) / 2) << 2))
+#define MT_SKEY_MODE_1(_bss)		\
+	(MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss)		\
+	(((_bss) & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK		GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx)	(4 * ((_idx) + 4 * ((_bss) & 1)))
+
+#define MT_BEACON_BASE			0xc000
+
+#define MT_TEMP_SENSOR			0x1d000
+#define MT_TEMP_SENSOR_VAL		GENMASK(6, 0)
+
+enum mt76_cipher_type {
+	MT_CIPHER_NONE,
+	MT_CIPHER_WEP40,
+	MT_CIPHER_WEP104,
+	MT_CIPHER_TKIP,
+	MT_CIPHER_AES_CCMP,
+	MT_CIPHER_CKIP40,
+	MT_CIPHER_CKIP104,
+	MT_CIPHER_CKIP128,
+	MT_CIPHER_WAPI,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/trace.c b/drivers/net/wireless/mediatek/mt7601u/trace.c
new file mode 100644
index 0000000..8abdd3c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/trace.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/trace.h b/drivers/net/wireless/mediatek/mt7601u/trace.h
new file mode 100644
index 0000000..2898973
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/trace.h
@@ -0,0 +1,400 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(__MT7601U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT7601U_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt7601u.h"
+#include "mac.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt7601u
+
+#define MAXNAME		32
+#define DEV_ENTRY	__array(char, wiphy_name, MAXNAME)
+#define DEV_ASSIGN	strlcpy(__entry->wiphy_name,			\
+				wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT	"%s "
+#define DEV_PR_ARG	__entry->wiphy_name
+
+#define REG_ENTRY	__field(u32, reg) __field(u32, val)
+#define REG_ASSIGN	__entry->reg = reg; __entry->val = val
+#define REG_PR_FMT	"%04x=%08x"
+#define REG_PR_ARG	__entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+	TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		REG_ENTRY
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		REG_ASSIGN;
+	),
+	TP_printk(
+		DEV_PR_FMT REG_PR_FMT,
+		DEV_PR_ARG, REG_PR_ARG
+	)
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_read,
+	TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_write,
+	TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val)
+);
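+
+/* Editor's note: once CREATE_TRACE_POINTS expands these in trace.c, the
+ * events appear under the "mt7601u" TRACE_SYSTEM and can be enabled at
+ * runtime from tracefs, e.g.:
+ *
+ *   echo 1 > /sys/kernel/debug/tracing/events/mt7601u/reg_write/enable
+ *   cat /sys/kernel/debug/tracing/trace_pipe
+ */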
+
+TRACE_EVENT(mt_submit_urb,
+	TP_PROTO(struct mt7601u_dev *dev, struct urb *u),
+	TP_ARGS(dev, u),
+	TP_STRUCT__entry(
+		DEV_ENTRY __field(unsigned, pipe) __field(u32, len)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->pipe = u->pipe;
+		__entry->len = u->transfer_buffer_length;
+	),
+	TP_printk(DEV_PR_FMT "p:%08x len:%u",
+		  DEV_PR_ARG, __entry->pipe, __entry->len)
+);
+
+#define trace_mt_submit_urb_sync(__dev, __pipe, __len) ({	\
+	struct urb u;					\
+	u.pipe = __pipe;				\
+	u.transfer_buffer_length = __len;		\
+	trace_mt_submit_urb(__dev, &u);			\
+})
+
+TRACE_EVENT(mt_mcu_msg_send,
+	TP_PROTO(struct mt7601u_dev *dev,
+		 struct sk_buff *skb, u32 csum, bool resp),
+	TP_ARGS(dev, skb, csum, resp),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u32, info)
+		__field(u32, csum)
+		__field(bool, resp)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->info = *(u32 *)skb->data;
+		__entry->csum = csum;
+		__entry->resp = resp;
+	),
+	TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d",
+		  DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp)
+);
+
+TRACE_EVENT(mt_vend_req,
+	TP_PROTO(struct mt7601u_dev *dev, unsigned pipe, u8 req, u8 req_type,
+		 u16 val, u16 offset, void *buf, size_t buflen, int ret),
+	TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(unsigned, pipe) __field(u8, req) __field(u8, req_type)
+		__field(u16, val) __field(u16, offset) __field(void*, buf)
+		__field(int, buflen) __field(int, ret)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->pipe = pipe;
+		__entry->req = req;
+		__entry->req_type = req_type;
+		__entry->val = val;
+		__entry->offset = offset;
+		__entry->buf = buf;
+		__entry->buflen = buflen;
+		__entry->ret = ret;
+	),
+	TP_printk(DEV_PR_FMT
+		  "%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d",
+		  DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req,
+		  __entry->req_type, __entry->val, __entry->offset,
+		  !!__entry->buf, __entry->buflen)
+);
+
+TRACE_EVENT(ee_read,
+	TP_PROTO(struct mt7601u_dev *dev, int offset, u16 val),
+	TP_ARGS(dev, offset, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(int, o) __field(u16, v)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->o = offset;
+		__entry->v = val;
+	),
+	TP_printk(DEV_PR_FMT "%04x=%04x", DEV_PR_ARG, __entry->o, __entry->v)
+);
+
+DECLARE_EVENT_CLASS(dev_rf_reg_evt,
+	TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
+	TP_ARGS(dev, bank, reg, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u8, bank)
+		__field(u8, reg)
+		__field(u8, val)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		REG_ASSIGN;
+		__entry->bank = bank;
+	),
+	TP_printk(
+		DEV_PR_FMT "%02hhx:%02hhx=%02hhx",
+		DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val
+	)
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, rf_read,
+	TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
+	TP_ARGS(dev, bank, reg, val)
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, rf_write,
+	TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
+	TP_ARGS(dev, bank, reg, val)
+);
+
+DECLARE_EVENT_CLASS(dev_bbp_reg_evt,
+	TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
+	TP_ARGS(dev, reg, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u8, reg)
+		__field(u8, val)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		REG_ASSIGN;
+	),
+	TP_printk(
+		DEV_PR_FMT "%02hhx=%02hhx",
+		DEV_PR_ARG, __entry->reg, __entry->val
+	)
+);
+
+DEFINE_EVENT(dev_bbp_reg_evt, bbp_read,
+	TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
+	TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_bbp_reg_evt, bbp_write,
+	TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
+	TP_ARGS(dev, reg, val)
+);
+
+DECLARE_EVENT_CLASS(dev_simple_evt,
+	TP_PROTO(struct mt7601u_dev *dev, u8 val),
+	TP_ARGS(dev, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u8, val)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->val = val;
+	),
+	TP_printk(
+		DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val
+	)
+);
+
+DEFINE_EVENT(dev_simple_evt, temp_mode,
+	TP_PROTO(struct mt7601u_dev *dev, u8 val),
+	TP_ARGS(dev, val)
+);
+
+DEFINE_EVENT(dev_simple_evt, read_temp,
+	TP_PROTO(struct mt7601u_dev *dev, u8 val),
+	TP_ARGS(dev, val)
+);
+
+DEFINE_EVENT(dev_simple_evt, freq_cal_adjust,
+	TP_PROTO(struct mt7601u_dev *dev, u8 val),
+	TP_ARGS(dev, val)
+);
+
+TRACE_EVENT(freq_cal_offset,
+	TP_PROTO(struct mt7601u_dev *dev, u8 phy_mode, s8 freq_off),
+	TP_ARGS(dev, phy_mode, freq_off),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u8, phy_mode)
+		__field(s8, freq_off)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->phy_mode = phy_mode;
+		__entry->freq_off = freq_off;
+	),
+	TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx",
+		  DEV_PR_ARG, __entry->phy_mode, __entry->freq_off)
+);
+
+TRACE_EVENT(mt_rx,
+	TP_PROTO(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi, u32 f),
+	TP_ARGS(dev, rxwi, f),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field_struct(struct mt7601u_rxwi, rxwi)
+		__field(u32, fce_info)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->rxwi = *rxwi;
+		__entry->fce_info = f;
+	),
+	TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x frag_sn:%04hx rate:%04hx "
+		  "uknw:%02hhx z:%02hhx%02hhx%02hhx snr:%02hhx "
+		  "ant:%02hhx gain:%02hhx freq_o:%02hhx "
+		  "r:%08x ea:%08x fce:%08x", DEV_PR_ARG,
+		  le32_to_cpu(__entry->rxwi.rxinfo),
+		  le32_to_cpu(__entry->rxwi.ctl),
+		  le16_to_cpu(__entry->rxwi.frag_sn),
+		  le16_to_cpu(__entry->rxwi.rate),
+		  __entry->rxwi.unknown,
+		  __entry->rxwi.zero[0], __entry->rxwi.zero[1],
+		  __entry->rxwi.zero[2],
+		  __entry->rxwi.snr, __entry->rxwi.ant,
+		  __entry->rxwi.gain, __entry->rxwi.freq_off,
+		  __entry->rxwi.resv2, __entry->rxwi.expert_ant,
+		  __entry->fce_info)
+);
+
+TRACE_EVENT(mt_tx,
+	TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb,
+		 struct mt76_sta *sta, struct mt76_txwi *h),
+	TP_ARGS(dev, skb, sta, h),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field_struct(struct mt76_txwi, h)
+		__field(struct sk_buff *, skb)
+		__field(struct mt76_sta *, sta)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->h = *h;
+		__entry->skb = skb;
+		__entry->sta = sta;
+	),
+	TP_printk(DEV_PR_FMT "skb:%p sta:%p  flg:%04hx rate_ctl:%04hx "
+		  "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG,
+		  __entry->skb, __entry->sta,
+		  le16_to_cpu(__entry->h.flags),
+		  le16_to_cpu(__entry->h.rate_ctl),
+		  __entry->h.ack_ctl, __entry->h.wcid,
+		  le16_to_cpu(__entry->h.len_ctl))
+);
+
+TRACE_EVENT(mt_tx_dma_done,
+	TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb),
+	TP_ARGS(dev, skb),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(struct sk_buff *, skb)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->skb = skb;
+	),
+	TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb)
+);
+
+TRACE_EVENT(mt_tx_status_cleaned,
+	TP_PROTO(struct mt7601u_dev *dev, int cleaned),
+	TP_ARGS(dev, cleaned),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(int, cleaned)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->cleaned = cleaned;
+	),
+	TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned)
+);
+
+TRACE_EVENT(mt_tx_status,
+	TP_PROTO(struct mt7601u_dev *dev, u32 stat1, u32 stat2),
+	TP_ARGS(dev, stat1, stat2),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u32, stat1)	__field(u32, stat2)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->stat1 = stat1;
+		__entry->stat2 = stat2;
+	),
+	TP_printk(DEV_PR_FMT "%08x %08x",
+		  DEV_PR_ARG, __entry->stat1, __entry->stat2)
+);
+
+TRACE_EVENT(mt_rx_dma_aggr,
+	TP_PROTO(struct mt7601u_dev *dev, int cnt, bool paged),
+	TP_ARGS(dev, cnt, paged),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u8, cnt)
+		__field(bool, paged)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->cnt = cnt;
+		__entry->paged = paged;
+	),
+	TP_printk(DEV_PR_FMT "cnt:%d paged:%d",
+		  DEV_PR_ARG, __entry->cnt, __entry->paged)
+);
+
+DEFINE_EVENT(dev_simple_evt, set_key,
+	TP_PROTO(struct mt7601u_dev *dev, u8 val),
+	TP_ARGS(dev, val)
+);
+
+TRACE_EVENT(set_shared_key,
+	TP_PROTO(struct mt7601u_dev *dev, u8 vid, u8 key),
+	TP_ARGS(dev, vid, key),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u8, vid)
+		__field(u8, key)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->vid = vid;
+		__entry->key = key;
+	),
+	TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx",
+		  DEV_PR_ARG, __entry->vid, __entry->key)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
new file mode 100644
index 0000000..0be2080
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "trace.h"
+
+enum mt76_txq_id {
+	MT_TXQ_VO = IEEE80211_AC_VO,
+	MT_TXQ_VI = IEEE80211_AC_VI,
+	MT_TXQ_BE = IEEE80211_AC_BE,
+	MT_TXQ_BK = IEEE80211_AC_BK,
+	MT_TXQ_PSD,
+	MT_TXQ_MCU,
+	__MT_TXQ_MAX
+};
+
+/* Hardware uses mirrored order of queues with Q0 having the highest priority */
+static u8 q2hwq(u8 q)
+{
+	return q ^ 0x3;
+}
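+
+/* Editor's sketch: the XOR mirrors the 2-bit queue index, i.e. with
+ * mac80211's numbering (VO=0, VI=1, BE=2, BK=3) the mapping is
+ * 0 <-> 3 and 1 <-> 2.
+ */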
+
+/* Take mac80211 Q id from the skb and translate it to hardware Q id */
+static u8 skb2q(struct sk_buff *skb)
+{
+	int qid = skb_get_queue_mapping(skb);
+
+	if (WARN_ON(qid >= MT_TXQ_PSD)) {
+		qid = MT_TXQ_BE;
+		skb_set_queue_mapping(skb, qid);
+	}
+
+	return q2hwq(qid);
+}
+
+/* Note: TX retry reporting is a bit broken.
+ *	 Retries are reported only once per AMPDU and often come a frame early
+ *	 i.e. they are reported in the last status preceding the AMPDU. Apart
+ *	 from the fact that it's hard to know the length of the AMPDU (which
+ *	 is needed to know how many consecutive frames the retries apply to),
+ *	 if the status arrives early on a full FIFO it gets lost and the
+ *	 retries of the whole AMPDU become invisible.
+ *	 As a work-around, encode the desired rate in the PKT_ID of the TX
+ *	 descriptor and guess the retries from it (every rate is tried once).
+ *	 Only downside here is that for MCS0 we have to rely solely on
+ *	 transmission failures as no retries can ever be reported.
+ *	 Not having to read EXT_FIFO has a nice effect of doubling the number
+ *	 of reports which can be fetched.
+ *	 Also the vendor driver never uses the EXT_FIFO register so it may be
+ *	 undertested.
+ */
+static u8 mt7601u_tx_pktid_enc(struct mt7601u_dev *dev, u8 rate, bool is_probe)
+{
+	u8 encoded = (rate + 1) + is_probe * 8;
+
+	/* Because PKT_ID 0 disables status reporting only 15 values are
+	 * available but 16 are needed (8 MCS * 2 for encoding is_probe)
+	 * - we need to cram together two rates. MCS0 and MCS7 with is_probe
+	 * share PKT_ID 9.
+	 */
+	if (is_probe && rate == 7)
+		return encoded - 7;
+
+	return encoded;
+}
+
+static void
+mt7601u_tx_pktid_dec(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
+{
+	u8 req_rate = stat->pktid;
+	u8 eff_rate = stat->rate & 0x7;
+
+	req_rate -= 1;
+
+	if (req_rate > 7) {
+		stat->is_probe = true;
+		req_rate -= 8;
+
+		/* Decide between MCS0 and MCS7 which share pktid 9 */
+		if (!req_rate && eff_rate)
+			req_rate = 7;
+	}
+
+	stat->retry = req_rate - eff_rate;
+}
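+
+/* Editor's worked example of the PKT_ID scheme above:
+ *   enc(rate=3, probe=0) = 4;   dec(4): req_rate = 3, retry = 3 - eff_rate
+ *   enc(rate=0, probe=1) = 9;   enc(rate=7, probe=1) = 16 - 7 = 9
+ *   dec(9): req_rate = 8 > 7 -> is_probe, req_rate = 0; a non-zero
+ *   effective rate means it must have been the MCS7 probe, so req_rate = 7.
+ */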
+
+static void mt7601u_tx_skb_remove_dma_overhead(struct sk_buff *skb,
+					       struct ieee80211_tx_info *info)
+{
+	int pkt_len = (unsigned long)info->status.status_driver_data[0];
+
+	skb_pull(skb, sizeof(struct mt76_txwi) + 4);
+	if (ieee80211_get_hdrlen_from_skb(skb) % 4)
+		mt76_remove_hdr_pad(skb);
+
+	skb_trim(skb, pkt_len);
+}
+
+void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	mt7601u_tx_skb_remove_dma_overhead(skb, info);
+
+	ieee80211_tx_info_clear_status(info);
+	info->status.rates[0].idx = -1;
+	info->flags |= IEEE80211_TX_STAT_ACK;
+	ieee80211_tx_status(dev->hw, skb);
+}
+
+static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
+{
+	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+	u32 need_head;
+
+	need_head = sizeof(struct mt76_txwi) + 4;
+	if (hdr_len % 4)
+		need_head += 2;
+
+	return skb_cow(skb, need_head);
+}
+
+static struct mt76_txwi *
+mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
+		  struct ieee80211_sta *sta, struct mt76_wcid *wcid,
+		  int pkt_len)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_rate *rate = &info->control.rates[0];
+	struct mt76_txwi *txwi;
+	unsigned long flags;
+	bool is_probe;
+	u32 pkt_id;
+	u16 rate_ctl;
+	u8 nss;
+
+	txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi));
+	memset(txwi, 0, sizeof(*txwi));
+
+	if (!wcid->tx_rate_set)
+		ieee80211_get_tx_rates(info->control.vif, sta, skb,
+				       info->control.rates, 1);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (rate->idx < 0 || !rate->count)
+		rate_ctl = wcid->tx_rate;
+	else
+		rate_ctl = mt76_mac_tx_rate_val(dev, rate, &nss);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	txwi->rate_ctl = cpu_to_le16(rate_ctl);
+
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+
+	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+		ba_size <<= sta->ht_cap.ampdu_factor;
+		ba_size = min_t(int, 63, ba_size);
+		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+			ba_size = 0;
+		txwi->ack_ctl |= MT76_SET(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+		txwi->flags = cpu_to_le16(MT_TXWI_FLAGS_AMPDU |
+					  MT76_SET(MT_TXWI_FLAGS_MPDU_DENSITY,
+						   sta->ht_cap.ampdu_density));
+		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+			txwi->flags = 0;
+	}
+
+	txwi->wcid = wcid->idx;
+
+	is_probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+	pkt_id = mt7601u_tx_pktid_enc(dev, rate_ctl & 0x7, is_probe);
+	pkt_len |= MT76_SET(MT_TXWI_LEN_PKTID, pkt_id);
+	txwi->len_ctl = cpu_to_le16(pkt_len);
+
+	return txwi;
+}
+
+void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+		struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mt7601u_dev *dev = hw->priv;
+	struct ieee80211_vif *vif = info->control.vif;
+	struct ieee80211_sta *sta = control->sta;
+	struct mt76_sta *msta = NULL;
+	struct mt76_wcid *wcid = dev->mon_wcid;
+	struct mt76_txwi *txwi;
+	int pkt_len = skb->len;
+	int hw_q = skb2q(skb);
+
+	BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+	info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;
+
+	if (mt7601u_skb_rooms(dev, skb) || mt76_insert_hdr_pad(skb)) {
+		ieee80211_free_txskb(dev->hw, skb);
+		return;
+	}
+
+	if (sta) {
+		msta = (struct mt76_sta *) sta->drv_priv;
+		wcid = &msta->wcid;
+	} else if (vif) {
+		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+
+		wcid = &mvif->group_wcid;
+	}
+
+	txwi = mt7601u_push_txwi(dev, skb, sta, wcid, pkt_len);
+
+	if (mt7601u_dma_enqueue_tx(dev, skb, wcid, hw_q))
+		return;
+
+	trace_mt_tx(dev, skb, msta, txwi);
+}
+
+void mt7601u_tx_stat(struct work_struct *work)
+{
+	struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+					       stat_work.work);
+	struct mt76_tx_status stat;
+	unsigned long flags;
+	int cleaned = 0;
+
+	while (!test_bit(MT7601U_STATE_REMOVED, &dev->state)) {
+		stat = mt7601u_mac_fetch_tx_status(dev);
+		if (!stat.valid)
+			break;
+
+		mt7601u_tx_pktid_dec(dev, &stat);
+		mt76_send_tx_status(dev, &stat);
+
+		cleaned++;
+	}
+	trace_mt_tx_status_cleaned(dev, cleaned);
+
+	spin_lock_irqsave(&dev->tx_lock, flags);
+	if (cleaned)
+		queue_delayed_work(dev->stat_wq, &dev->stat_work,
+				   msecs_to_jiffies(10));
+	else if (test_and_clear_bit(MT7601U_STATE_MORE_STATS, &dev->state))
+		queue_delayed_work(dev->stat_wq, &dev->stat_work,
+				   msecs_to_jiffies(20));
+	else
+		clear_bit(MT7601U_STATE_READING_STATS, &dev->state);
+	spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		    u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
+	u32 val;
+
+	/* TODO: should we do funny things with the parameters?
+	 *	 See what mt7601u_set_default_edca() used to do in init.c.
+	 */
+
+	if (params->cw_min)
+		cw_min = fls(params->cw_min);
+	if (params->cw_max)
+		cw_max = fls(params->cw_max);
+
+	WARN_ON(params->txop > 0xff);
+	WARN_ON(params->aifs > 0xf);
+	WARN_ON(cw_min > 0xf);
+	WARN_ON(cw_max > 0xf);
+
+	val = MT76_SET(MT_EDCA_CFG_AIFSN, params->aifs) |
+	      MT76_SET(MT_EDCA_CFG_CWMIN, cw_min) |
+	      MT76_SET(MT_EDCA_CFG_CWMAX, cw_max);
+	/* TODO: based on the user-controlled EnableTxBurst variable the
+	 *	 vendor driver sets a really long TXOP on AC0 (see
+	 *	 connect.c:2009), but seemingly only when connected. When not
+	 *	 connected it should be 0.
+	 */
+	if (!hw_q)
+		val |= 0x60;
+	else
+		val |= MT76_SET(MT_EDCA_CFG_TXOP, params->txop);
+	mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);
+
+	val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
+	val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
+	val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
+	mt76_wr(dev, MT_WMM_TXOP(hw_q), val);
+
+	val = mt76_rr(dev, MT_WMM_AIFSN);
+	val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
+	val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
+	mt76_wr(dev, MT_WMM_AIFSN, val);
+
+	val = mt76_rr(dev, MT_WMM_CWMIN);
+	val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
+	val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
+	mt76_wr(dev, MT_WMM_CWMIN, val);
+
+	val = mt76_rr(dev, MT_WMM_CWMAX);
+	val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
+	val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
+	mt76_wr(dev, MT_WMM_CWMAX, val);
+
+	return 0;
+}
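+
+/* Editor's note on the CW handling above: the EDCA registers take contention
+ * windows in log2 form, so fls() converts the cfg80211 value to an exponent,
+ * e.g. params->cw_min = 15 -> fls(15) = 4 -> hardware uses CW = 2^4 - 1 = 15.
+ */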
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c
new file mode 100644
index 0000000..54dba40
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.c
@@ -0,0 +1,367 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt7601u.h"
+#include "usb.h"
+#include "trace.h"
+
+static const struct usb_device_id mt7601u_device_table[] = {
+	{ USB_DEVICE(0x0b05, 0x17d3) },
+	{ USB_DEVICE(0x0e8d, 0x760a) },
+	{ USB_DEVICE(0x0e8d, 0x760b) },
+	{ USB_DEVICE(0x13d3, 0x3431) },
+	{ USB_DEVICE(0x13d3, 0x3434) },
+	{ USB_DEVICE(0x148f, 0x7601) },
+	{ USB_DEVICE(0x148f, 0x760a) },
+	{ USB_DEVICE(0x148f, 0x760b) },
+	{ USB_DEVICE(0x148f, 0x760c) },
+	{ USB_DEVICE(0x148f, 0x760d) },
+	{ USB_DEVICE(0x2001, 0x3d04) },
+	{ USB_DEVICE(0x2717, 0x4106) },
+	{ USB_DEVICE(0x2955, 0x0001) },
+	{ USB_DEVICE(0x2955, 0x1001) },
+	{ USB_DEVICE(0x2a5f, 0x1000) },
+	{ USB_DEVICE(0x7392, 0x7710) },
+	{ 0, }
+};
+
+bool mt7601u_usb_alloc_buf(struct mt7601u_dev *dev, size_t len,
+			   struct mt7601u_dma_buf *buf)
+{
+	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+
+	buf->len = len;
+	buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+	buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma);
+
+	return !buf->urb || !buf->buf;
+}
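+
+/* Editor's note: unusually, this helper returns true on *failure*. A minimal
+ * (hypothetical) call site, assuming both free helpers tolerate a partially
+ * failed allocation:
+ *
+ *	struct mt7601u_dma_buf buf;
+ *
+ *	if (mt7601u_usb_alloc_buf(dev, 512, &buf)) {
+ *		mt7601u_usb_free_buf(dev, &buf);
+ *		return -ENOMEM;
+ *	}
+ */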
+
+void mt7601u_usb_free_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf *buf)
+{
+	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+
+	usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma);
+	usb_free_urb(buf->urb);
+}
+
+int mt7601u_usb_submit_buf(struct mt7601u_dev *dev, int dir, int ep_idx,
+			   struct mt7601u_dma_buf *buf, gfp_t gfp,
+			   usb_complete_t complete_fn, void *context)
+{
+	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+	unsigned pipe;
+	int ret;
+
+	if (dir == USB_DIR_IN)
+		pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[ep_idx]);
+	else
+		pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep_idx]);
+
+	usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len,
+			  complete_fn, context);
+	buf->urb->transfer_dma = buf->dma;
+	buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+	trace_mt_submit_urb(dev, buf->urb);
+	ret = usb_submit_urb(buf->urb, gfp);
+	if (ret)
+		dev_err(dev->dev, "Error: submit URB dir:%d ep:%d failed:%d\n",
+			dir, ep_idx, ret);
+	return ret;
+}
+
+void mt7601u_complete_urb(struct urb *urb)
+{
+	struct completion *cmpl = urb->context;
+
+	complete(cmpl);
+}
+
+static int
+__mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+			 const u8 direction, const u16 val, const u16 offset,
+			 void *buf, const size_t buflen)
+{
+	int i, ret;
+	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+	const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+	const unsigned int pipe = (direction == USB_DIR_IN) ?
+		usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
+
+	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+		ret = usb_control_msg(usb_dev, pipe, req, req_type,
+				      val, offset, buf, buflen,
+				      MT_VEND_REQ_TOUT_MS);
+		trace_mt_vend_req(dev, pipe, req, req_type, val, offset,
+				  buf, buflen, ret);
+
+		if (ret >= 0 || ret == -ENODEV)
+			return ret;
+
+		msleep(5);
+	}
+
+	dev_err(dev->dev, "Vendor request req:%02x off:%04x failed:%d\n",
+		req, offset, ret);
+
+	return ret;
+}
+
+int
+mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+		       const u8 direction, const u16 val, const u16 offset,
+		       void *buf, const size_t buflen)
+{
+	int ret;
+
+	mutex_lock(&dev->vendor_req_mutex);
+
+	ret = __mt7601u_vendor_request(dev, req, direction, val, offset,
+				       buf, buflen);
+	if (ret == -ENODEV)
+		set_bit(MT7601U_STATE_REMOVED, &dev->state);
+
+	mutex_unlock(&dev->vendor_req_mutex);
+
+	return ret;
+}
+
+void mt7601u_vendor_reset(struct mt7601u_dev *dev)
+{
+	mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+			       MT_VEND_DEV_MODE_RESET, 0, NULL, 0);
+}
+
+u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset)
+{
+	int ret;
+	__le32 reg;
+	u32 val;
+
+	WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
+
+	ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN,
+				     0, offset, &reg, sizeof(reg));
+	val = le32_to_cpu(reg);
+	if (ret > 0 && ret != sizeof(reg)) {
+		dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
+			ret, offset);
+		val = ~0;
+	}
+
+	trace_reg_read(dev, offset, val);
+	return val;
+}
+
+int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
+			     const u16 offset, const u32 val)
+{
+	int ret;
+
+	ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+				     val & 0xffff, offset, NULL, 0);
+	if (ret)
+		return ret;
+	return mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+				      val >> 16, offset + 2, NULL, 0);
+}
+
+void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+	WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset);
+
+	mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, offset, val);
+	trace_reg_write(dev, offset, val);
+}
+
+u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
+{
+	val |= mt7601u_rr(dev, offset) & ~mask;
+	mt7601u_wr(dev, offset, val);
+	return val;
+}
+
+u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
+{
+	u32 reg = mt7601u_rr(dev, offset);
+
+	val |= reg & ~mask;
+	if (reg != val)
+		mt7601u_wr(dev, offset, val);
+	return val;
+}
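+
+/* Editor's sketch contrasting the two read-modify helpers: both return the
+ * merged value, but only _rmc skips the USB write when nothing changed:
+ *
+ *	mt7601u_rmw(dev, reg, mask, val);   read + unconditional write
+ *	mt7601u_rmc(dev, reg, mask, val);   read + write only if it differs
+ */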
+
+void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset,
+		     const void *data, int len)
+{
+	WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset);
+	WARN_ONCE(len & 3, "short write copy off:%08x", offset);
+
+	mt7601u_burst_write_regs(dev, offset, data, len / 4);
+}
+
+void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr)
+{
+	mt7601u_wr(dev, offset, get_unaligned_le32(addr));
+	mt7601u_wr(dev, offset + 4, addr[4] | addr[5] << 8);
+}
+
+static int mt7601u_assign_pipes(struct usb_interface *usb_intf,
+				struct mt7601u_dev *dev)
+{
+	struct usb_endpoint_descriptor *ep_desc;
+	struct usb_host_interface *intf_desc = usb_intf->cur_altsetting;
+	unsigned i, ep_i = 0, ep_o = 0;
+
+	BUILD_BUG_ON(sizeof(dev->in_eps) < __MT_EP_IN_MAX);
+	BUILD_BUG_ON(sizeof(dev->out_eps) < __MT_EP_OUT_MAX);
+
+	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+		ep_desc = &intf_desc->endpoint[i].desc;
+
+		if (usb_endpoint_is_bulk_in(ep_desc) &&
+		    ep_i++ < __MT_EP_IN_MAX) {
+			dev->in_eps[ep_i - 1] = usb_endpoint_num(ep_desc);
+			dev->in_max_packet = usb_endpoint_maxp(ep_desc);
+			/* Note: this is ignored by usb sub-system but vendor
+			 *	 code does it. We can drop this at some point.
+			 */
+			dev->in_eps[ep_i - 1] |= USB_DIR_IN;
+		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
+			   ep_o++ < __MT_EP_OUT_MAX) {
+			dev->out_eps[ep_o - 1] = usb_endpoint_num(ep_desc);
+			dev->out_max_packet = usb_endpoint_maxp(ep_desc);
+		}
+	}
+
+	if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) {
+		dev_err(dev->dev, "Error: wrong pipe number in:%d out:%d\n",
+			ep_i, ep_o);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mt7601u_probe(struct usb_interface *usb_intf,
+			 const struct usb_device_id *id)
+{
+	struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
+	struct mt7601u_dev *dev;
+	u32 asic_rev, mac_rev;
+	int ret;
+
+	dev = mt7601u_alloc_device(&usb_intf->dev);
+	if (!dev)
+		return -ENOMEM;
+
+	usb_dev = usb_get_dev(usb_dev);
+	usb_reset_device(usb_dev);
+
+	usb_set_intfdata(usb_intf, dev);
+
+	ret = mt7601u_assign_pipes(usb_intf, dev);
+	if (ret)
+		goto err;
+	ret = mt7601u_wait_asic_ready(dev);
+	if (ret)
+		goto err;
+
+	asic_rev = mt7601u_rr(dev, MT_ASIC_VERSION);
+	mac_rev = mt7601u_rr(dev, MT_MAC_CSR0);
+	dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n",
+		 asic_rev, mac_rev);
+
+	/* Note: vendor driver skips this check for MT7601U */
+	if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
+		dev_warn(dev->dev, "Warning: eFUSE not present\n");
+
+	ret = mt7601u_init_hardware(dev);
+	if (ret)
+		goto err;
+	ret = mt7601u_register_device(dev);
+	if (ret)
+		goto err_hw;
+
+	set_bit(MT7601U_STATE_INITIALIZED, &dev->state);
+
+	return 0;
+err_hw:
+	mt7601u_cleanup(dev);
+err:
+	usb_set_intfdata(usb_intf, NULL);
+	usb_put_dev(interface_to_usbdev(usb_intf));
+
+	destroy_workqueue(dev->stat_wq);
+	ieee80211_free_hw(dev->hw);
+	return ret;
+}
+
+static void mt7601u_disconnect(struct usb_interface *usb_intf)
+{
+	struct mt7601u_dev *dev = usb_get_intfdata(usb_intf);
+
+	ieee80211_unregister_hw(dev->hw);
+	mt7601u_cleanup(dev);
+
+	usb_set_intfdata(usb_intf, NULL);
+	usb_put_dev(interface_to_usbdev(usb_intf));
+
+	destroy_workqueue(dev->stat_wq);
+	ieee80211_free_hw(dev->hw);
+}
+
+static int mt7601u_suspend(struct usb_interface *usb_intf, pm_message_t state)
+{
+	struct mt7601u_dev *dev = usb_get_intfdata(usb_intf);
+
+	mt7601u_cleanup(dev);
+
+	return 0;
+}
+
+static int mt7601u_resume(struct usb_interface *usb_intf)
+{
+	struct mt7601u_dev *dev = usb_get_intfdata(usb_intf);
+	int ret;
+
+	ret = mt7601u_init_hardware(dev);
+	if (ret)
+		return ret;
+
+	set_bit(MT7601U_STATE_INITIALIZED, &dev->state);
+
+	return 0;
+}
+
+MODULE_DEVICE_TABLE(usb, mt7601u_device_table);
+MODULE_FIRMWARE(MT7601U_FIRMWARE);
+MODULE_LICENSE("GPL");
+
+static struct usb_driver mt7601u_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= mt7601u_device_table,
+	.probe		= mt7601u_probe,
+	.disconnect	= mt7601u_disconnect,
+	.suspend	= mt7601u_suspend,
+	.resume		= mt7601u_resume,
+	.reset_resume	= mt7601u_resume,
+	.soft_unbind	= 1,
+	.disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt7601u_driver);
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.h b/drivers/net/wireless/mediatek/mt7601u/usb.h
new file mode 100644
index 0000000..49e188f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_USB_H
+#define __MT7601U_USB_H
+
+#include "mt7601u.h"
+
+#define MT7601U_FIRMWARE	"mt7601u.bin"
+
+#define MT_VEND_REQ_MAX_RETRY	10
+#define MT_VEND_REQ_TOUT_MS	300
+
+#define MT_VEND_DEV_MODE_RESET	1
+
+enum mt_vendor_req {
+	MT_VEND_DEV_MODE = 1,
+	MT_VEND_WRITE = 2,
+	MT_VEND_MULTI_READ = 7,
+	MT_VEND_WRITE_FCE = 0x42,
+};
+
+enum mt_usb_ep_in {
+	MT_EP_IN_PKT_RX,
+	MT_EP_IN_CMD_RESP,
+	__MT_EP_IN_MAX,
+};
+
+enum mt_usb_ep_out {
+	MT_EP_OUT_INBAND_CMD,
+	MT_EP_OUT_AC_BK,
+	MT_EP_OUT_AC_BE,
+	MT_EP_OUT_AC_VI,
+	MT_EP_OUT_AC_VO,
+	MT_EP_OUT_HCCA,
+	__MT_EP_OUT_MAX,
+};
+
+static inline struct usb_device *mt7601u_to_usb_dev(struct mt7601u_dev *mt7601u)
+{
+	return interface_to_usbdev(to_usb_interface(mt7601u->dev));
+}
+
+static inline bool mt7601u_urb_has_error(struct urb *urb)
+{
+	return urb->status &&
+		urb->status != -ENOENT &&
+		urb->status != -ECONNRESET &&
+		urb->status != -ESHUTDOWN;
+}
+
+bool mt7601u_usb_alloc_buf(struct mt7601u_dev *dev, size_t len,
+			   struct mt7601u_dma_buf *buf);
+void mt7601u_usb_free_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf *buf);
+int mt7601u_usb_submit_buf(struct mt7601u_dev *dev, int dir, int ep_idx,
+			   struct mt7601u_dma_buf *buf, gfp_t gfp,
+			   usb_complete_t complete_fn, void *context);
+void mt7601u_complete_urb(struct urb *urb);
+
+int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+			   const u8 direction, const u16 val, const u16 offset,
+			   void *buf, const size_t buflen);
+void mt7601u_vendor_reset(struct mt7601u_dev *dev);
+int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
+			     const u16 offset, const u32 val);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/util.c b/drivers/net/wireless/mediatek/mt7601u/util.c
new file mode 100644
index 0000000..7c1787c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/util.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+
+void mt76_remove_hdr_pad(struct sk_buff *skb)
+{
+	int len = ieee80211_get_hdrlen_from_skb(skb);
+
+	memmove(skb->data + 2, skb->data, len);
+	skb_pull(skb, 2);
+}
+
+int mt76_insert_hdr_pad(struct sk_buff *skb)
+{
+	int len = ieee80211_get_hdrlen_from_skb(skb);
+	int ret;
+
+	if (len % 4 == 0)
+		return 0;
+
+	ret = skb_cow(skb, 2);
+	if (ret)
+		return ret;
+
+	skb_push(skb, 2);
+	memmove(skb->data, skb->data + 2, len);
+
+	skb->data[len] = 0;
+	skb->data[len + 1] = 0;
+	return 0;
+}
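+
+/* Editor's sketch of the padding performed above: for a header length that
+ * is not a multiple of 4 (e.g. a 26-byte QoS header) two zero bytes are
+ * inserted so the payload starts 4-byte aligned, and stripped again on the
+ * TX status path:
+ *
+ *   before: | hdr (26) | payload ...
+ *   after:  | hdr (26) | 00 00 | payload ...
+ */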
diff --git a/drivers/net/wireless/mediatek/mt7601u/util.h b/drivers/net/wireless/mediatek/mt7601u/util.h
new file mode 100644
index 0000000..b89140b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/util.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_UTIL_H
+#define __MT76_UTIL_H
+
+/*
+ * Power of two check: this will check
+ * if the mask that has been given contains a contiguous set of bits.
+ * Note that we cannot use the is_power_of_2() function since this
+ * check must be done at compile-time.
+ */
+#define is_power_of_two(x)	( !((x) & ((x)-1)) )
+#define low_bit_mask(x)		( ((x)-1) & ~(x) )
+#define is_valid_mask(x)	is_power_of_two(1LU + (x) + low_bit_mask(x))
+
+/*
+ * Macros to find the first set bit in a variable.
+ * These macros behave the same as the __ffs() functions, with
+ * the important difference that the evaluation is done at
+ * compile-time rather than run-time.
+ */
+#define compile_ffs2(__x) \
+	__builtin_choose_expr(((__x) & 0x1), 0, 1)
+
+#define compile_ffs4(__x) \
+	__builtin_choose_expr(((__x) & 0x3), \
+			      (compile_ffs2((__x))), \
+			      (compile_ffs2((__x) >> 2) + 2))
+
+#define compile_ffs8(__x) \
+	__builtin_choose_expr(((__x) & 0xf), \
+			      (compile_ffs4((__x))), \
+			      (compile_ffs4((__x) >> 4) + 4))
+
+#define compile_ffs16(__x) \
+	__builtin_choose_expr(((__x) & 0xff), \
+			      (compile_ffs8((__x))), \
+			      (compile_ffs8((__x) >> 8) + 8))
+
+#define compile_ffs32(__x) \
+	__builtin_choose_expr(((__x) & 0xffff), \
+			      (compile_ffs16((__x))), \
+			      (compile_ffs16((__x) >> 16) + 16))
+
+/*
+ * This macro will check the requirements for the MT76_SET/MT76_GET macros.
+ * The mask should be a constant, non-zero, contiguous set of bits which
+ * does not exceed the width of the given type.
+ */
+#define FIELD_CHECK(__mask) \
+	BUILD_BUG_ON(!(__mask) || !is_valid_mask(__mask))
+
+#define MT76_SET(_mask, _val)						\
+	({								\
+		FIELD_CHECK(_mask);					\
+		(((u32) (_val)) << compile_ffs32(_mask)) & _mask;	\
+	})
+
+#define MT76_GET(_mask, _val)						\
+	({								\
+		FIELD_CHECK(_mask);					\
+		(u32) (((_val) & _mask) >> compile_ffs32(_mask));	\
+	})
+
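+/* Example (editor's sketch): with a mask of 0x0000ff00,
+ * compile_ffs32(0x0000ff00) evaluates to 8, so
+ *   MT76_SET(0x0000ff00, 0x12)   == 0x1200
+ *   MT76_GET(0x0000ff00, 0x1234) == 0x12
+ * while FIELD_CHECK() rejects zero or non-contiguous masks at build time.
+ */
+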
+#endif
diff --git a/drivers/net/wireless/mwifiex/11h.c b/drivers/net/wireless/mwifiex/11h.c
index 3ab87a8..71a1b58 100644
--- a/drivers/net/wireless/mwifiex/11h.c
+++ b/drivers/net/wireless/mwifiex/11h.c
@@ -134,8 +134,8 @@
 
 	chandef = priv->dfs_chandef;
 	if (priv->wdev.cac_started) {
-		dev_dbg(priv->adapter->dev,
-			"CAC timer finished; No radar detected\n");
+		mwifiex_dbg(priv->adapter, MSG,
+			    "CAC timer finished; No radar detected\n");
 		cfg80211_cac_event(priv->netdev, &chandef,
 				   NL80211_RADAR_CAC_FINISHED,
 				   GFP_KERNEL);
@@ -161,21 +161,40 @@
 	cr_req->chan_desc.chan_width = radar_params->chandef->width;
 	cr_req->msec_dwell_time = cpu_to_le32(radar_params->cac_time_ms);
 
-	dev_dbg(priv->adapter->dev,
-		"11h: issuing DFS Radar check for channel=%d\n",
-		radar_params->chandef->chan->hw_value);
+	if (radar_params->cac_time_ms)
+		mwifiex_dbg(priv->adapter, MSG,
+			    "11h: issuing DFS Radar check for channel=%d\n",
+			    radar_params->chandef->chan->hw_value);
+	else
+		mwifiex_dbg(priv->adapter, MSG, "cancelling CAC\n");
 
 	return 0;
 }
 
+int mwifiex_stop_radar_detection(struct mwifiex_private *priv,
+				 struct cfg80211_chan_def *chandef)
+{
+	struct mwifiex_radar_params radar_params;
+
+	memset(&radar_params, 0, sizeof(struct mwifiex_radar_params));
+	radar_params.chandef = chandef;
+	radar_params.cac_time_ms = 0;
+
+	return mwifiex_send_cmd(priv, HostCmd_CMD_CHAN_REPORT_REQUEST,
+				HostCmd_ACT_GEN_SET, 0, &radar_params, true);
+}
+
 /* This function is to abort ongoing CAC upon stopping AP operations
  * or during unload.
  */
 void mwifiex_abort_cac(struct mwifiex_private *priv)
 {
 	if (priv->wdev.cac_started) {
-		dev_dbg(priv->adapter->dev,
-			"Aborting delayed work for CAC.\n");
+		if (mwifiex_stop_radar_detection(priv, &priv->dfs_chandef))
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "failed to stop CAC in FW\n");
+		mwifiex_dbg(priv->adapter, MSG,
+			    "Aborting delayed work for CAC.\n");
 		cancel_delayed_work_sync(&priv->dfs_cac_work);
 		cfg80211_cac_event(priv->netdev, &priv->dfs_chandef,
 				   NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
@@ -199,7 +218,8 @@
 				sizeof(u32));
 
 	if (le32_to_cpu(rpt_event->result) != HostCmd_RESULT_OK) {
-		dev_err(priv->adapter->dev, "Error in channel report event\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Error in channel report event\n");
 		return -1;
 	}
 
@@ -212,8 +232,8 @@
 		switch (le16_to_cpu(rpt->header.type)) {
 		case TLV_TYPE_CHANRPT_11H_BASIC:
 			if (rpt->map.radar) {
-				dev_notice(priv->adapter->dev,
-					   "RADAR Detected on channel %d!\n",
+				mwifiex_dbg(priv->adapter, MSG,
+					    "RADAR Detected on channel %d!\n",
 					    priv->dfs_chandef.chan->hw_value);
 				cancel_delayed_work_sync(&priv->dfs_cac_work);
 				cfg80211_cac_event(priv->netdev,
@@ -242,16 +262,20 @@
 	rdr_event = (void *)(skb->data + sizeof(u32));
 
 	if (le32_to_cpu(rdr_event->passed)) {
-		dev_notice(priv->adapter->dev,
-			   "radar detected; indicating kernel\n");
+		mwifiex_dbg(priv->adapter, MSG,
+			    "radar detected; indicating kernel\n");
+		if (mwifiex_stop_radar_detection(priv, &priv->dfs_chandef))
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "Failed to stop CAC in FW\n");
 		cfg80211_radar_event(priv->adapter->wiphy, &priv->dfs_chandef,
 				     GFP_KERNEL);
-		dev_dbg(priv->adapter->dev, "regdomain: %d\n",
-			rdr_event->reg_domain);
-		dev_dbg(priv->adapter->dev, "radar detection type: %d\n",
-			rdr_event->det_type);
+		mwifiex_dbg(priv->adapter, MSG, "regdomain: %d\n",
+			    rdr_event->reg_domain);
+		mwifiex_dbg(priv->adapter, MSG, "radar detection type: %d\n",
+			    rdr_event->det_type);
 	} else {
-		dev_dbg(priv->adapter->dev, "false radar detection event!\n");
+		mwifiex_dbg(priv->adapter, MSG,
+			    "false radar detection event!\n");
 	}
 
 	return 0;
@@ -276,20 +300,20 @@
 
 	bss_cfg = &priv->bss_cfg;
 	if (!bss_cfg->beacon_period) {
-		dev_err(priv->adapter->dev,
-			"channel switch: AP already stopped\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "channel switch: AP already stopped\n");
 		return;
 	}
 
-	mwifiex_uap_set_channel(bss_cfg, priv->dfs_chandef);
+	mwifiex_uap_set_channel(priv, bss_cfg, priv->dfs_chandef);
 
 	if (mwifiex_config_start_uap(priv, bss_cfg)) {
-		dev_dbg(priv->adapter->dev,
-			"Failed to start AP after channel switch\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Failed to start AP after channel switch\n");
 		return;
 	}
 
-	dev_notice(priv->adapter->dev,
-		   "indicating channel switch completion to kernel\n");
+	mwifiex_dbg(priv->adapter, MSG,
+		    "indicating channel switch completion to kernel\n");
 	cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef);
 }
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 433bd68..c174e79 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -42,7 +42,7 @@
 					priv->wdev.wiphy->bands[radio_type];
 
 	if (WARN_ON_ONCE(!sband)) {
-		dev_err(priv->adapter->dev, "Invalid radio type!\n");
+		mwifiex_dbg(priv->adapter, ERROR, "Invalid radio type!\n");
 		return -EINVAL;
 	}
 
@@ -156,7 +156,7 @@
 int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
 			      struct host_cmd_ds_command *resp)
 {
-	int tid;
+	int tid, tid_down;
 	struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
 	struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
 	struct mwifiex_ra_list_tbl *ra_list;
@@ -167,7 +167,9 @@
 
 	tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
 	       >> BLOCKACKPARAM_TID_POS;
-	ra_list = mwifiex_wmm_get_ralist_node(priv, tid, add_ba_rsp->
+
+	tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
+	ra_list = mwifiex_wmm_get_ralist_node(priv, tid_down, add_ba_rsp->
 		peer_mac_addr);
 	if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
 		if (ra_list) {
@@ -184,7 +186,7 @@
 
 	tx_ba_tbl = mwifiex_get_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr);
 	if (tx_ba_tbl) {
-		dev_dbg(priv->adapter->dev, "info: BA stream complete\n");
+		mwifiex_dbg(priv->adapter, EVENT, "info: BA stream complete\n");
 		tx_ba_tbl->ba_status = BA_SETUP_COMPLETE;
 		if ((block_ack_param_set & BLOCKACKPARAM_AMSDU_SUPP_MASK) &&
 		    priv->add_ba_param.tx_amsdu &&
@@ -197,7 +199,7 @@
 			ra_list->ba_status = BA_SETUP_COMPLETE;
 		}
 	} else {
-		dev_err(priv->adapter->dev, "BA stream not created\n");
+		mwifiex_dbg(priv->adapter, ERROR, "BA stream not created\n");
 	}
 
 	return 0;
@@ -224,7 +226,8 @@
 	tx_buf->action = cpu_to_le16(action);
 	switch (action) {
 	case HostCmd_ACT_GEN_SET:
-		dev_dbg(priv->adapter->dev, "cmd: set tx_buf=%d\n", *buf_size);
+		mwifiex_dbg(priv->adapter, CMD,
+			    "cmd: set tx_buf=%d\n", *buf_size);
 		tx_buf->buff_size = cpu_to_le16(*buf_size);
 		break;
 	case HostCmd_ACT_GEN_GET:
@@ -466,7 +469,8 @@
 	    mwifiex_is_tx_ba_stream_ptr_valid(priv, tx_ba_tsr_tbl))
 		return;
 
-	dev_dbg(priv->adapter->dev, "info: tx_ba_tsr_tbl %p\n", tx_ba_tsr_tbl);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: tx_ba_tsr_tbl %p\n", tx_ba_tsr_tbl);
 
 	list_del(&tx_ba_tsr_tbl->list);
 
@@ -528,13 +532,16 @@
 	struct mwifiex_tx_ba_stream_tbl *new_node;
 	struct mwifiex_ra_list_tbl *ra_list;
 	unsigned long flags;
+	int tid_down;
 
 	if (!mwifiex_get_ba_tbl(priv, tid, ra)) {
 		new_node = kzalloc(sizeof(struct mwifiex_tx_ba_stream_tbl),
 				   GFP_ATOMIC);
 		if (!new_node)
 			return;
-		ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra);
+
+		tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
+		ra_list = mwifiex_wmm_get_ralist_node(priv, tid_down, ra);
 		if (ra_list) {
 			ra_list->ba_status = ba_status;
 			ra_list->amsdu_in_ampdu = false;
@@ -563,7 +570,7 @@
 	unsigned long flags;
 	u16 block_ack_param_set;
 
-	dev_dbg(priv->adapter->dev, "cmd: %s: tid %d\n", __func__, tid);
+	mwifiex_dbg(priv->adapter, CMD, "cmd: %s: tid %d\n", __func__, tid);
 
 	if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
 	    ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
@@ -575,9 +582,9 @@
 		sta_ptr = mwifiex_get_sta_entry(priv, peer_mac);
 		if (!sta_ptr) {
 			spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
-			dev_warn(priv->adapter->dev,
-				 "BA setup with unknown TDLS peer %pM!\n",
-				peer_mac);
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "BA setup with unknown TDLS peer %pM!\n",
+				    peer_mac);
 			return -1;
 		}
 		if (sta_ptr->is_11ac_enabled)
@@ -641,6 +648,30 @@
 }
 
 /*
+ * This function sends a DELBA frame for the given TID
+ */
+void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
+{
+	struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
+
+	if (list_empty(&priv->rx_reorder_tbl_ptr)) {
+		dev_dbg(priv->adapter->dev,
+			"mwifiex_11n_delba: rx_reorder_tbl_ptr empty\n");
+		return;
+	}
+
+	list_for_each_entry(rx_reor_tbl_ptr, &priv->rx_reorder_tbl_ptr, list) {
+		if (rx_reor_tbl_ptr->tid == tid) {
+			dev_dbg(priv->adapter->dev,
+				"Send delba to tid=%d, %pM\n",
+				tid, rx_reor_tbl_ptr->ta);
+			mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
+			return;
+		}
+	}
+}
+
+/*
  * This function handles the command response of a delete BA request.
  */
 void mwifiex_11n_delete_ba_stream(struct mwifiex_private *priv, u8 *del_ba)
@@ -706,8 +737,8 @@
 	spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
 	list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
 		rx_reo_tbl->tid = (u16) tx_ba_tsr_tbl->tid;
-		dev_dbg(priv->adapter->dev, "data: %s tid=%d\n",
-			__func__, rx_reo_tbl->tid);
+		mwifiex_dbg(priv->adapter, DATA, "data: %s tid=%d\n",
+			    __func__, rx_reo_tbl->tid);
 		memcpy(rx_reo_tbl->ra, tx_ba_tsr_tbl->ra, ETH_ALEN);
 		rx_reo_tbl->amsdu = tx_ba_tsr_tbl->amsdu;
 		rx_reo_tbl++;
@@ -812,3 +843,72 @@
 
 	return sec_offset;
 }
+
+/* This function will send DELBA to entries in the priv's
+ * Tx BA stream table
+ */
+static void
+mwifiex_send_delba_txbastream_tbl(struct mwifiex_private *priv, u8 tid)
+{
+	struct mwifiex_adapter *adapter = priv->adapter;
+	struct mwifiex_tx_ba_stream_tbl *tx_ba_stream_tbl_ptr;
+
+	if (list_empty(&priv->tx_ba_stream_tbl_ptr))
+		return;
+
+	list_for_each_entry(tx_ba_stream_tbl_ptr,
+			    &priv->tx_ba_stream_tbl_ptr, list) {
+		if (tx_ba_stream_tbl_ptr->ba_status == BA_SETUP_COMPLETE) {
+			if (tid == tx_ba_stream_tbl_ptr->tid) {
+				dev_dbg(adapter->dev,
+					"Tx:Send delba to tid=%d, %pM\n", tid,
+					tx_ba_stream_tbl_ptr->ra);
+				mwifiex_send_delba(priv,
+						   tx_ba_stream_tbl_ptr->tid,
+						   tx_ba_stream_tbl_ptr->ra, 1);
+				return;
+			}
+		}
+	}
+}
+
+/* This function updates the Tx AMPDU window size for all interfaces
+ */
+void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *adapter)
+{
+	u8 i, j;
+	u32 tx_win_size;
+	struct mwifiex_private *priv;
+
+	for (i = 0; i < adapter->priv_num; i++) {
+		if (!adapter->priv[i])
+			continue;
+		priv = adapter->priv[i];
+		tx_win_size = priv->add_ba_param.tx_win_size;
+
+		if (priv->bss_type == MWIFIEX_BSS_TYPE_STA)
+			priv->add_ba_param.tx_win_size =
+				MWIFIEX_STA_AMPDU_DEF_TXWINSIZE;
+
+		if (priv->bss_type == MWIFIEX_BSS_TYPE_P2P)
+			priv->add_ba_param.tx_win_size =
+				MWIFIEX_STA_AMPDU_DEF_TXWINSIZE;
+
+		if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP)
+			priv->add_ba_param.tx_win_size =
+				MWIFIEX_UAP_AMPDU_DEF_TXWINSIZE;
+
+		if (adapter->coex_win_size) {
+			if (adapter->coex_tx_win_size)
+				priv->add_ba_param.tx_win_size =
+					adapter->coex_tx_win_size;
+		}
+
+		if (tx_win_size != priv->add_ba_param.tx_win_size) {
+			if (!priv->media_connected)
+				continue;
+			/* use a separate index: reusing 'i' would clobber
+			 * the outer loop over interfaces
+			 */
+			for (j = 0; j < MAX_NUM_TID; j++)
+				mwifiex_send_delba_txbastream_tbl(priv, j);
+		}
+	}
+}
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 6183e25..f7c7172 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -187,7 +187,6 @@
 	skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size,
 					       GFP_ATOMIC | GFP_DMA);
 	if (!skb_aggr) {
-		dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__);
 		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
 				       ra_list_flags);
 		return -1;
@@ -297,13 +296,13 @@
 		tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
 		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
 				       ra_list_flags);
-		dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+		mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
 		break;
 	case -1:
 		if (adapter->iface_type != MWIFIEX_PCIE)
 			adapter->data_sent = false;
-		dev_err(adapter->dev, "%s: host_to_card failed: %#x\n",
-			__func__, ret);
+		mwifiex_dbg(adapter, ERROR, "%s: host_to_card failed: %#x\n",
+			    __func__, ret);
 		adapter->dbg.num_tx_host_to_card_failure++;
 		mwifiex_write_data_complete(adapter, skb_aggr, 1, ret);
 		return 0;
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index f75f8ac..2906cd5 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -51,8 +51,8 @@
 			rx_skb = __skb_dequeue(&list);
 			ret = mwifiex_recv_packet(priv, rx_skb);
 			if (ret == -1)
-				dev_err(priv->adapter->dev,
-					"Rx of A-MSDU failed");
+				mwifiex_dbg(priv->adapter, ERROR,
+					    "Rx of A-MSDU failed");
 		}
 		return 0;
 	}
@@ -304,7 +304,7 @@
 	if (seq_num < 0)
 		return;
 
-	dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", seq_num);
+	mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
 	start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
 	mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
 						 start_win);
@@ -367,8 +367,9 @@
 	}
 	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
 
-	dev_dbg(priv->adapter->dev, "info: last_seq=%d start_win=%d\n",
-		last_seq, new_node->start_win);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: last_seq=%d start_win=%d\n",
+		    last_seq, new_node->start_win);
 
 	if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
 	    last_seq >= new_node->start_win) {
@@ -382,8 +383,8 @@
 					GFP_KERNEL);
 	if (!new_node->rx_reorder_ptr) {
 		kfree((u8 *) new_node);
-		dev_err(priv->adapter->dev,
-			"%s: failed to alloc reorder_ptr\n", __func__);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "%s: failed to alloc reorder_ptr\n", __func__);
 		return;
 	}
 
@@ -467,9 +468,9 @@
 						cmd_addba_req->peer_mac_addr);
 		if (!sta_ptr) {
 			spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
-			dev_warn(priv->adapter->dev,
-				 "BA setup with unknown TDLS peer %pM!\n",
-				 cmd_addba_req->peer_mac_addr);
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "BA setup with unknown TDLS peer %pM!\n",
+				    cmd_addba_req->peer_mac_addr);
 			return -1;
 		}
 		if (sta_ptr->is_11ac_enabled)
@@ -573,14 +574,14 @@
 	}
 
 	if (tbl->flags & RXREOR_FORCE_NO_DROP) {
-		dev_dbg(priv->adapter->dev,
-			"RXREOR_FORCE_NO_DROP when HS is activated\n");
+		mwifiex_dbg(priv->adapter, INFO,
+			    "RXREOR_FORCE_NO_DROP when HS is activated\n");
 		tbl->flags &= ~RXREOR_FORCE_NO_DROP;
 	} else if (init_window_shift && seq_num < start_win &&
 		   seq_num >= tbl->init_win) {
-		dev_dbg(priv->adapter->dev,
-			"Sender TID sequence number reset %d->%d for SSN %d\n",
-			start_win, seq_num, tbl->init_win);
+		mwifiex_dbg(priv->adapter, INFO,
+			    "Sender TID sequence number reset %d->%d for SSN %d\n",
+			    start_win, seq_num, tbl->init_win);
 		tbl->start_win = start_win = seq_num;
 		end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
 	} else {
@@ -662,32 +663,35 @@
 	struct mwifiex_ra_list_tbl *ra_list;
 	u8 cleanup_rx_reorder_tbl;
 	unsigned long flags;
+	int tid_down;
 
 	if (type == TYPE_DELBA_RECEIVE)
 		cleanup_rx_reorder_tbl = (initiator) ? true : false;
 	else
 		cleanup_rx_reorder_tbl = (initiator) ? false : true;
 
-	dev_dbg(priv->adapter->dev, "event: DELBA: %pM tid=%d initiator=%d\n",
-		peer_mac, tid, initiator);
+	mwifiex_dbg(priv->adapter, EVENT, "event: DELBA: %pM tid=%d initiator=%d\n",
+		    peer_mac, tid, initiator);
 
 	if (cleanup_rx_reorder_tbl) {
 		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
 								 peer_mac);
 		if (!tbl) {
-			dev_dbg(priv->adapter->dev,
-				"event: TID, TA not found in table\n");
+			mwifiex_dbg(priv->adapter, EVENT,
+				    "event: TID, TA not found in table\n");
 			return;
 		}
 		mwifiex_del_rx_reorder_entry(priv, tbl);
 	} else {
 		ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
 		if (!ptx_tbl) {
-			dev_dbg(priv->adapter->dev,
-				"event: TID, RA not found in table\n");
+			mwifiex_dbg(priv->adapter, EVENT,
+				    "event: TID, RA not found in table\n");
 			return;
 		}
-		ra_list = mwifiex_wmm_get_ralist_node(priv, tid, peer_mac);
+
+		tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
+		ra_list = mwifiex_wmm_get_ralist_node(priv, tid_down, peer_mac);
 		if (ra_list) {
 			ra_list->amsdu_in_ampdu = false;
 			ra_list->ba_status = BA_SETUP_NONE;
@@ -721,8 +725,8 @@
 	 * the stream
 	 */
 	if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
-		dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d)\n",
-			add_ba_rsp->peer_mac_addr, tid);
+		mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
+			    add_ba_rsp->peer_mac_addr, tid);
 
 		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
 						     add_ba_rsp->peer_mac_addr);
@@ -746,8 +750,8 @@
 			tbl->amsdu = false;
 	}
 
-	dev_dbg(priv->adapter->dev,
-		"cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
+	mwifiex_dbg(priv->adapter, CMD,
+		    "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
 		add_ba_rsp->peer_mac_addr, tid, add_ba_rsp->ssn, win_size);
 
 	return 0;
@@ -824,3 +828,83 @@
 
 	return;
 }
+
+/* This function updates the Rx AMPDU window size for all interfaces
+ * based on the coex flag
+ */
+static void mwifiex_update_ampdu_rxwinsize(struct mwifiex_adapter *adapter,
+					   bool coex_flag)
+{
+	u8 i, j;
+	u32 rx_win_size;
+	struct mwifiex_private *priv;
+
+	dev_dbg(adapter->dev, "Update rxwinsize %d\n", coex_flag);
+
+	for (i = 0; i < adapter->priv_num; i++) {
+		if (!adapter->priv[i])
+			continue;
+		priv = adapter->priv[i];
+		rx_win_size = priv->add_ba_param.rx_win_size;
+		if (coex_flag) {
+			if (priv->bss_type == MWIFIEX_BSS_TYPE_STA)
+				priv->add_ba_param.rx_win_size =
+					MWIFIEX_STA_COEX_AMPDU_DEF_RXWINSIZE;
+			if (priv->bss_type == MWIFIEX_BSS_TYPE_P2P)
+				priv->add_ba_param.rx_win_size =
+					MWIFIEX_STA_COEX_AMPDU_DEF_RXWINSIZE;
+			if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP)
+				priv->add_ba_param.rx_win_size =
+					MWIFIEX_UAP_COEX_AMPDU_DEF_RXWINSIZE;
+		} else {
+			if (priv->bss_type == MWIFIEX_BSS_TYPE_STA)
+				priv->add_ba_param.rx_win_size =
+					MWIFIEX_STA_AMPDU_DEF_RXWINSIZE;
+			if (priv->bss_type == MWIFIEX_BSS_TYPE_P2P)
+				priv->add_ba_param.rx_win_size =
+					MWIFIEX_STA_AMPDU_DEF_RXWINSIZE;
+			if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP)
+				priv->add_ba_param.rx_win_size =
+					MWIFIEX_UAP_AMPDU_DEF_RXWINSIZE;
+		}
+
+		if (adapter->coex_win_size && adapter->coex_rx_win_size)
+			priv->add_ba_param.rx_win_size =
+					adapter->coex_rx_win_size;
+
+		if (rx_win_size != priv->add_ba_param.rx_win_size) {
+			if (!priv->media_connected)
+				continue;
+			/* separate index so the interface loop isn't clobbered */
+			for (j = 0; j < MAX_NUM_TID; j++)
+				mwifiex_11n_delba(priv, j);
+		}
+	}
+}
+
+/* This function checks the coex state and updates the Rx BA window size
+ * accordingly
+ */
+void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter)
+{
+	u8 i;
+	struct mwifiex_private *priv;
+	u8 count = 0;
+
+	for (i = 0; i < adapter->priv_num; i++) {
+		if (adapter->priv[i]) {
+			priv = adapter->priv[i];
+			if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
+				if (priv->media_connected)
+					count++;
+			}
+			if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+				if (priv->bss_started)
+					count++;
+			}
+		}
+		if (count >= MWIFIEX_BSS_COEX_COUNT)
+			break;
+	}
+	if (count >= MWIFIEX_BSS_COEX_COUNT)
+		mwifiex_update_ampdu_rxwinsize(adapter, true);
+	else
+		mwifiex_update_ampdu_rxwinsize(adapter, false);
+}
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index aa01c9b..48edf38 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -12,6 +12,7 @@
 	tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897"
 	depends on MWIFIEX && MMC
 	select FW_LOADER
+	select WANT_DEV_COREDUMP
 	---help---
 	  This adds support for wireless adapters based on Marvell
 	  8786/8787/8797/8887/8897 chipsets with SDIO interface.
@@ -23,6 +24,7 @@
 	tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897"
 	depends on MWIFIEX && PCI
 	select FW_LOADER
+	select WANT_DEV_COREDUMP
 	---help---
 	  This adds support for wireless adapters based on Marvell
 	  8766/8897 chipsets with PCIe interface.
diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/mwifiex/README
index 31928ca..2f0f9b5 100644
--- a/drivers/net/wireless/mwifiex/README
+++ b/drivers/net/wireless/mwifiex/README
@@ -230,9 +230,9 @@
 
 	cat getlog
 
-fw_dump
-	This command is used to dump firmware memory into files.
-	Separate file will be created for each memory segment.
+device_dump
+	This command is used to dump driver information and firmware memory
+	segments.
 	Usage:
 
-	cat fw_dump
+	cat device_dump
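
Most hunks from here on mechanically convert dev_dbg()/dev_err()/wiphy_*()
prints in cfg80211.c, cfp.c and cmdevt.c to the driver-private mwifiex_dbg()
wrapper, which gates each message on a per-adapter runtime debug mask so
verbosity can be tuned per adapter without recompiling. A plausible sketch
of such a wrapper, assuming an adapter->debug_mask field and MWIFIEX_DBG_*
mask bits (the in-tree definition in main.h may differ in detail):

	/* Sketch only: print a message iff its severity/class bit is set
	 * in the adapter's runtime debug mask.
	 */
	#define mwifiex_dbg(adapter, mask, fmt, args...)		\
	do {								\
		if ((adapter)->debug_mask & MWIFIEX_DBG_##mask)		\
			dev_info((adapter)->dev, fmt, ## args);		\
	} while (0)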
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index bf9020f..b15e4c7 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -67,6 +67,22 @@
 	}
 }
 
+/* This function maps IEEE HT secondary channel offset to NL80211 channel type
+ */
+u8 mwifiex_sec_chan_offset_to_chan_type(u8 second_chan_offset)
+{
+	switch (second_chan_offset) {
+	case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+		return NL80211_CHAN_HT20;
+	case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+		return NL80211_CHAN_HT40PLUS;
+	case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+		return NL80211_CHAN_HT40MINUS;
+	default:
+		return NL80211_CHAN_HT20;
+	}
+}
+
 /*
  * This function checks whether WEP is set.
  */
@@ -104,11 +120,11 @@
 	const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
 
 	if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index, peer_mac, 1)) {
-		wiphy_err(wiphy, "deleting the crypto keys\n");
+		mwifiex_dbg(priv->adapter, ERROR, "deleting the crypto keys\n");
 		return -EFAULT;
 	}
 
-	wiphy_dbg(wiphy, "info: crypto keys deleted\n");
+	mwifiex_dbg(priv->adapter, INFO, "info: crypto keys deleted\n");
 	return 0;
 }
 
@@ -163,7 +179,7 @@
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
 
 	if (!buf || !len) {
-		wiphy_err(wiphy, "invalid buffer and length\n");
+		mwifiex_dbg(priv->adapter, ERROR, "invalid buffer and length\n");
 		return -EFAULT;
 	}
 
@@ -172,8 +188,8 @@
 	    ieee80211_is_probe_resp(mgmt->frame_control)) {
 		/* Since we support offload probe resp, we need to skip probe
 		 * resp in AP or GO mode */
-		wiphy_dbg(wiphy,
-			  "info: skip to send probe resp in AP or GO mode\n");
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: skip to send probe resp in AP or GO mode\n");
 		return 0;
 	}
 
@@ -183,7 +199,8 @@
 			    pkt_len + sizeof(pkt_len));
 
 	if (!skb) {
-		wiphy_err(wiphy, "allocate skb failed for management frame\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "allocate skb failed for management frame\n");
 		return -ENOMEM;
 	}
 
@@ -206,7 +223,7 @@
 
 	mwifiex_queue_tx_pkt(priv, skb);
 
-	wiphy_dbg(wiphy, "info: management frame transmitted\n");
+	mwifiex_dbg(priv->adapter, INFO, "info: management frame transmitted\n");
 	return 0;
 }
 
@@ -231,7 +248,7 @@
 		mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
 				 HostCmd_ACT_GEN_SET, 0,
 				 &priv->mgmt_frame_mask, false);
-		wiphy_dbg(wiphy, "info: mgmt frame registered\n");
+		mwifiex_dbg(priv->adapter, INFO, "info: mgmt frame registered\n");
 	}
 }
 
@@ -248,13 +265,14 @@
 	int ret;
 
 	if (!chan || !cookie) {
-		wiphy_err(wiphy, "Invalid parameter for ROC\n");
+		mwifiex_dbg(priv->adapter, ERROR, "Invalid parameter for ROC\n");
 		return -EINVAL;
 	}
 
 	if (priv->roc_cfg.cookie) {
-		wiphy_dbg(wiphy, "info: ongoing ROC, cookie = 0x%llx\n",
-			  priv->roc_cfg.cookie);
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: ongoing ROC, cookie = 0x%llx\n",
+			    priv->roc_cfg.cookie);
 		return -EBUSY;
 	}
 
@@ -269,7 +287,8 @@
 		cfg80211_ready_on_channel(wdev, *cookie, chan,
 					  duration, GFP_ATOMIC);
 
-		wiphy_dbg(wiphy, "info: ROC, cookie = 0x%llx\n", *cookie);
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: ROC, cookie = 0x%llx\n", *cookie);
 	}
 
 	return ret;
@@ -298,7 +317,8 @@
 
 		memset(&priv->roc_cfg, 0, sizeof(struct mwifiex_roc_cfg));
 
-		wiphy_dbg(wiphy, "info: cancel ROC, cookie = 0x%llx\n", cookie);
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: cancel ROC, cookie = 0x%llx\n", cookie);
 	}
 
 	return ret;
@@ -344,8 +364,8 @@
 	u32 ps_mode;
 
 	if (timeout)
-		wiphy_dbg(wiphy,
-			  "info: ignore timeout value for IEEE Power Save\n");
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: ignore timeout value for IEEE Power Save\n");
 
 	ps_mode = enabled;
 
@@ -370,7 +390,7 @@
 		priv->wep_key_curr_index = key_index;
 	} else if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index,
 				      NULL, 0)) {
-		wiphy_err(wiphy, "set default Tx key index\n");
+		mwifiex_dbg(priv->adapter, ERROR, "set default Tx key index\n");
 		return -EFAULT;
 	}
 
@@ -407,7 +427,7 @@
 
 	if (mwifiex_set_encode(priv, params, params->key, params->key_len,
 			       key_index, peer_mac, 0)) {
-		wiphy_err(wiphy, "crypto keys added\n");
+		mwifiex_dbg(priv->adapter, ERROR, "crypto keys added\n");
 		return -EFAULT;
 	}
 
@@ -442,7 +462,8 @@
 
 	band = mwifiex_band_to_radio_type(adapter->config_bands);
 	if (!wiphy->bands[band]) {
-		wiphy_err(wiphy, "11D: setting domain info in FW\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "11D: setting domain info in FW\n");
 		return -1;
 	}
 
@@ -493,7 +514,8 @@
 
 	if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
 			     HostCmd_ACT_GEN_SET, 0, NULL, false)) {
-		wiphy_err(wiphy, "11D: setting domain info in FW\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "11D: setting domain info in FW\n");
 		return -1;
 	}
 
@@ -516,9 +538,9 @@
 	struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
 	struct mwifiex_private *priv = mwifiex_get_priv(adapter,
 							MWIFIEX_BSS_ROLE_ANY);
-
-	wiphy_dbg(wiphy, "info: cfg80211 regulatory domain callback for %c%c\n",
-		  request->alpha2[0], request->alpha2[1]);
+	mwifiex_dbg(adapter, INFO,
+		    "info: cfg80211 regulatory domain callback for %c%c\n",
+		    request->alpha2[0], request->alpha2[1]);
 
 	switch (request->initiator) {
 	case NL80211_REGDOM_SET_BY_DRIVER:
@@ -527,8 +549,9 @@
 	case NL80211_REGDOM_SET_BY_COUNTRY_IE:
 		break;
 	default:
-		wiphy_err(wiphy, "unknown regdom initiator: %d\n",
-			  request->initiator);
+		mwifiex_dbg(adapter, ERROR,
+			    "unknown regdom initiator: %d\n",
+			    request->initiator);
 		return;
 	}
 
@@ -597,8 +620,8 @@
 	switch (priv->bss_role) {
 	case MWIFIEX_BSS_ROLE_UAP:
 		if (priv->bss_started) {
-			dev_err(adapter->dev,
-				"cannot change wiphy params when bss started");
+			mwifiex_dbg(adapter, ERROR,
+				    "cannot change wiphy params when bss started");
 			return -EINVAL;
 		}
 
@@ -622,15 +645,16 @@
 
 		kfree(bss_cfg);
 		if (ret) {
-			wiphy_err(wiphy, "Failed to set wiphy phy params\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "Failed to set wiphy phy params\n");
 			return ret;
 		}
 		break;
 
 		case MWIFIEX_BSS_ROLE_STA:
 		if (priv->media_connected) {
-			dev_err(adapter->dev,
-				"cannot change wiphy params when connected");
+			mwifiex_dbg(adapter, ERROR,
+				    "cannot change wiphy params when connected");
 			return -EINVAL;
 		}
 		if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
@@ -724,8 +748,8 @@
 	if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
 			     HostCmd_ACT_GEN_SET, 0,
 			     &priv->mgmt_frame_mask, false)) {
-		dev_warn(priv->adapter->dev,
-			 "could not unregister mgmt frame rx\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "could not unregister mgmt frame rx\n");
 		return -1;
 	}
 
@@ -789,9 +813,9 @@
 		priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
 		break;
 	default:
-		dev_err(priv->adapter->dev,
-			"%s: changing to %d not supported\n",
-			dev->name, type);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: changing to %d not supported\n",
+			    dev->name, type);
 		return -EOPNOTSUPP;
 	}
 
@@ -824,12 +848,13 @@
 
 	if (adapter->curr_iface_comb.p2p_intf ==
 	    adapter->iface_limit.p2p_intf) {
-		dev_err(adapter->dev,
-			"cannot create multiple P2P ifaces\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "cannot create multiple P2P ifaces\n");
 		return -1;
 	}
 
-	dev_dbg(priv->adapter->dev, "%s: changing role to p2p\n", dev->name);
+	mwifiex_dbg(adapter, INFO,
+		    "%s: changing role to p2p\n", dev->name);
 
 	if (mwifiex_deinit_priv_params(priv))
 		return -1;
@@ -846,9 +871,9 @@
 			return -EFAULT;
 		break;
 	default:
-		dev_err(priv->adapter->dev,
-			"%s: changing to %d not supported\n",
-			dev->name, type);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: changing to %d not supported\n",
+			    dev->name, type);
 		return -EOPNOTSUPP;
 	}
 
@@ -897,17 +922,17 @@
 	     curr_iftype != NL80211_IFTYPE_P2P_GO) &&
 	    (adapter->curr_iface_comb.sta_intf ==
 	     adapter->iface_limit.sta_intf)) {
-		dev_err(adapter->dev,
-			"cannot create multiple station/adhoc ifaces\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "cannot create multiple station/adhoc ifaces\n");
 		return -1;
 	}
 
 	if (type == NL80211_IFTYPE_STATION)
-		dev_notice(adapter->dev,
-			   "%s: changing role to station\n", dev->name);
+		mwifiex_dbg(adapter, INFO,
+			    "%s: changing role to station\n", dev->name);
 	else
-		dev_notice(adapter->dev,
-			   "%s: changing role to adhoc\n", dev->name);
+		mwifiex_dbg(adapter, INFO,
+			    "%s: changing role to adhoc\n", dev->name);
 
 	if (mwifiex_deinit_priv_params(priv))
 		return -1;
@@ -954,12 +979,13 @@
 
 	if (adapter->curr_iface_comb.uap_intf ==
 	    adapter->iface_limit.uap_intf) {
-		dev_err(adapter->dev,
-			"cannot create multiple AP ifaces\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "cannot create multiple AP ifaces\n");
 		return -1;
 	}
 
-	dev_notice(adapter->dev, "%s: changing role to AP\n", dev->name);
+	mwifiex_dbg(adapter, INFO,
+		    "%s: changing role to AP\n", dev->name);
 
 	if (mwifiex_deinit_priv_params(priv))
 		return -1;
@@ -1020,12 +1046,14 @@
 			return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
 							flags, params);
 		case NL80211_IFTYPE_UNSPECIFIED:
-			wiphy_warn(wiphy, "%s: kept type as IBSS\n", dev->name);
+			mwifiex_dbg(priv->adapter, INFO,
+				    "%s: kept type as IBSS\n", dev->name);
 		case NL80211_IFTYPE_ADHOC:	/* This shouldn't happen */
 			return 0;
 		default:
-			wiphy_err(wiphy, "%s: changing to %d not supported\n",
-				  dev->name, type);
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "%s: changing to %d not supported\n",
+				    dev->name, type);
 			return -EOPNOTSUPP;
 		}
 		break;
@@ -1048,12 +1076,14 @@
 			return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
 							flags, params);
 		case NL80211_IFTYPE_UNSPECIFIED:
-			wiphy_warn(wiphy, "%s: kept type as STA\n", dev->name);
+			mwifiex_dbg(priv->adapter, INFO,
+				    "%s: kept type as STA\n", dev->name);
 		case NL80211_IFTYPE_STATION:	/* This shouldn't happen */
 			return 0;
 		default:
-			wiphy_err(wiphy, "%s: changing to %d not supported\n",
-				  dev->name, type);
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "%s: changing to %d not supported\n",
+				    dev->name, type);
 			return -EOPNOTSUPP;
 		}
 		break;
@@ -1070,12 +1100,14 @@
 			return mwifiex_change_vif_to_p2p(dev, curr_iftype,
 							 type, flags, params);
 		case NL80211_IFTYPE_UNSPECIFIED:
-			wiphy_warn(wiphy, "%s: kept type as AP\n", dev->name);
+			mwifiex_dbg(priv->adapter, INFO,
+				    "%s: kept type as AP\n", dev->name);
 		case NL80211_IFTYPE_AP:		/* This shouldn't happen */
 			return 0;
 		default:
-			wiphy_err(wiphy, "%s: changing to %d not supported\n",
-				  dev->name, type);
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "%s: changing to %d not supported\n",
+				    dev->name, type);
 			return -EOPNOTSUPP;
 		}
 		break;
@@ -1100,19 +1132,22 @@
 			return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
 							flags, params);
 		case NL80211_IFTYPE_UNSPECIFIED:
-			wiphy_warn(wiphy, "%s: kept type as P2P\n", dev->name);
+			mwifiex_dbg(priv->adapter, INFO,
+				    "%s: kept type as P2P\n", dev->name);
 		case NL80211_IFTYPE_P2P_CLIENT:
 		case NL80211_IFTYPE_P2P_GO:
 			return 0;
 		default:
-			wiphy_err(wiphy, "%s: changing to %d not supported\n",
-				  dev->name, type);
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "%s: changing to %d not supported\n",
+				    dev->name, type);
 			return -EOPNOTSUPP;
 		}
 		break;
 	default:
-		wiphy_err(wiphy, "%s: unknown iftype: %d\n",
-			  dev->name, dev->ieee80211_ptr->iftype);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "%s: unknown iftype: %d\n",
+			    dev->name, dev->ieee80211_ptr->iftype);
 		return -EOPNOTSUPP;
 	}
 
@@ -1194,6 +1229,7 @@
  */
 static int
 mwifiex_dump_station_info(struct mwifiex_private *priv,
+			  struct mwifiex_sta_node *node,
 			  struct station_info *sinfo)
 {
 	u32 rate;
@@ -1203,15 +1239,41 @@
 			BIT(NL80211_STA_INFO_TX_BITRATE) |
 			BIT(NL80211_STA_INFO_SIGNAL) | BIT(NL80211_STA_INFO_SIGNAL_AVG);
 
+	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+		if (!node)
+			return -ENOENT;
+
+		sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME) |
+				BIT(NL80211_STA_INFO_TX_FAILED);
+		sinfo->inactive_time =
+			jiffies_to_msecs(jiffies - node->stats.last_rx);
+
+		sinfo->signal = node->stats.rssi;
+		sinfo->signal_avg = node->stats.rssi;
+		sinfo->rx_bytes = node->stats.rx_bytes;
+		sinfo->tx_bytes = node->stats.tx_bytes;
+		sinfo->rx_packets = node->stats.rx_packets;
+		sinfo->tx_packets = node->stats.tx_packets;
+		sinfo->tx_failed = node->stats.tx_failed;
+
+		mwifiex_parse_htinfo(priv, node->stats.last_tx_htinfo,
+				     &sinfo->txrate);
+		sinfo->txrate.legacy = node->stats.last_tx_rate * 5;
+
+		return 0;
+	}
+
 	/* Get signal information from the firmware */
 	if (mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
 			     HostCmd_ACT_GEN_GET, 0, NULL, true)) {
-		dev_err(priv->adapter->dev, "failed to get signal information\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "failed to get signal information\n");
 		return -EFAULT;
 	}
 
 	if (mwifiex_drv_get_data_rate(priv, &rate)) {
-		dev_err(priv->adapter->dev, "getting data rate\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "getting data rate error\n");
 		return -EFAULT;
 	}
 
@@ -1267,7 +1329,7 @@
 	if (memcmp(mac, priv->cfg_bssid, ETH_ALEN))
 		return -ENOENT;
 
-	return mwifiex_dump_station_info(priv, sinfo);
+	return mwifiex_dump_station_info(priv, NULL, sinfo);
 }
 
 /*
@@ -1278,13 +1340,29 @@
 			      int idx, u8 *mac, struct station_info *sinfo)
 {
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+	static struct mwifiex_sta_node *node;
 
-	if (!priv->media_connected || idx)
-		return -ENOENT;
+	if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+	    priv->media_connected && idx == 0) {
+		ether_addr_copy(mac, priv->cfg_bssid);
+		return mwifiex_dump_station_info(priv, NULL, sinfo);
+	} else if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+		mwifiex_send_cmd(priv, HOST_CMD_APCMD_STA_LIST,
+				 HostCmd_ACT_GEN_GET, 0, NULL, true);
 
-	memcpy(mac, priv->cfg_bssid, ETH_ALEN);
+		if (node && (&node->list == &priv->sta_list)) {
+			node = NULL;
+			return -ENOENT;
+		}
 
-	return mwifiex_dump_station_info(priv, sinfo);
+		node = list_prepare_entry(node, &priv->sta_list, list);
+		list_for_each_entry_continue(node, &priv->sta_list, list) {
+			ether_addr_copy(mac, node->mac_addr);
+			return mwifiex_dump_station_info(priv, node, sinfo);
+		}
+	}
+
+	return -ENOENT;
 }
 
 static int
@@ -1295,7 +1373,7 @@
 	struct mwifiex_chan_stats *pchan_stats = priv->adapter->chan_stats;
 	enum ieee80211_band band;
 
-	dev_dbg(priv->adapter->dev, "dump_survey idx=%d\n", idx);
+	mwifiex_dbg(priv->adapter, DUMP, "dump_survey idx=%d\n", idx);
 
 	memset(survey, 0, sizeof(struct survey_info));
 
@@ -1472,8 +1550,8 @@
 	struct mwifiex_adapter *adapter = priv->adapter;
 
 	if (!priv->media_connected) {
-		dev_err(adapter->dev,
-			"Can not set Tx data rate in disconnected state\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "Can not set Tx data rate in disconnected state\n");
 		return -EINVAL;
 	}
 
@@ -1556,17 +1634,20 @@
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
 	if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) {
-		wiphy_err(wiphy, "%s: bss_type mismatched\n", __func__);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "%s: bss_type mismatched\n", __func__);
 		return -EINVAL;
 	}
 
 	if (!priv->bss_started) {
-		wiphy_err(wiphy, "%s: bss not started\n", __func__);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "%s: bss not started\n", __func__);
 		return -EINVAL;
 	}
 
 	if (mwifiex_set_mgmt_ies(priv, data)) {
-		wiphy_err(wiphy, "%s: setting mgmt ies failed\n", __func__);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "%s: setting mgmt ies failed\n", __func__);
 		return -EFAULT;
 	}
 
@@ -1594,7 +1675,8 @@
 	if (!params->mac || is_broadcast_ether_addr(params->mac))
 		return 0;
 
-	wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, params->mac);
+	mwifiex_dbg(priv->adapter, INFO, "%s: mac address %pM\n",
+		    __func__, params->mac);
 
 	eth_zero_addr(deauth_mac);
 
@@ -1687,14 +1769,23 @@
 	mwifiex_abort_cac(priv);
 
 	if (mwifiex_del_mgmt_ies(priv))
-		wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Failed to delete mgmt IEs!\n");
 
 	priv->ap_11n_enabled = 0;
 	memset(&priv->bss_cfg, 0, sizeof(priv->bss_cfg));
 
 	if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
 			     HostCmd_ACT_GEN_SET, 0, NULL, true)) {
-		wiphy_err(wiphy, "Failed to stop the BSS\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Failed to stop the BSS\n");
+		return -1;
+	}
+
+	if (mwifiex_send_cmd(priv, HOST_CMD_APCMD_SYS_RESET,
+			     HostCmd_ACT_GEN_SET, 0, NULL, true)) {
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Failed to reset BSS\n");
 		return -1;
 	}
 
@@ -1751,12 +1842,13 @@
 		return -EINVAL;
 	}
 
-	mwifiex_uap_set_channel(bss_cfg, params->chandef);
+	mwifiex_uap_set_channel(priv, bss_cfg, params->chandef);
 	mwifiex_set_uap_rates(bss_cfg, params);
 
 	if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
 		kfree(bss_cfg);
-		wiphy_err(wiphy, "Failed to parse secuirty parameters!\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Failed to parse secuirty parameters!\n");
 		return -1;
 	}
 
@@ -1775,20 +1867,25 @@
 
 	mwifiex_set_wmm_params(priv, bss_cfg, params);
 
+	if (mwifiex_is_11h_active(priv))
+		mwifiex_set_tpc_params(priv, bss_cfg, params);
+
 	if (mwifiex_is_11h_active(priv) &&
 	    !cfg80211_chandef_dfs_required(wiphy, &params->chandef,
 					   priv->bss_mode)) {
-		dev_dbg(priv->adapter->dev, "Disable 11h extensions in FW\n");
+		mwifiex_dbg(priv->adapter, INFO,
+			    "Disable 11h extensions in FW\n");
 		if (mwifiex_11h_activate(priv, false)) {
-			dev_err(priv->adapter->dev,
-				"Failed to disable 11h extensions!!");
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "Failed to disable 11h extensions!!");
 			return -1;
 		}
-		priv->state_11h.is_11h_active = true;
+		priv->state_11h.is_11h_active = false;
 	}
 
 	if (mwifiex_config_start_uap(priv, bss_cfg)) {
-		wiphy_err(wiphy, "Failed to start AP\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Failed to start AP\n");
 		kfree(bss_cfg);
 		return -1;
 	}
@@ -1816,8 +1913,9 @@
 	if (mwifiex_deauthenticate(priv, NULL))
 		return -EFAULT;
 
-	wiphy_dbg(wiphy, "info: successfully disconnected from %pM:"
-		" reason code %d\n", priv->cfg_bssid, reason_code);
+	mwifiex_dbg(priv->adapter, MSG,
+		    "info: successfully disconnected from %pM:\t"
+		    "reason code %d\n", priv->cfg_bssid, reason_code);
 
 	eth_zero_addr(priv->cfg_bssid);
 	priv->hs2_enabled = false;
@@ -1899,13 +1997,13 @@
 
 	req_ssid.ssid_len = ssid_len;
 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
-		dev_err(priv->adapter->dev, "invalid SSID - aborting\n");
+		mwifiex_dbg(priv->adapter, ERROR, "invalid SSID - aborting\n");
 		return -EINVAL;
 	}
 
 	memcpy(req_ssid.ssid, ssid, ssid_len);
 	if (!req_ssid.ssid_len || req_ssid.ssid[0] < 0x20) {
-		dev_err(priv->adapter->dev, "invalid SSID - aborting\n");
+		mwifiex_dbg(priv->adapter, ERROR, "invalid SSID - aborting\n");
 		return -EINVAL;
 	}
 
@@ -1959,9 +2057,9 @@
 
 	if (sme->key) {
 		if (mwifiex_is_alg_wep(priv->sec_info.encryption_mode)) {
-			dev_dbg(priv->adapter->dev,
-				"info: setting wep encryption"
-				" with key len %d\n", sme->key_len);
+			mwifiex_dbg(priv->adapter, INFO,
+				    "info: setting wep encryption\t"
+				    "with key len %d\n", sme->key_len);
 			priv->wep_key_curr_index = sme->key_idx;
 			ret = mwifiex_set_encode(priv, NULL, sme->key,
 						 sme->key_len, sme->key_idx,
@@ -1978,7 +2076,7 @@
 		if (is_scanning_required) {
 			/* Do specific SSID scanning */
 			if (mwifiex_request_scan(priv, &req_ssid)) {
-				dev_err(priv->adapter->dev, "scan error\n");
+				mwifiex_dbg(priv->adapter, ERROR, "scan error\n");
 				return -EFAULT;
 			}
 		}
@@ -1997,15 +2095,15 @@
 
 		if (!bss) {
 			if (is_scanning_required) {
-				dev_warn(priv->adapter->dev,
-					 "assoc: requested bss not found in scan results\n");
+				mwifiex_dbg(priv->adapter, WARN,
+					    "assoc: requested bss not found in scan results\n");
 				break;
 			}
 			is_scanning_required = 1;
 		} else {
-			dev_dbg(priv->adapter->dev,
-				"info: trying to associate to '%s' bssid %pM\n",
-				(char *) req_ssid.ssid, bss->bssid);
+			mwifiex_dbg(priv->adapter, MSG,
+				    "info: trying to associate to '%s' bssid %pM\n",
+				    (char *)req_ssid.ssid, bss->bssid);
 			memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN);
 			break;
 		}
@@ -2041,26 +2139,29 @@
 	int ret;
 
 	if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) {
-		wiphy_err(wiphy,
-			  "%s: reject infra assoc request in non-STA role\n",
-			  dev->name);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: reject infra assoc request in non-STA role\n",
+			    dev->name);
 		return -EINVAL;
 	}
 
 	if (priv->wdev.current_bss) {
-		wiphy_warn(wiphy, "%s: already connected\n", dev->name);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: already connected\n", dev->name);
 		return -EALREADY;
 	}
 
 	if (adapter->surprise_removed || adapter->is_cmd_timedout) {
-		wiphy_err(wiphy,
-			  "%s: Ignore connection. Card removed or FW in bad state\n",
-			  dev->name);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: Ignore connection.\t"
+			    "Card removed or FW in bad state\n",
+			    dev->name);
 		return -EFAULT;
 	}
 
-	wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
-		  (char *) sme->ssid, sme->bssid);
+	mwifiex_dbg(adapter, INFO,
+		    "info: Trying to associate to %s and bssid %pM\n",
+		    (char *)sme->ssid, sme->bssid);
 
 	ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
 				     priv->bss_mode, sme->channel, sme, 0);
@@ -2068,17 +2169,17 @@
 		cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0,
 					NULL, 0, WLAN_STATUS_SUCCESS,
 					GFP_KERNEL);
-		dev_dbg(priv->adapter->dev,
-			"info: associated to bssid %pM successfully\n",
-			priv->cfg_bssid);
+		mwifiex_dbg(priv->adapter, MSG,
+			    "info: associated to bssid %pM successfully\n",
+			    priv->cfg_bssid);
 		if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
 		    priv->adapter->auto_tdls &&
 		    priv->bss_type == MWIFIEX_BSS_TYPE_STA)
 			mwifiex_setup_auto_tdls_timer(priv);
 	} else {
-		dev_dbg(priv->adapter->dev,
-			"info: association to bssid %pM failed\n",
-			priv->cfg_bssid);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "info: association to bssid %pM failed\n",
+			    priv->cfg_bssid);
 		eth_zero_addr(priv->cfg_bssid);
 
 		if (ret > 0)
@@ -2105,7 +2206,6 @@
 static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
 				   struct cfg80211_ibss_params *params)
 {
-	struct wiphy *wiphy = priv->wdev.wiphy;
 	struct mwifiex_adapter *adapter = priv->adapter;
 	int index = 0, i;
 	u8 config_bands = 0;
@@ -2162,8 +2262,10 @@
 	priv->adhoc_channel = ieee80211_frequency_to_channel(
 				params->chandef.chan->center_freq);
 
-	wiphy_dbg(wiphy, "info: set ibss band %d, chan %d, chan offset %d\n",
-		  config_bands, priv->adhoc_channel, adapter->sec_chan_offset);
+	mwifiex_dbg(adapter, INFO,
+		    "info: set ibss band %d, chan %d, chan offset %d\n",
+		    config_bands, priv->adhoc_channel,
+		    adapter->sec_chan_offset);
 
 	return 0;
 }
@@ -2182,13 +2284,15 @@
 	int ret = 0;
 
 	if (priv->bss_mode != NL80211_IFTYPE_ADHOC) {
-		wiphy_err(wiphy, "request to join ibss received "
-				"when station is not in ibss mode\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "request to join ibss received\t"
+			    "when station is not in ibss mode\n");
 		goto done;
 	}
 
-	wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
-		  (char *) params->ssid, params->bssid);
+	mwifiex_dbg(priv->adapter, MSG,
+		    "info: trying to join to %s and bssid %pM\n",
+		    (char *)params->ssid, params->bssid);
 
 	mwifiex_set_ibss_params(priv, params);
 
@@ -2200,12 +2304,12 @@
 	if (!ret) {
 		cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
 				     params->chandef.chan, GFP_KERNEL);
-		dev_dbg(priv->adapter->dev,
-			"info: joined/created adhoc network with bssid"
-			" %pM successfully\n", priv->cfg_bssid);
+		mwifiex_dbg(priv->adapter, MSG,
+			    "info: joined/created adhoc network with bssid\t"
+			    "%pM successfully\n", priv->cfg_bssid);
 	} else {
-		dev_dbg(priv->adapter->dev,
-			"info: failed creating/joining adhoc network\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "info: failed creating/joining adhoc network\n");
 	}
 
 	return ret;
@@ -2222,8 +2326,8 @@
 {
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
-	wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n",
-		  priv->cfg_bssid);
+	mwifiex_dbg(priv->adapter, MSG, "info: disconnecting from essid %pM\n",
+		    priv->cfg_bssid);
 	if (mwifiex_deauthenticate(priv, NULL))
 		return -EFAULT;
 
@@ -2250,13 +2354,15 @@
 	struct ieee_types_header *ie;
 	struct mwifiex_user_scan_cfg *user_scan_cfg;
 
-	wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
+	mwifiex_dbg(priv->adapter, CMD,
+		    "info: received scan request on %s\n", dev->name);
 
 	/* Block scan request if scan operation or scan cleanup when interface
 	 * is disabled is in process
 	 */
 	if (priv->scan_request || priv->scan_aborting) {
-		dev_err(priv->adapter->dev, "cmd: Scan already in process..\n");
+		mwifiex_dbg(priv->adapter, WARN,
+			    "cmd: Scan already in process..\n");
 		return -EBUSY;
 	}
 
@@ -2308,7 +2414,8 @@
 	ret = mwifiex_scan_networks(priv, user_scan_cfg);
 	kfree(user_scan_cfg);
 	if (ret) {
-		dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "scan failed: %d\n", ret);
 		priv->scan_aborting = false;
 		priv->scan_request = NULL;
 		return ret;
@@ -2454,15 +2561,15 @@
 	case NL80211_IFTYPE_ADHOC:
 		if (adapter->curr_iface_comb.sta_intf ==
 		    adapter->iface_limit.sta_intf) {
-			wiphy_err(wiphy,
-				  "cannot create multiple sta/adhoc ifaces\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "cannot create multiple sta/adhoc ifaces\n");
 			return ERR_PTR(-EINVAL);
 		}
 
 		priv = mwifiex_get_unused_priv(adapter);
 		if (!priv) {
-			wiphy_err(wiphy,
-				  "could not get free private struct\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "could not get free private struct\n");
 			return ERR_PTR(-EFAULT);
 		}
 
@@ -2478,21 +2585,21 @@
 		priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
 		priv->bss_priority = 0;
 		priv->bss_role = MWIFIEX_BSS_ROLE_STA;
-		priv->bss_num = 0;
+		priv->bss_num = adapter->curr_iface_comb.sta_intf;
 
 		break;
 	case NL80211_IFTYPE_AP:
 		if (adapter->curr_iface_comb.uap_intf ==
 		    adapter->iface_limit.uap_intf) {
-			wiphy_err(wiphy,
-				  "cannot create multiple AP ifaces\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "cannot create multiple AP ifaces\n");
 			return ERR_PTR(-EINVAL);
 		}
 
 		priv = mwifiex_get_unused_priv(adapter);
 		if (!priv) {
-			wiphy_err(wiphy,
-				  "could not get free private struct\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "could not get free private struct\n");
 			return ERR_PTR(-EFAULT);
 		}
 
@@ -2504,22 +2611,22 @@
 		priv->bss_priority = 0;
 		priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
 		priv->bss_started = 0;
-		priv->bss_num = 0;
+		priv->bss_num = adapter->curr_iface_comb.uap_intf;
 		priv->bss_mode = type;
 
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
 		if (adapter->curr_iface_comb.p2p_intf ==
 		    adapter->iface_limit.p2p_intf) {
-			wiphy_err(wiphy,
-				  "cannot create multiple P2P ifaces\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "cannot create multiple P2P ifaces\n");
 			return ERR_PTR(-EINVAL);
 		}
 
 		priv = mwifiex_get_unused_priv(adapter);
 		if (!priv) {
-			wiphy_err(wiphy,
-				  "could not get free private struct\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "could not get free private struct\n");
 			return ERR_PTR(-EFAULT);
 		}
 
@@ -2540,7 +2647,7 @@
 		priv->bss_priority = MWIFIEX_BSS_ROLE_STA;
 		priv->bss_role = MWIFIEX_BSS_ROLE_STA;
 		priv->bss_started = 0;
-		priv->bss_num = 0;
+		priv->bss_num = adapter->curr_iface_comb.p2p_intf;
 
 		if (mwifiex_cfg80211_init_p2p_client(priv)) {
 			memset(&priv->wdev, 0, sizeof(priv->wdev));
@@ -2550,7 +2657,7 @@
 
 		break;
 	default:
-		wiphy_err(wiphy, "type not supported\n");
+		mwifiex_dbg(adapter, ERROR, "type not supported\n");
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -2558,7 +2665,8 @@
 			       name_assign_type, ether_setup,
 			       IEEE80211_NUM_ACS, 1);
 	if (!dev) {
-		wiphy_err(wiphy, "no memory available for netdevice\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "no memory available for netdevice\n");
 		memset(&priv->wdev, 0, sizeof(priv->wdev));
 		priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
 		priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -2599,7 +2707,8 @@
 
 	/* Register network device */
 	if (register_netdevice(dev)) {
-		wiphy_err(wiphy, "cannot register virtual network device\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "cannot register virtual network device\n");
 		free_netdev(dev);
 		priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
 		priv->netdev = NULL;
@@ -2613,7 +2722,8 @@
 						  WQ_MEM_RECLAIM |
 						  WQ_UNBOUND, 1, name);
 	if (!priv->dfs_cac_workqueue) {
-		wiphy_err(wiphy, "cannot register virtual network device\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "cannot register virtual network device\n");
 		free_netdev(dev);
 		priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
 		priv->netdev = NULL;
@@ -2628,7 +2738,8 @@
 						      WQ_HIGHPRI | WQ_UNBOUND |
 						      WQ_MEM_RECLAIM, 1, name);
 	if (!priv->dfs_chan_sw_workqueue) {
-		wiphy_err(wiphy, "cannot register virtual network device\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "cannot register virtual network device\n");
 		free_netdev(dev);
 		priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
 		priv->netdev = NULL;
@@ -2642,7 +2753,8 @@
 
 	sema_init(&priv->async_sem, 1);
 
-	dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name);
+	mwifiex_dbg(adapter, INFO,
+		    "info: %s: Marvell 802.11 Adapter\n", dev->name);
 
 #ifdef CONFIG_DEBUG_FS
 	mwifiex_dev_debugfs_init(priv);
@@ -2661,7 +2773,7 @@
 		adapter->curr_iface_comb.p2p_intf++;
 		break;
 	default:
-		wiphy_err(wiphy, "type not supported\n");
+		mwifiex_dbg(adapter, ERROR, "type not supported\n");
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -2721,7 +2833,8 @@
 		adapter->curr_iface_comb.p2p_intf++;
 		break;
 	default:
-		dev_err(adapter->dev, "del_virtual_intf: type not supported\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "del_virtual_intf: type not supported\n");
 		break;
 	}
 
@@ -2839,7 +2952,8 @@
 		if (!mwifiex_is_pattern_supported(&wowlan->patterns[i],
 					byte_seq,
 					MWIFIEX_MEF_MAX_BYTESEQ)) {
-			dev_err(priv->adapter->dev, "Pattern not supported\n");
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "Pattern not supported\n");
 			kfree(mef_entry);
 			return -EOPNOTSUPP;
 		}
@@ -2954,21 +3068,22 @@
 	mwifiex_cancel_all_pending_cmd(adapter);
 
 	if (!wowlan) {
-		dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "None of the WOWLAN triggers enabled\n");
 		return 0;
 	}
 
 	priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
 
 	if (!priv->media_connected) {
-		dev_warn(adapter->dev,
-			 "Can not configure WOWLAN in disconnected state\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "Can not configure WOWLAN in disconnected state\n");
 		return 0;
 	}
 
 	ret = mwifiex_set_mef_filter(priv, wowlan);
 	if (ret) {
-		dev_err(adapter->dev, "Failed to set MEF filter\n");
+		mwifiex_dbg(adapter, ERROR, "Failed to set MEF filter\n");
 		return ret;
 	}
 
@@ -2981,7 +3096,8 @@
 		ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
 					    MWIFIEX_SYNC_CMD, &hs_cfg);
 		if (ret) {
-			dev_err(adapter->dev, "Failed to set HS params\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "Failed to set HS params\n");
 			return ret;
 		}
 	}
@@ -3041,7 +3157,8 @@
 		if (!mwifiex_is_pattern_supported(&crule->patterns[i],
 						  byte_seq,
 						MWIFIEX_COALESCE_MAX_BYTESEQ)) {
-			dev_err(priv->adapter->dev, "Pattern not supported\n");
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "Pattern not supported\n");
 			return -EOPNOTSUPP;
 		}
 
@@ -3050,8 +3167,8 @@
 
 			pkt_type = mwifiex_get_coalesce_pkt_type(byte_seq);
 			if (pkt_type && mrule->pkt_type) {
-				dev_err(priv->adapter->dev,
-					"Multiple packet types not allowed\n");
+				mwifiex_dbg(priv->adapter, ERROR,
+					    "Multiple packet types not allowed\n");
 				return -EOPNOTSUPP;
 			} else if (pkt_type) {
 				mrule->pkt_type = pkt_type;
@@ -3074,8 +3191,8 @@
 	}
 
 	if (!mrule->pkt_type) {
-		dev_err(priv->adapter->dev,
-			"Packet type can not be determined\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Packet type can not be determined\n");
 		return -EOPNOTSUPP;
 	}
 
@@ -3093,8 +3210,8 @@
 
 	memset(&coalesce_cfg, 0, sizeof(coalesce_cfg));
 	if (!coalesce) {
-		dev_dbg(adapter->dev,
-			"Disable coalesce and reset all previous rules\n");
+		mwifiex_dbg(adapter, WARN,
+			    "Disable coalesce and reset all previous rules\n");
 		return mwifiex_send_cmd(priv, HostCmd_CMD_COALESCE_CFG,
 					HostCmd_ACT_GEN_SET, 0,
 					&coalesce_cfg, true);
@@ -3105,8 +3222,8 @@
 		ret = mwifiex_fill_coalesce_rule_info(priv, &coalesce->rules[i],
 						      &coalesce_cfg.rule[i]);
 		if (ret) {
-			dev_err(priv->adapter->dev,
-				"Recheck the patterns provided for rule %d\n",
+			mwifiex_dbg(adapter, ERROR,
+				    "Recheck the patterns provided for rule %d\n",
-				i + 1);
+				    i + 1);
 			return ret;
 		}
@@ -3138,9 +3255,9 @@
 
 	switch (action_code) {
 	case WLAN_TDLS_SETUP_REQUEST:
-		dev_dbg(priv->adapter->dev,
-			"Send TDLS Setup Request to %pM status_code=%d\n", peer,
-			 status_code);
+		mwifiex_dbg(priv->adapter, MSG,
+			    "Send TDLS Setup Request to %pM status_code=%d\n",
+			    peer, status_code);
 		mwifiex_add_auto_tdls_peer(priv, peer);
 		ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
 						   dialog_token, status_code,
@@ -3148,45 +3265,45 @@
 		break;
 	case WLAN_TDLS_SETUP_RESPONSE:
 		mwifiex_add_auto_tdls_peer(priv, peer);
-		dev_dbg(priv->adapter->dev,
-			"Send TDLS Setup Response to %pM status_code=%d\n",
-			peer, status_code);
+		mwifiex_dbg(priv->adapter, MSG,
+			    "Send TDLS Setup Response to %pM status_code=%d\n",
+			    peer, status_code);
 		ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
 						   dialog_token, status_code,
 						   extra_ies, extra_ies_len);
 		break;
 	case WLAN_TDLS_SETUP_CONFIRM:
-		dev_dbg(priv->adapter->dev,
-			"Send TDLS Confirm to %pM status_code=%d\n", peer,
-			status_code);
+		mwifiex_dbg(priv->adapter, MSG,
+			    "Send TDLS Confirm to %pM status_code=%d\n", peer,
+			    status_code);
 		ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
 						   dialog_token, status_code,
 						   extra_ies, extra_ies_len);
 		break;
 	case WLAN_TDLS_TEARDOWN:
-		dev_dbg(priv->adapter->dev, "Send TDLS Tear down to %pM\n",
-			peer);
+		mwifiex_dbg(priv->adapter, MSG,
+			    "Send TDLS Tear down to %pM\n", peer);
 		ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
 						   dialog_token, status_code,
 						   extra_ies, extra_ies_len);
 		break;
 	case WLAN_TDLS_DISCOVERY_REQUEST:
-		dev_dbg(priv->adapter->dev,
-			"Send TDLS Discovery Request to %pM\n", peer);
+		mwifiex_dbg(priv->adapter, MSG,
+			    "Send TDLS Discovery Request to %pM\n", peer);
 		ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
 						   dialog_token, status_code,
 						   extra_ies, extra_ies_len);
 		break;
 	case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
-		dev_dbg(priv->adapter->dev,
-			"Send TDLS Discovery Response to %pM\n", peer);
+		mwifiex_dbg(priv->adapter, MSG,
+			    "Send TDLS Discovery Response to %pM\n", peer);
 		ret = mwifiex_send_tdls_action_frame(priv, peer, action_code,
 						   dialog_token, status_code,
 						   extra_ies, extra_ies_len);
 		break;
 	default:
-		dev_warn(priv->adapter->dev,
-			 "Unknown TDLS mgmt/action frame %pM\n", peer);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Unknown TDLS mgmt/action frame %pM\n", peer);
 		ret = -EINVAL;
 		break;
 	}
@@ -3208,8 +3325,8 @@
 	if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
 		return -ENOTSUPP;
 
-	dev_dbg(priv->adapter->dev,
-		"TDLS peer=%pM, oper=%d\n", peer, action);
+	mwifiex_dbg(priv->adapter, MSG,
+		    "TDLS peer=%pM, oper=%d\n", peer, action);
 
 	switch (action) {
 	case NL80211_TDLS_ENABLE_LINK:
@@ -3220,22 +3337,22 @@
 		break;
 	case NL80211_TDLS_TEARDOWN:
 		/* shouldn't happen!*/
-		dev_warn(priv->adapter->dev,
-			 "tdls_oper: teardown from driver not supported\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "tdls_oper: teardown from driver not supported\n");
 		return -EINVAL;
 	case NL80211_TDLS_SETUP:
 		/* shouldn't happen!*/
-		dev_warn(priv->adapter->dev,
-			 "tdls_oper: setup from driver not supported\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "tdls_oper: setup from driver not supported\n");
 		return -EINVAL;
 	case NL80211_TDLS_DISCOVERY_REQ:
 		/* shouldn't happen!*/
-		dev_warn(priv->adapter->dev,
-			 "tdls_oper: discovery from driver not supported\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "tdls_oper: discovery from driver not supported\n");
 		return -EINVAL;
 	default:
-		dev_err(priv->adapter->dev,
-			"tdls_oper: operation not supported\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "tdls_oper: operation not supported\n");
 		return -ENOTSUPP;
 	}
 
@@ -3268,8 +3385,8 @@
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
 	if (priv->adapter->scan_processing) {
-		dev_err(priv->adapter->dev,
-			"radar detection: scan in process...\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "radar detection: scan in process...\n");
 		return -EBUSY;
 	}
 
@@ -3284,8 +3401,8 @@
 					   params->beacon_csa.tail,
 					   params->beacon_csa.tail_len);
 	if (!chsw_ie) {
-		dev_err(priv->adapter->dev,
-			"Could not parse channel switch announcement IE\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Could not parse channel switch announcement IE\n");
 		return -EINVAL;
 	}
 
@@ -3297,10 +3414,12 @@
 	}
 
 	if (mwifiex_del_mgmt_ies(priv))
-		wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Failed to delete mgmt IEs!\n");
 
 	if (mwifiex_set_mgmt_ies(priv, &params->beacon_csa)) {
-		wiphy_err(wiphy, "%s: setting mgmt ies failed\n", __func__);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "%s: setting mgmt ies failed\n", __func__);
 		return -EFAULT;
 	}
 
@@ -3314,6 +3433,45 @@
 	return 0;
 }
 
+static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy,
+					struct wireless_dev *wdev,
+					struct cfg80211_chan_def *chandef)
+{
+	struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+	struct mwifiex_bssdescriptor *curr_bss;
+	struct ieee80211_channel *chan;
+	u8 second_chan_offset;
+	enum nl80211_channel_type chan_type;
+	enum ieee80211_band band;
+	int freq;
+	int ret = -ENODATA;
+
+	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP &&
+	    cfg80211_chandef_valid(&priv->bss_chandef)) {
+		*chandef = priv->bss_chandef;
+		ret = 0;
+	} else if (priv->media_connected) {
+		curr_bss = &priv->curr_bss_params.bss_descriptor;
+		band = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
+		freq = ieee80211_channel_to_frequency(curr_bss->channel, band);
+		chan = ieee80211_get_channel(wiphy, freq);
+
+		if (curr_bss->bcn_ht_oper) {
+			second_chan_offset = curr_bss->bcn_ht_oper->ht_param &
+					IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
+			chan_type = mwifiex_sec_chan_offset_to_chan_type
+							(second_chan_offset);
+			cfg80211_chandef_create(chandef, chan, chan_type);
+		} else {
+			cfg80211_chandef_create(chandef, chan,
+						NL80211_CHAN_NO_HT);
+		}
+		ret = 0;
+	}
+
+	return ret;
+}
+
 static int
 mwifiex_cfg80211_start_radar_detection(struct wiphy *wiphy,
 				       struct net_device *dev,
@@ -3324,16 +3482,17 @@
 	struct mwifiex_radar_params radar_params;
 
 	if (priv->adapter->scan_processing) {
-		dev_err(priv->adapter->dev,
-			"radar detection: scan already in process...\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "radar detection: scan already in process...\n");
 		return -EBUSY;
 	}
 
 	if (!mwifiex_is_11h_active(priv)) {
-		dev_dbg(priv->adapter->dev, "Enable 11h extensions in FW\n");
+		mwifiex_dbg(priv->adapter, INFO,
+			    "Enable 11h extensions in FW\n");
 		if (mwifiex_11h_activate(priv, true)) {
-			dev_err(priv->adapter->dev,
-				"Failed to activate 11h extensions!!");
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "Failed to activate 11h extensions!!");
 			return -1;
 		}
 		priv->state_11h.is_11h_active = true;
@@ -3418,6 +3577,7 @@
 	.tdls_oper = mwifiex_cfg80211_tdls_oper,
 	.add_station = mwifiex_cfg80211_add_station,
 	.change_station = mwifiex_cfg80211_change_station,
+	.get_channel = mwifiex_cfg80211_get_channel,
 	.start_radar_detection = mwifiex_cfg80211_start_radar_detection,
 	.channel_switch = mwifiex_cfg80211_channel_switch,
 };
@@ -3492,7 +3652,8 @@
 	wiphy = wiphy_new(&mwifiex_cfg80211_ops,
 			  sizeof(struct mwifiex_adapter *));
 	if (!wiphy) {
-		dev_err(adapter->dev, "%s: creating new wiphy\n", __func__);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: creating new wiphy\n", __func__);
 		return -ENOMEM;
 	}
 	wiphy->max_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH;
@@ -3524,7 +3685,8 @@
 			WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
 			WIPHY_FLAG_AP_UAPSD |
 			WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
-			WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+			WIPHY_FLAG_HAS_CHANNEL_SWITCH |
+			WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
 	if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
 		wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
@@ -3563,20 +3725,22 @@
 
 	ret = wiphy_register(wiphy);
 	if (ret < 0) {
-		dev_err(adapter->dev,
-			"%s: wiphy_register failed: %d\n", __func__, ret);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: wiphy_register failed: %d\n", __func__, ret);
 		wiphy_free(wiphy);
 		return ret;
 	}
 
 	if (reg_alpha2 && mwifiex_is_valid_alpha2(reg_alpha2)) {
-		wiphy_info(wiphy, "driver hint alpha2: %2.2s\n", reg_alpha2);
+		mwifiex_dbg(adapter, INFO,
+			    "driver hint alpha2: %2.2s\n", reg_alpha2);
 		regulatory_hint(wiphy, reg_alpha2);
 	} else {
 		country_code = mwifiex_11d_code_2_region(adapter->region_code);
 		if (country_code)
-			wiphy_info(wiphy, "ignoring F/W country code %2.2s\n",
-				   country_code);
+			mwifiex_dbg(adapter, WARN,
+				    "ignoring F/W country code %2.2s\n",
+				    country_code);
 	}
 
 	mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
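
With .get_channel wired into mwifiex_cfg80211_ops (see the cfg80211.c hunks
above), nl80211 can report the current operating channel of an interface,
e.g. in `iw dev <iface> info` output. Roughly, the consumer side in cfg80211
looks like the following (a simplified sketch, not the literal nl80211 code;
report_channel_to_userspace() is a stand-in for the netlink attribute fill):

	struct cfg80211_chan_def chandef;

	if (rdev->ops->get_channel &&
	    rdev->ops->get_channel(&rdev->wiphy, wdev, &chandef) == 0)
		report_channel_to_userspace(&chandef);	/* hypothetical */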
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index e9df882..3ddb8ec 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -327,8 +327,9 @@
 		sband = priv->wdev.wiphy->bands[IEEE80211_BAND_5GHZ];
 
 	if (!sband) {
-		dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d\n",
-			__func__, band);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "%s: cannot find cfp by band %d\n",
+			    __func__, band);
 		return cfp;
 	}
 
@@ -349,9 +350,10 @@
 		}
 	}
 	if (i == sband->n_channels) {
-		dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d"
-			" & channel=%d freq=%d\n", __func__, band, channel,
-			freq);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "%s: cannot find cfp by band %d\t"
+			    "& channel=%d freq=%d\n",
+			    __func__, band, channel, freq);
 	} else {
 		if (!ch)
 			return cfp;
@@ -431,15 +433,17 @@
 	    priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
 		switch (adapter->config_bands) {
 		case BAND_B:
-			dev_dbg(adapter->dev, "info: infra band=%d "
-				"supported_rates_b\n", adapter->config_bands);
+			mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+				    "supported_rates_b\n",
+				    adapter->config_bands);
 			k = mwifiex_copy_rates(rates, k, supported_rates_b,
 					       sizeof(supported_rates_b));
 			break;
 		case BAND_G:
 		case BAND_G | BAND_GN:
-			dev_dbg(adapter->dev, "info: infra band=%d "
-				"supported_rates_g\n", adapter->config_bands);
+			mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+				    "supported_rates_g\n",
+				    adapter->config_bands);
 			k = mwifiex_copy_rates(rates, k, supported_rates_g,
 					       sizeof(supported_rates_g));
 			break;
@@ -449,15 +453,17 @@
 		case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN:
 		case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN | BAND_AAC:
 		case BAND_B | BAND_G | BAND_GN:
-			dev_dbg(adapter->dev, "info: infra band=%d "
-				"supported_rates_bg\n", adapter->config_bands);
+			mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+				    "supported_rates_bg\n",
+				    adapter->config_bands);
 			k = mwifiex_copy_rates(rates, k, supported_rates_bg,
 					       sizeof(supported_rates_bg));
 			break;
 		case BAND_A:
 		case BAND_A | BAND_G:
-			dev_dbg(adapter->dev, "info: infra band=%d "
-				"supported_rates_a\n", adapter->config_bands);
+			mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+				    "supported_rates_a\n",
+				    adapter->config_bands);
 			k = mwifiex_copy_rates(rates, k, supported_rates_a,
 					       sizeof(supported_rates_a));
 			break;
@@ -466,14 +472,16 @@
 		case BAND_A | BAND_AN | BAND_AAC:
 		case BAND_A | BAND_G | BAND_AN | BAND_GN:
 		case BAND_A | BAND_G | BAND_AN | BAND_GN | BAND_AAC:
-			dev_dbg(adapter->dev, "info: infra band=%d "
-				"supported_rates_a\n", adapter->config_bands);
+			mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+				    "supported_rates_a\n",
+				    adapter->config_bands);
 			k = mwifiex_copy_rates(rates, k, supported_rates_a,
 					       sizeof(supported_rates_a));
 			break;
 		case BAND_GN:
-			dev_dbg(adapter->dev, "info: infra band=%d "
-				"supported_rates_n\n", adapter->config_bands);
+			mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+				    "supported_rates_n\n",
+				    adapter->config_bands);
 			k = mwifiex_copy_rates(rates, k, supported_rates_n,
 					       sizeof(supported_rates_n));
 			break;
@@ -482,25 +490,25 @@
 		/* Ad-hoc mode */
 		switch (adapter->adhoc_start_band) {
 		case BAND_B:
-			dev_dbg(adapter->dev, "info: adhoc B\n");
+			mwifiex_dbg(adapter, INFO, "info: adhoc B\n");
 			k = mwifiex_copy_rates(rates, k, adhoc_rates_b,
 					       sizeof(adhoc_rates_b));
 			break;
 		case BAND_G:
 		case BAND_G | BAND_GN:
-			dev_dbg(adapter->dev, "info: adhoc G only\n");
+			mwifiex_dbg(adapter, INFO, "info: adhoc G only\n");
 			k = mwifiex_copy_rates(rates, k, adhoc_rates_g,
 					       sizeof(adhoc_rates_g));
 			break;
 		case BAND_B | BAND_G:
 		case BAND_B | BAND_G | BAND_GN:
-			dev_dbg(adapter->dev, "info: adhoc BG\n");
+			mwifiex_dbg(adapter, INFO, "info: adhoc BG\n");
 			k = mwifiex_copy_rates(rates, k, adhoc_rates_bg,
 					       sizeof(adhoc_rates_bg));
 			break;
 		case BAND_A:
 		case BAND_A | BAND_AN:
-			dev_dbg(adapter->dev, "info: adhoc A\n");
+			mwifiex_dbg(adapter, INFO, "info: adhoc A\n");
 			k = mwifiex_copy_rates(rates, k, adhoc_rates_a,
 					       sizeof(adhoc_rates_a));
 			break;
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index c5a14ff..207da40 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -62,7 +62,8 @@
 
 	spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
 	if (list_empty(&adapter->cmd_free_q)) {
-		dev_err(adapter->dev, "GET_CMD_NODE: cmd node not available\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "GET_CMD_NODE: cmd node not available\n");
 		spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
 		return NULL;
 	}
@@ -116,7 +117,8 @@
 {
 	/* Copy the HOST command to command buffer */
 	memcpy(cmd, pcmd_ptr->cmd, pcmd_ptr->len);
-	dev_dbg(priv->adapter->dev, "cmd: host cmd size = %d\n", pcmd_ptr->len);
+	mwifiex_dbg(priv->adapter, CMD,
+		    "cmd: host cmd size = %d\n", pcmd_ptr->len);
 	return 0;
 }
 
@@ -147,8 +149,9 @@
 
 	/* Sanity test */
 	if (host_cmd == NULL || host_cmd->size == 0) {
-		dev_err(adapter->dev, "DNLD_CMD: host_cmd is null"
-			" or cmd size is 0, not sending\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "DNLD_CMD: host_cmd is null\t"
+			    "or cmd size is 0, not sending\n");
 		if (cmd_node->wait_q_enabled)
 			adapter->cmd_wait_q.status = -1;
 		mwifiex_recycle_cmd_node(adapter, cmd_node);
@@ -161,8 +164,8 @@
 	if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET &&
 	    cmd_code != HostCmd_CMD_FUNC_SHUTDOWN &&
 	    cmd_code != HostCmd_CMD_FUNC_INIT) {
-		dev_err(adapter->dev,
-			"DNLD_CMD: FW in reset state, ignore cmd %#x\n",
+		mwifiex_dbg(adapter, ERROR,
+			    "DNLD_CMD: FW in reset state, ignore cmd %#x\n",
-			cmd_code);
+			    cmd_code);
 		if (cmd_node->wait_q_enabled)
 			mwifiex_complete_cmd(adapter, cmd_node);
@@ -197,10 +200,12 @@
 		 */
 		skb_put(cmd_node->cmd_skb, cmd_size - cmd_node->cmd_skb->len);
 
-	dev_dbg(adapter->dev,
-		"cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", cmd_code,
-		le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size,
-		le16_to_cpu(host_cmd->seq_num));
+	mwifiex_dbg(adapter, CMD,
+		    "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
+		    cmd_code,
+		    le16_to_cpu(*(__le16 *)((u8 *)host_cmd + S_DS_GEN)),
+		    cmd_size, le16_to_cpu(host_cmd->seq_num));
+	mwifiex_dbg_dump(adapter, CMD_D, "cmd buffer:", host_cmd, cmd_size);
 
 	if (adapter->iface_type == MWIFIEX_USB) {
 		tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD);
@@ -221,7 +226,8 @@
 	}
 
 	if (ret == -1) {
-		dev_err(adapter->dev, "DNLD_CMD: host to card failed\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "DNLD_CMD: host to card failed\n");
 		if (adapter->iface_type == MWIFIEX_USB)
 			adapter->cmd_sent = false;
 		if (cmd_node->wait_q_enabled)
@@ -280,12 +286,14 @@
 					(adapter->seq_num, priv->bss_num,
 					 priv->bss_type)));
 
-	dev_dbg(adapter->dev,
-		"cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
+	mwifiex_dbg(adapter, CMD,
+		    "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
-		le16_to_cpu(sleep_cfm_buf->command),
-		le16_to_cpu(sleep_cfm_buf->action),
-		le16_to_cpu(sleep_cfm_buf->size),
-		le16_to_cpu(sleep_cfm_buf->seq_num));
+		    le16_to_cpu(sleep_cfm_buf->command),
+		    le16_to_cpu(sleep_cfm_buf->action),
+		    le16_to_cpu(sleep_cfm_buf->size),
+		    le16_to_cpu(sleep_cfm_buf->seq_num));
+	mwifiex_dbg_dump(adapter, CMD_D, "SLEEP_CFM buffer: ", sleep_cfm_buf,
+			 le16_to_cpu(sleep_cfm_buf->size));
 
 	if (adapter->iface_type == MWIFIEX_USB) {
 		sleep_cfm_tmp =
@@ -311,7 +319,7 @@
 	}
 
 	if (ret == -1) {
-		dev_err(adapter->dev, "SLEEP_CFM: failed\n");
+		mwifiex_dbg(adapter, ERROR, "SLEEP_CFM: failed\n");
 		adapter->dbg.num_cmd_sleep_cfm_host_to_card_failure++;
 		return -1;
 	}
@@ -362,8 +370,9 @@
 	for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) {
 		cmd_array[i].skb = dev_alloc_skb(MWIFIEX_SIZE_OF_CMD_BUFFER);
 		if (!cmd_array[i].skb) {
-			dev_err(adapter->dev, "ALLOC_CMD_BUF: out of memory\n");
-			return -1;
+			mwifiex_dbg(adapter, ERROR,
+				    "unable to allocate command buffer\n");
+			return -ENOMEM;
 		}
 	}
 
@@ -386,7 +395,8 @@
 
 	/* Need to check if cmd pool is allocated or not */
 	if (!adapter->cmd_pool) {
-		dev_dbg(adapter->dev, "info: FREE_CMD_BUF: cmd_pool is null\n");
+		mwifiex_dbg(adapter, FATAL,
+			    "info: FREE_CMD_BUF: cmd_pool is null\n");
 		return 0;
 	}
 
@@ -395,7 +405,8 @@
 	/* Release shared memory buffers */
 	for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) {
 		if (cmd_array[i].skb) {
-			dev_dbg(adapter->dev, "cmd: free cmd buffer %d\n", i);
+			mwifiex_dbg(adapter, CMD,
+				    "cmd: free cmd buffer %d\n", i);
 			dev_kfree_skb_any(cmd_array[i].skb);
 		}
 		if (!cmd_array[i].resp_skb)
@@ -409,7 +420,8 @@
 	}
 	/* Release struct cmd_ctrl_node */
 	if (adapter->cmd_pool) {
-		dev_dbg(adapter->dev, "cmd: free cmd pool\n");
+		mwifiex_dbg(adapter, CMD,
+			    "cmd: free cmd pool\n");
 		kfree(adapter->cmd_pool);
 		adapter->cmd_pool = NULL;
 	}
@@ -457,9 +469,11 @@
 		memset(rx_info, 0, sizeof(*rx_info));
 		rx_info->bss_num = priv->bss_num;
 		rx_info->bss_type = priv->bss_type;
+		mwifiex_dbg_dump(adapter, EVT_D, "Event Buf:",
+				 skb->data, skb->len);
 	}
 
-	dev_dbg(adapter->dev, "EVENT: cause: %#x\n", eventcause);
+	mwifiex_dbg(adapter, EVENT, "EVENT: cause: %#x\n", eventcause);
 
 	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
 		ret = mwifiex_process_uap_event(priv);
@@ -498,28 +512,33 @@
 	}
 
 	if (adapter->is_suspended) {
-		dev_err(adapter->dev, "PREP_CMD: device in suspended state\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "PREP_CMD: device in suspended state\n");
 		return -1;
 	}
 
 	if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
-		dev_err(adapter->dev, "PREP_CMD: host entering sleep state\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "PREP_CMD: host entering sleep state\n");
 		return -1;
 	}
 
 	if (adapter->surprise_removed) {
-		dev_err(adapter->dev, "PREP_CMD: card is removed\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "PREP_CMD: card is removed\n");
 		return -1;
 	}
 
 	if (adapter->is_cmd_timedout) {
-		dev_err(adapter->dev, "PREP_CMD: FW is in bad state\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "PREP_CMD: FW is in bad state\n");
 		return -1;
 	}
 
 	if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET) {
 		if (cmd_no != HostCmd_CMD_FUNC_INIT) {
-			dev_err(adapter->dev, "PREP_CMD: FW in reset state\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "PREP_CMD: FW in reset state\n");
 			return -1;
 		}
 	}
@@ -528,7 +547,8 @@
 	cmd_node = mwifiex_get_cmd_node(adapter);
 
 	if (!cmd_node) {
-		dev_err(adapter->dev, "PREP_CMD: no free cmd node\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "PREP_CMD: no free cmd node\n");
 		return -1;
 	}
 
@@ -536,7 +556,8 @@
 	mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf, sync);
 
 	if (!cmd_node->cmd_skb) {
-		dev_err(adapter->dev, "PREP_CMD: no free cmd buf\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "PREP_CMD: no free cmd buf\n");
 		return -1;
 	}
 
@@ -554,6 +575,8 @@
 		case HostCmd_CMD_UAP_BSS_START:
 		case HostCmd_CMD_UAP_BSS_STOP:
 		case HostCmd_CMD_UAP_STA_DEAUTH:
+		case HOST_CMD_APCMD_SYS_RESET:
+		case HOST_CMD_APCMD_STA_LIST:
 			ret = mwifiex_uap_prepare_cmd(priv, cmd_no, cmd_action,
 						      cmd_oid, data_buf,
 						      cmd_ptr);
@@ -571,7 +594,8 @@
 
 	/* Return error, since the command preparation failed */
 	if (ret) {
-		dev_err(adapter->dev, "PREP_CMD: cmd %#x preparation failed\n",
+		mwifiex_dbg(adapter, ERROR,
+			    "PREP_CMD: cmd %#x preparation failed\n",
-			cmd_no);
+			    cmd_no);
 		mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
 		return -1;
@@ -626,7 +650,8 @@
 	mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
 
 	atomic_dec(&adapter->cmd_pending);
-	dev_dbg(adapter->dev, "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n",
+	mwifiex_dbg(adapter, CMD,
+		    "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n",
-		le16_to_cpu(host_cmd->command),
-		atomic_read(&adapter->cmd_pending));
+		    le16_to_cpu(host_cmd->command),
+		    atomic_read(&adapter->cmd_pending));
 }
@@ -648,7 +673,7 @@
 
 	host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
 	if (!host_cmd) {
-		dev_err(adapter->dev, "QUEUE_CMD: host_cmd is NULL\n");
+		mwifiex_dbg(adapter, ERROR, "QUEUE_CMD: host_cmd is NULL\n");
 		return;
 	}
 
@@ -673,7 +698,8 @@
 	spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
 
 	atomic_inc(&adapter->cmd_pending);
-	dev_dbg(adapter->dev, "cmd: QUEUE_CMD: cmd=%#x, cmd_pending=%d\n",
+	mwifiex_dbg(adapter, CMD,
+		    "cmd: QUEUE_CMD: cmd=%#x, cmd_pending=%d\n",
-		command, atomic_read(&adapter->cmd_pending));
+		    command, atomic_read(&adapter->cmd_pending));
 }
 
@@ -699,7 +725,8 @@
 
 	/* Check if already in processing */
 	if (adapter->curr_cmd) {
-		dev_err(adapter->dev, "EXEC_NEXT_CMD: cmd in processing\n");
+		mwifiex_dbg(adapter, FATAL,
+			    "EXEC_NEXT_CMD: cmd in processing\n");
 		return -1;
 	}
 
@@ -721,8 +748,9 @@
 	priv = cmd_node->priv;
 
 	if (adapter->ps_state != PS_STATE_AWAKE) {
-		dev_err(adapter->dev, "%s: cannot send cmd in sleep state,"
-				" this should not happen\n", __func__);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: cannot send cmd in sleep state,\t"
+			    "this should not happen\n", __func__);
 		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
 		return ret;
 	}
@@ -772,8 +800,9 @@
 
 	if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) {
 		resp = (struct host_cmd_ds_command *) adapter->upld_buf;
-		dev_err(adapter->dev, "CMD_RESP: NULL curr_cmd, %#x\n",
-			le16_to_cpu(resp->command));
+		mwifiex_dbg(adapter, ERROR,
+			    "CMD_RESP: NULL curr_cmd, %#x\n",
+			    le16_to_cpu(resp->command));
 		return -1;
 	}
 
@@ -781,8 +810,9 @@
 
 	resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
 	if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
-		dev_err(adapter->dev, "CMD_RESP: %#x been canceled\n",
-			le16_to_cpu(resp->command));
+		mwifiex_dbg(adapter, ERROR,
+			    "CMD_RESP: %#x been canceled\n",
+			    le16_to_cpu(resp->command));
 		mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
 		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
 		adapter->curr_cmd = NULL;
@@ -794,7 +824,8 @@
 		/* Copy original response back to response buffer */
 		struct mwifiex_ds_misc_cmd *hostcmd;
 		uint16_t size = le16_to_cpu(resp->size);
-		dev_dbg(adapter->dev, "info: host cmd resp size = %d\n", size);
+		mwifiex_dbg(adapter, INFO,
+			    "info: host cmd resp size = %d\n", size);
 		size = min_t(u16, size, MWIFIEX_SIZE_OF_CMD_BUFFER);
 		if (adapter->curr_cmd->data_buf) {
 			hostcmd = adapter->curr_cmd->data_buf;
@@ -822,13 +853,15 @@
 	adapter->dbg.last_cmd_resp_id[adapter->dbg.last_cmd_resp_index] =
 								orig_cmdresp_no;
 
-	dev_dbg(adapter->dev,
-		"cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
-		orig_cmdresp_no, cmdresp_result,
-		le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num));
+	mwifiex_dbg(adapter, CMD,
+		    "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
+		    orig_cmdresp_no, cmdresp_result,
+		    le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num));
+	mwifiex_dbg_dump(adapter, CMD_D, "CMD_RESP buffer:", resp,
+			 le16_to_cpu(resp->size));
 
 	if (!(orig_cmdresp_no & HostCmd_RET_BIT)) {
-		dev_err(adapter->dev, "CMD_RESP: invalid cmd resp\n");
+		mwifiex_dbg(adapter, ERROR, "CMD_RESP: invalid cmd resp\n");
 		if (adapter->curr_cmd->wait_q_enabled)
 			adapter->cmd_wait_q.status = -1;
 
@@ -852,8 +885,9 @@
 	/* Check init command response */
 	if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
 		if (ret) {
-			dev_err(adapter->dev, "%s: cmd %#x failed during "
-				"initialization\n", __func__, cmdresp_no);
+			mwifiex_dbg(adapter, ERROR,
+				    "%s: cmd %#x failed during\t"
+				    "initialization\n", __func__, cmdresp_no);
 			mwifiex_init_fw_complete(adapter);
 			return -1;
 		} else if (adapter->last_init_cmd == cmdresp_no)
@@ -888,7 +922,8 @@
 
 	adapter->is_cmd_timedout = 1;
 	if (!adapter->curr_cmd) {
-		dev_dbg(adapter->dev, "cmd: empty curr_cmd\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "cmd: empty curr_cmd\n");
 		return;
 	}
 	cmd_node = adapter->curr_cmd;
@@ -897,47 +932,60 @@
 			adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index];
 		adapter->dbg.timeout_cmd_act =
 			adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index];
-		dev_err(adapter->dev,
-			"%s: Timeout cmd id = %#x, act = %#x\n", __func__,
-			adapter->dbg.timeout_cmd_id,
-			adapter->dbg.timeout_cmd_act);
+		mwifiex_dbg(adapter, MSG,
+			    "%s: Timeout cmd id = %#x, act = %#x\n", __func__,
+			    adapter->dbg.timeout_cmd_id,
+			    adapter->dbg.timeout_cmd_act);
 
-		dev_err(adapter->dev, "num_data_h2c_failure = %d\n",
-			adapter->dbg.num_tx_host_to_card_failure);
-		dev_err(adapter->dev, "num_cmd_h2c_failure = %d\n",
-			adapter->dbg.num_cmd_host_to_card_failure);
+		mwifiex_dbg(adapter, MSG,
+			    "num_data_h2c_failure = %d\n",
+			    adapter->dbg.num_tx_host_to_card_failure);
+		mwifiex_dbg(adapter, MSG,
+			    "num_cmd_h2c_failure = %d\n",
+			    adapter->dbg.num_cmd_host_to_card_failure);
 
-		dev_err(adapter->dev, "is_cmd_timedout = %d\n",
-			adapter->is_cmd_timedout);
-		dev_err(adapter->dev, "num_tx_timeout = %d\n",
-			adapter->dbg.num_tx_timeout);
+		mwifiex_dbg(adapter, MSG,
+			    "is_cmd_timedout = %d\n",
+			    adapter->is_cmd_timedout);
+		mwifiex_dbg(adapter, MSG,
+			    "num_tx_timeout = %d\n",
+			    adapter->dbg.num_tx_timeout);
 
-		dev_err(adapter->dev, "last_cmd_index = %d\n",
-			adapter->dbg.last_cmd_index);
-		dev_err(adapter->dev, "last_cmd_id: %*ph\n",
-			(int)sizeof(adapter->dbg.last_cmd_id),
-			adapter->dbg.last_cmd_id);
-		dev_err(adapter->dev, "last_cmd_act: %*ph\n",
-			(int)sizeof(adapter->dbg.last_cmd_act),
-			adapter->dbg.last_cmd_act);
+		mwifiex_dbg(adapter, MSG,
+			    "last_cmd_index = %d\n",
+			    adapter->dbg.last_cmd_index);
+		mwifiex_dbg(adapter, MSG,
+			    "last_cmd_id: %*ph\n",
+			    (int)sizeof(adapter->dbg.last_cmd_id),
+			    adapter->dbg.last_cmd_id);
+		mwifiex_dbg(adapter, MSG,
+			    "last_cmd_act: %*ph\n",
+			    (int)sizeof(adapter->dbg.last_cmd_act),
+			    adapter->dbg.last_cmd_act);
 
-		dev_err(adapter->dev, "last_cmd_resp_index = %d\n",
-			adapter->dbg.last_cmd_resp_index);
-		dev_err(adapter->dev, "last_cmd_resp_id: %*ph\n",
-			(int)sizeof(adapter->dbg.last_cmd_resp_id),
-			adapter->dbg.last_cmd_resp_id);
+		mwifiex_dbg(adapter, MSG,
+			    "last_cmd_resp_index = %d\n",
+			    adapter->dbg.last_cmd_resp_index);
+		mwifiex_dbg(adapter, MSG,
+			    "last_cmd_resp_id: %*ph\n",
+			    (int)sizeof(adapter->dbg.last_cmd_resp_id),
+			    adapter->dbg.last_cmd_resp_id);
 
-		dev_err(adapter->dev, "last_event_index = %d\n",
-			adapter->dbg.last_event_index);
-		dev_err(adapter->dev, "last_event: %*ph\n",
-			(int)sizeof(adapter->dbg.last_event),
-			adapter->dbg.last_event);
+		mwifiex_dbg(adapter, MSG,
+			    "last_event_index = %d\n",
+			    adapter->dbg.last_event_index);
+		mwifiex_dbg(adapter, MSG,
+			    "last_event: %*ph\n",
+			    (int)sizeof(adapter->dbg.last_event),
+			    adapter->dbg.last_event);
 
-		dev_err(adapter->dev, "data_sent=%d cmd_sent=%d\n",
-			adapter->data_sent, adapter->cmd_sent);
+		mwifiex_dbg(adapter, MSG,
+			    "data_sent=%d cmd_sent=%d\n",
+			    adapter->data_sent, adapter->cmd_sent);
 
-		dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n",
-			adapter->ps_mode, adapter->ps_state);
+		mwifiex_dbg(adapter, MSG,
+			    "ps_mode=%d ps_state=%d\n",
+			    adapter->ps_mode, adapter->ps_state);
 
 		if (cmd_node->wait_q_enabled) {
 			adapter->cmd_wait_q.status = -ETIMEDOUT;
@@ -948,8 +996,8 @@
 	if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
 		mwifiex_init_fw_complete(adapter);
 
-	if (adapter->if_ops.fw_dump)
-		adapter->if_ops.fw_dump(adapter);
+	if (adapter->if_ops.device_dump)
+		adapter->if_ops.device_dump(adapter);
 
 	if (adapter->if_ops.card_reset)
 		adapter->if_ops.card_reset(adapter);
@@ -1015,7 +1063,7 @@
 			if (!priv)
 				continue;
 			if (priv->scan_request) {
-				dev_dbg(adapter->dev, "info: aborting scan\n");
+				mwifiex_dbg(adapter, WARN, "info: aborting scan\n");
 				cfg80211_scan_done(priv->scan_request, 1);
 				priv->scan_request = NULL;
 			}
@@ -1075,7 +1123,7 @@
 			if (!priv)
 				continue;
 			if (priv->scan_request) {
-				dev_dbg(adapter->dev, "info: aborting scan\n");
+				mwifiex_dbg(adapter, WARN, "info: aborting scan\n");
 				cfg80211_scan_done(priv->scan_request, 1);
 				priv->scan_request = NULL;
 			}
@@ -1100,11 +1148,11 @@
 	    !adapter->curr_cmd && !IS_CARD_RX_RCVD(adapter))
 		mwifiex_dnld_sleep_confirm_cmd(adapter);
 	else
-		dev_dbg(adapter->dev,
-			"cmd: Delay Sleep Confirm (%s%s%s)\n",
-			(adapter->cmd_sent) ? "D" : "",
-			(adapter->curr_cmd) ? "C" : "",
-			(IS_CARD_RX_RCVD(adapter)) ? "R" : "");
+		mwifiex_dbg(adapter, CMD,
+			    "cmd: Delay Sleep Confirm (%s%s%s)\n",
+			    (adapter->cmd_sent) ? "D" : "",
+			    (adapter->curr_cmd) ? "C" : "",
+			    (IS_CARD_RX_RCVD(adapter)) ? "R" : "");
 }
 
 /*
@@ -1120,15 +1168,18 @@
 			priv->adapter->hs_activated = true;
 			mwifiex_update_rxreor_flags(priv->adapter,
 						    RXREOR_FORCE_NO_DROP);
-			dev_dbg(priv->adapter->dev, "event: hs_activated\n");
+			mwifiex_dbg(priv->adapter, EVENT,
+				    "event: hs_activated\n");
 			priv->adapter->hs_activate_wait_q_woken = true;
 			wake_up_interruptible(
 				&priv->adapter->hs_activate_wait_q);
 		} else {
-			dev_dbg(priv->adapter->dev, "event: HS not configured\n");
+			mwifiex_dbg(priv->adapter, EVENT,
+				    "event: HS not configured\n");
 		}
 	} else {
-		dev_dbg(priv->adapter->dev, "event: hs_deactivated\n");
+		mwifiex_dbg(priv->adapter, EVENT,
+			    "event: hs_deactivated\n");
 		priv->adapter->hs_activated = false;
 	}
 }
@@ -1156,11 +1207,12 @@
 		mwifiex_hs_activated_event(priv, true);
 		return 0;
 	} else {
-		dev_dbg(adapter->dev, "cmd: CMD_RESP: HS_CFG cmd reply"
-			" result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n",
-			resp->result, conditions,
-			phs_cfg->params.hs_config.gpio,
-			phs_cfg->params.hs_config.gap);
+		mwifiex_dbg(adapter, CMD,
+			    "cmd: CMD_RESP: HS_CFG cmd reply\t"
+			    " result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n",
+			    resp->result, conditions,
+			    phs_cfg->params.hs_config.gpio,
+			    phs_cfg->params.hs_config.gap);
 	}
 	if (conditions != HS_CFG_CANCEL) {
 		adapter->is_hs_configured = true;
@@ -1182,8 +1234,10 @@
 void
 mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
 {
-	dev_dbg(adapter->dev, "info: %s: auto cancelling host sleep"
-		" since there is interrupt from the firmware\n", __func__);
+	mwifiex_dbg(adapter, INFO,
+		    "info: %s: auto cancelling host sleep\t"
+		    "since there is interrupt from the firmware\n",
+		    __func__);
 
 	adapter->if_ops.wakeup(adapter);
 	adapter->hs_activated = false;
@@ -1212,13 +1266,14 @@
 	uint16_t seq_num = le16_to_cpu(cmd->seq_num);
 
 	if (!upld_len) {
-		dev_err(adapter->dev, "%s: cmd size is 0\n", __func__);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: cmd size is 0\n", __func__);
 		return;
 	}
 
-	dev_dbg(adapter->dev,
-		"cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
-		command, result, le16_to_cpu(cmd->size), seq_num);
+	mwifiex_dbg(adapter, CMD,
+		    "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
+		    command, result, le16_to_cpu(cmd->size), seq_num);
 
 	/* Get BSS number and corresponding priv */
 	priv = mwifiex_get_priv_by_id(adapter, HostCmd_GET_BSS_NO(seq_num),
@@ -1232,15 +1287,16 @@
 	command &= HostCmd_CMD_ID_MASK;
 
 	if (command != HostCmd_CMD_802_11_PS_MODE_ENH) {
-		dev_err(adapter->dev,
-			"%s: rcvd unexpected resp for cmd %#x, result = %x\n",
-			__func__, command, result);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: rcvd unexpected resp for cmd %#x, result = %x\n",
+			    __func__, command, result);
 		return;
 	}
 
 	if (result) {
-		dev_err(adapter->dev, "%s: sleep confirm cmd failed\n",
-			__func__);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: sleep confirm cmd failed\n",
+			    __func__);
 		adapter->pm_wakeup_card_req = false;
 		adapter->ps_state = PS_STATE_AWAKE;
 		return;
@@ -1305,7 +1361,8 @@
 					sizeof(struct mwifiex_ie_types_header));
 			cmd_size += sizeof(*ps_tlv);
 			tlv += sizeof(*ps_tlv);
-			dev_dbg(adapter->dev, "cmd: PS Command: Enter PS\n");
+			mwifiex_dbg(priv->adapter, CMD,
+				    "cmd: PS Command: Enter PS\n");
 			ps_mode->null_pkt_interval =
 					cpu_to_le16(adapter->null_pkt_interval);
 			ps_mode->multiple_dtims =
@@ -1335,8 +1392,8 @@
 			tlv += sizeof(*auto_ds_tlv);
 			if (auto_ds)
 				idletime = auto_ds->idle_time;
-			dev_dbg(priv->adapter->dev,
-				"cmd: PS Command: Enter Auto Deep Sleep\n");
+			mwifiex_dbg(priv->adapter, CMD,
+				    "cmd: PS Command: Enter Auto Deep Sleep\n");
 			auto_ds_tlv->deep_sleep_timeout = cpu_to_le16(idletime);
 		}
 		cmd->size = cpu_to_le16(cmd_size);
@@ -1363,27 +1420,31 @@
 	uint16_t auto_ps_bitmap =
 		le16_to_cpu(ps_mode->params.ps_bitmap);
 
-	dev_dbg(adapter->dev,
-		"info: %s: PS_MODE cmd reply result=%#x action=%#X\n",
-		__func__, resp->result, action);
+	mwifiex_dbg(adapter, INFO,
+		    "info: %s: PS_MODE cmd reply result=%#x action=%#X\n",
+		    __func__, resp->result, action);
 	if (action == EN_AUTO_PS) {
 		if (auto_ps_bitmap & BITMAP_AUTO_DS) {
-			dev_dbg(adapter->dev, "cmd: Enabled auto deep sleep\n");
+			mwifiex_dbg(adapter, CMD,
+				    "cmd: Enabled auto deep sleep\n");
 			priv->adapter->is_deep_sleep = true;
 		}
 		if (auto_ps_bitmap & BITMAP_STA_PS) {
-			dev_dbg(adapter->dev, "cmd: Enabled STA power save\n");
+			mwifiex_dbg(adapter, CMD,
+				    "cmd: Enabled STA power save\n");
 			if (adapter->sleep_period.period)
-				dev_dbg(adapter->dev,
-					"cmd: set to uapsd/pps mode\n");
+				mwifiex_dbg(adapter, CMD,
+					    "cmd: set to uapsd/pps mode\n");
 		}
 	} else if (action == DIS_AUTO_PS) {
 		if (ps_bitmap & BITMAP_AUTO_DS) {
 			priv->adapter->is_deep_sleep = false;
-			dev_dbg(adapter->dev, "cmd: Disabled auto deep sleep\n");
+			mwifiex_dbg(adapter, CMD,
+				    "cmd: Disabled auto deep sleep\n");
 		}
 		if (ps_bitmap & BITMAP_STA_PS) {
-			dev_dbg(adapter->dev, "cmd: Disabled STA power save\n");
+			mwifiex_dbg(adapter, CMD,
+				    "cmd: Disabled STA power save\n");
 			if (adapter->sleep_period.period) {
 				adapter->delay_null_pkt = false;
 				adapter->tx_lock_flag = false;
@@ -1396,7 +1457,8 @@
 		else
 			adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
 
-		dev_dbg(adapter->dev, "cmd: ps_bitmap=%#x\n", ps_bitmap);
+		mwifiex_dbg(adapter, CMD,
+			    "cmd: ps_bitmap=%#x\n", ps_bitmap);
 
 		if (pm_cfg) {
 			/* This section is for get power save mode */
@@ -1533,29 +1595,29 @@
 							api_rev->major_ver;
 					adapter->key_api_minor_ver =
 							api_rev->minor_ver;
-					dev_dbg(adapter->dev,
-						"key_api v%d.%d\n",
-						adapter->key_api_major_ver,
-						adapter->key_api_minor_ver);
+					mwifiex_dbg(adapter, INFO,
+						    "key_api v%d.%d\n",
+						    adapter->key_api_major_ver,
+						    adapter->key_api_minor_ver);
 					break;
 				case FW_API_VER_ID:
 					adapter->fw_api_ver =
 							api_rev->major_ver;
-					dev_dbg(adapter->dev,
-						"Firmware api version %d\n",
-						adapter->fw_api_ver);
+					mwifiex_dbg(adapter, INFO,
+						    "Firmware api version %d\n",
+						    adapter->fw_api_ver);
 					break;
 				default:
-					dev_warn(adapter->dev,
-						 "Unknown api_id: %d\n",
-						 api_id);
+					mwifiex_dbg(adapter, FATAL,
+						    "Unknown api_id: %d\n",
+						    api_id);
 					break;
 				}
 				break;
 			default:
-				dev_warn(adapter->dev,
-					 "Unknown GET_HW_SPEC TLV type: %#x\n",
-					 le16_to_cpu(tlv->type));
+				mwifiex_dbg(adapter, FATAL,
+					    "Unknown GET_HW_SPEC TLV type: %#x\n",
+					    le16_to_cpu(tlv->type));
 				break;
 			}
 			parsed_len += le16_to_cpu(tlv->len) +
@@ -1565,14 +1627,16 @@
 		}
 	}
 
-	dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n",
-		adapter->fw_release_number);
-	dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n",
-		hw_spec->permanent_addr);
-	dev_dbg(adapter->dev,
-		"info: GET_HW_SPEC: hw_if_version=%#x version=%#x\n",
-		le16_to_cpu(hw_spec->hw_if_version),
-		le16_to_cpu(hw_spec->version));
+	mwifiex_dbg(adapter, INFO,
+		    "info: GET_HW_SPEC: fw_release_number- %#x\n",
+		    adapter->fw_release_number);
+	mwifiex_dbg(adapter, INFO,
+		    "info: GET_HW_SPEC: permanent addr: %pM\n",
+		    hw_spec->permanent_addr);
+	mwifiex_dbg(adapter, INFO,
+		    "info: GET_HW_SPEC: hw_if_version=%#x version=%#x\n",
+		    le16_to_cpu(hw_spec->hw_if_version),
+		    le16_to_cpu(hw_spec->version));
 
 	ether_addr_copy(priv->adapter->perm_addr, hw_spec->permanent_addr);
 	adapter->region_code = le16_to_cpu(hw_spec->region_code);
@@ -1585,8 +1649,8 @@
 	/* If it's unidentified region code, use the default (USA) */
 	if (i >= MWIFIEX_MAX_REGION_CODE) {
 		adapter->region_code = 0x10;
-		dev_dbg(adapter->dev,
-			"cmd: unknown region code, use default (USA)\n");
+		mwifiex_dbg(adapter, WARN,
+			    "cmd: unknown region code, use default (USA)\n");
 	}
 
 	adapter->hw_dot_11n_dev_cap = le32_to_cpu(hw_spec->dot_11n_dev_cap);
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 1fb329d..5a0636d4 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -152,24 +152,24 @@
 }
 
 /*
- * Proc firmware dump read handler.
+ * Proc device dump read handler.
  *
- * This function is called when the 'fw_dump' file is opened for
+ * This function is called when the 'device_dump' file is opened for
  * reading.
- * This function dumps firmware memory in different files
- * (ex. DTCM, ITCM, SQRAM etc.) based on the the segments for
+ * This function dumps driver information and firmware memory segments
+ * (ex. DTCM, ITCM, SQRAM etc.) for
  * debugging.
  */
 static ssize_t
-mwifiex_fw_dump_read(struct file *file, char __user *ubuf,
-		     size_t count, loff_t *ppos)
+mwifiex_device_dump_read(struct file *file, char __user *ubuf,
+			 size_t count, loff_t *ppos)
 {
 	struct mwifiex_private *priv = file->private_data;
 
-	if (!priv->adapter->if_ops.fw_dump)
+	if (!priv->adapter->if_ops.device_dump)
 		return -EIO;
 
-	priv->adapter->if_ops.fw_dump(priv->adapter);
+	priv->adapter->if_ops.device_dump(priv->adapter);
 
 	return 0;
 }
@@ -535,6 +535,144 @@
 	return ret;
 }
 
+/* Proc debug_mask file read handler.
+ * This function is called when the 'debug_mask' file is opened for reading.
+ * This function can be used to read the driver debugging mask value.
+ */
+static ssize_t
+mwifiex_debug_mask_read(struct file *file, char __user *ubuf,
+			size_t count, loff_t *ppos)
+{
+	struct mwifiex_private *priv =
+		(struct mwifiex_private *)file->private_data;
+	unsigned long page = get_zeroed_page(GFP_KERNEL);
+	char *buf = (char *)page;
+	size_t ret = 0;
+	int pos = 0;
+
+	if (!buf)
+		return -ENOMEM;
+
+	pos += snprintf(buf, PAGE_SIZE, "debug mask=0x%08x\n",
+			priv->adapter->debug_mask);
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+
+	free_page(page);
+	return ret;
+}
+
+/* Proc debug_mask file write handler.
+ * This function is called when the 'debug_mask' file is opened for writing.
+ * This function can be used to set the driver debugging mask value.
+ */
+static ssize_t
+mwifiex_debug_mask_write(struct file *file, const char __user *ubuf,
+			 size_t count, loff_t *ppos)
+{
+	int ret;
+	unsigned long debug_mask;
+	struct mwifiex_private *priv = (void *)file->private_data;
+	unsigned long addr = get_zeroed_page(GFP_KERNEL);
+	char *buf = (void *)addr;
+	size_t buf_size = min(count, (size_t)(PAGE_SIZE - 1));
+
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, ubuf, buf_size)) {
+		ret = -EFAULT;
+		goto done;
+	}
+
+	if (kstrtoul(buf, 0, &debug_mask)) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	priv->adapter->debug_mask = debug_mask;
+	ret = count;
+done:
+	free_page(addr);
+	return ret;
+}
+
+/* Proc memrw file write handler.
+ * This function is called when the 'memrw' file is opened for writing.
+ * This function can be used to write to a memory location.
+ */
+static ssize_t
+mwifiex_memrw_write(struct file *file, const char __user *ubuf, size_t count,
+		    loff_t *ppos)
+{
+	int ret;
+	char cmd;
+	struct mwifiex_ds_mem_rw mem_rw;
+	u16 cmd_action;
+	struct mwifiex_private *priv = (void *)file->private_data;
+	unsigned long addr = get_zeroed_page(GFP_KERNEL);
+	char *buf = (void *)addr;
+	size_t buf_size = min(count, (size_t)(PAGE_SIZE - 1));
+
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, ubuf, buf_size)) {
+		ret = -EFAULT;
+		goto done;
+	}
+
+	ret = sscanf(buf, "%c %x %x", &cmd, &mem_rw.addr, &mem_rw.value);
+	if (ret != 3) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if ((cmd == 'r') || (cmd == 'R')) {
+		cmd_action = HostCmd_ACT_GEN_GET;
+		mem_rw.value = 0;
+	} else if ((cmd == 'w') || (cmd == 'W')) {
+		cmd_action = HostCmd_ACT_GEN_SET;
+	} else {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	memcpy(&priv->mem_rw, &mem_rw, sizeof(mem_rw));
+	if (mwifiex_send_cmd(priv, HostCmd_CMD_MEM_ACCESS, cmd_action, 0,
+			     &mem_rw, true))
+		ret = -1;
+	else
+		ret = count;
+
+done:
+	free_page(addr);
+	return ret;
+}
+
+/* Proc memrw file read handler.
+ * This function is called when the 'memrw' file is opened for reading.
+ * This function can be used to read from a memory location.
+ */
+static ssize_t
+mwifiex_memrw_read(struct file *file, char __user *ubuf,
+		   size_t count, loff_t *ppos)
+{
+	struct mwifiex_private *priv = (void *)file->private_data;
+	unsigned long addr = get_zeroed_page(GFP_KERNEL);
+	char *buf = (char *)addr;
+	int ret, pos = 0;
+
+	if (!buf)
+		return -ENOMEM;
+
+	pos += snprintf(buf, PAGE_SIZE, "0x%x 0x%x\n", priv->mem_rw.addr,
+			priv->mem_rw.value);
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+
+	free_page(addr);
+	return ret;
+}
+
 static u32 saved_offset = -1, saved_bytes = -1;
 
 /*
@@ -654,7 +792,8 @@
 	memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg));
 
 	if (arg_num > 3) {
-		dev_err(priv->adapter->dev, "Too many arguments\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Too many arguments\n");
 		ret = -EINVAL;
 		goto done;
 	}
@@ -746,11 +885,13 @@
 MWIFIEX_DFS_FILE_READ_OPS(info);
 MWIFIEX_DFS_FILE_READ_OPS(debug);
 MWIFIEX_DFS_FILE_READ_OPS(getlog);
-MWIFIEX_DFS_FILE_READ_OPS(fw_dump);
+MWIFIEX_DFS_FILE_READ_OPS(device_dump);
 MWIFIEX_DFS_FILE_OPS(regrdwr);
 MWIFIEX_DFS_FILE_OPS(rdeeprom);
+MWIFIEX_DFS_FILE_OPS(memrw);
 MWIFIEX_DFS_FILE_OPS(hscfg);
 MWIFIEX_DFS_FILE_OPS(histogram);
+MWIFIEX_DFS_FILE_OPS(debug_mask);
 
 /*
  * This function creates the debug FS directory structure and the files.
@@ -772,9 +913,11 @@
 	MWIFIEX_DFS_ADD_FILE(getlog);
 	MWIFIEX_DFS_ADD_FILE(regrdwr);
 	MWIFIEX_DFS_ADD_FILE(rdeeprom);
-	MWIFIEX_DFS_ADD_FILE(fw_dump);
+	MWIFIEX_DFS_ADD_FILE(device_dump);
+	MWIFIEX_DFS_ADD_FILE(memrw);
 	MWIFIEX_DFS_ADD_FILE(hscfg);
 	MWIFIEX_DFS_ADD_FILE(histogram);
+	MWIFIEX_DFS_ADD_FILE(debug_mask);
 }
 
 /*
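
A note on the pattern the new debug_mask and memrw handlers follow: it
is the stock debugfs idiom of bouncing through a zeroed page, copying
the user string in (or formatting kernel state out), parsing, and
freeing the page.  A minimal self-contained sketch of that read/write
pair for a hypothetical u32 attribute (all names here are illustrative,
not part of mwifiex):

	#include <linux/debugfs.h>
	#include <linux/fs.h>
	#include <linux/gfp.h>
	#include <linux/kernel.h>
	#include <linux/uaccess.h>

	static u32 example_mask;

	static ssize_t example_read(struct file *file, char __user *ubuf,
				    size_t count, loff_t *ppos)
	{
		unsigned long page = get_zeroed_page(GFP_KERNEL);
		char *buf = (char *)page;
		ssize_t ret;
		int pos;

		if (!buf)
			return -ENOMEM;

		/* format kernel state into the bounce buffer */
		pos = scnprintf(buf, PAGE_SIZE, "mask=0x%08x\n", example_mask);
		ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);

		free_page(page);
		return ret;
	}

	static ssize_t example_write(struct file *file, const char __user *ubuf,
				     size_t count, loff_t *ppos)
	{
		unsigned long page = get_zeroed_page(GFP_KERNEL);
		char *buf = (char *)page;
		unsigned long val;
		ssize_t ret = count;

		if (!buf)
			return -ENOMEM;

		/* keep one byte for the NUL terminator kstrtoul() expects */
		if (copy_from_user(buf, ubuf, min_t(size_t, count, PAGE_SIZE - 1)))
			ret = -EFAULT;
		else if (kstrtoul(buf, 0, &val))
			ret = -EINVAL;
		else
			example_mask = val;

		free_page(page);
		return ret;
	}
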
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 38f24e0..51e3447 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -29,7 +29,7 @@
 #include <uapi/linux/if_arp.h>
 #include <net/mac80211.h>
 
-
+#define MWIFIEX_BSS_COEX_COUNT	     2
 #define MWIFIEX_MAX_BSS_NUM         (3)
 
 #define MWIFIEX_DMA_ALIGN_SZ	    64
@@ -49,7 +49,12 @@
 
 #define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE        64
 #define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE        64
+#define MWIFIEX_STA_COEX_AMPDU_DEF_RXWINSIZE   16
+
 #define MWIFIEX_UAP_AMPDU_DEF_TXWINSIZE        32
+
+#define MWIFIEX_UAP_COEX_AMPDU_DEF_RXWINSIZE   16
+
 #define MWIFIEX_UAP_AMPDU_DEF_RXWINSIZE        16
 #define MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE   64
 #define MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE   64
diff --git a/drivers/net/wireless/mwifiex/ethtool.c b/drivers/net/wireless/mwifiex/ethtool.c
index 65d8d6d..58400c6 100644
--- a/drivers/net/wireless/mwifiex/ethtool.c
+++ b/drivers/net/wireless/mwifiex/ethtool.c
@@ -64,104 +64,7 @@
 	return 0;
 }
 
-static int
-mwifiex_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
-{
-	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
-	struct mwifiex_adapter *adapter = priv->adapter;
-	struct memory_type_mapping *entry;
-
-	if (!adapter->if_ops.fw_dump)
-		return -ENOTSUPP;
-
-	dump->flag = adapter->curr_mem_idx;
-	dump->version = 1;
-	if (adapter->curr_mem_idx == MWIFIEX_DRV_INFO_IDX) {
-		dump->len = adapter->drv_info_size;
-	} else if (adapter->curr_mem_idx != MWIFIEX_FW_DUMP_IDX) {
-		entry = &adapter->mem_type_mapping_tbl[adapter->curr_mem_idx];
-		dump->len = entry->mem_size;
-	} else {
-		dump->len = 0;
-	}
-
-	return 0;
-}
-
-static int
-mwifiex_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
-		      void *buffer)
-{
-	u8 *p = buffer;
-	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
-	struct mwifiex_adapter *adapter = priv->adapter;
-	struct memory_type_mapping *entry;
-
-	if (!adapter->if_ops.fw_dump)
-		return -ENOTSUPP;
-
-	if (adapter->curr_mem_idx == MWIFIEX_DRV_INFO_IDX) {
-		if (!adapter->drv_info_dump)
-			return -EFAULT;
-		memcpy(p, adapter->drv_info_dump, adapter->drv_info_size);
-		return 0;
-	}
-
-	if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) {
-		dev_err(adapter->dev, "firmware dump in progress!!\n");
-		return -EBUSY;
-	}
-
-	entry = &adapter->mem_type_mapping_tbl[adapter->curr_mem_idx];
-
-	if (!entry->mem_ptr)
-		return -EFAULT;
-
-	memcpy(p, entry->mem_ptr, entry->mem_size);
-
-	entry->mem_size = 0;
-	vfree(entry->mem_ptr);
-	entry->mem_ptr = NULL;
-
-	return 0;
-}
-
-static int mwifiex_set_dump(struct net_device *dev, struct ethtool_dump *val)
-{
-	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
-	struct mwifiex_adapter *adapter = priv->adapter;
-
-	if (!adapter->if_ops.fw_dump)
-		return -ENOTSUPP;
-
-	if (val->flag == MWIFIEX_DRV_INFO_IDX) {
-		adapter->curr_mem_idx = MWIFIEX_DRV_INFO_IDX;
-		return 0;
-	}
-
-	if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) {
-		dev_err(adapter->dev, "firmware dump in progress!!\n");
-		return -EBUSY;
-	}
-
-	if (val->flag == MWIFIEX_FW_DUMP_IDX) {
-		adapter->curr_mem_idx = val->flag;
-		adapter->if_ops.fw_dump(adapter);
-		return 0;
-	}
-
-	if (val->flag < 0 || val->flag >= adapter->num_mem_types)
-		return -EINVAL;
-
-	adapter->curr_mem_idx = val->flag;
-
-	return 0;
-}
-
 const struct ethtool_ops mwifiex_ethtool_ops = {
 	.get_wol = mwifiex_ethtool_get_wol,
 	.set_wol = mwifiex_ethtool_set_wol,
-	.get_dump_flag = mwifiex_get_dump_flag,
-	.get_dump_data = mwifiex_get_dump_data,
-	.set_dump = mwifiex_set_dump,
 };
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 59d8964..cd09051 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -128,6 +128,7 @@
 
 #define TLV_TYPE_UAP_SSID			0x0000
 #define TLV_TYPE_UAP_RATES			0x0001
+#define TLV_TYPE_PWR_CONSTRAINT			0x0020
 
 #define PROPRIETARY_TLV_BASE_ID                 0x0100
 #define TLV_TYPE_KEY_MATERIAL       (PROPRIETARY_TLV_BASE_ID + 0)
@@ -174,6 +175,8 @@
 #define TLV_TYPE_SCAN_CHANNEL_GAP   (PROPRIETARY_TLV_BASE_ID + 197)
 #define TLV_TYPE_API_REV            (PROPRIETARY_TLV_BASE_ID + 199)
 #define TLV_TYPE_CHANNEL_STATS      (PROPRIETARY_TLV_BASE_ID + 198)
+#define TLV_BTCOEX_WL_AGGR_WINSIZE  (PROPRIETARY_TLV_BASE_ID + 202)
+#define TLV_BTCOEX_WL_SCANTIME      (PROPRIETARY_TLV_BASE_ID + 203)
 
 #define MWIFIEX_TX_DATA_BUF_SIZE_2K        2048
 
@@ -323,15 +326,18 @@
 #define HostCmd_CMD_802_11_SUBSCRIBE_EVENT            0x0075
 #define HostCmd_CMD_802_11_TX_RATE_QUERY              0x007f
 #define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS     0x0083
+#define HostCmd_CMD_MEM_ACCESS                        0x0086
 #define HostCmd_CMD_CFG_DATA                          0x008f
 #define HostCmd_CMD_VERSION_EXT                       0x0097
 #define HostCmd_CMD_MEF_CFG                           0x009a
 #define HostCmd_CMD_RSSI_INFO                         0x00a4
 #define HostCmd_CMD_FUNC_INIT                         0x00a9
 #define HostCmd_CMD_FUNC_SHUTDOWN                     0x00aa
+#define HOST_CMD_APCMD_SYS_RESET                      0x00af
 #define HostCmd_CMD_UAP_SYS_CONFIG                    0x00b0
 #define HostCmd_CMD_UAP_BSS_START                     0x00b1
 #define HostCmd_CMD_UAP_BSS_STOP                      0x00b2
+#define HOST_CMD_APCMD_STA_LIST                       0x00b3
 #define HostCmd_CMD_UAP_STA_DEAUTH                    0x00b5
 #define HostCmd_CMD_11N_CFG                           0x00cd
 #define HostCmd_CMD_11N_ADDBA_REQ                     0x00ce
@@ -418,8 +424,12 @@
 #define HS_CFG_COND_MAC_EVENT		0x00000004
 #define HS_CFG_COND_MULTICAST_DATA	0x00000008
 
-#define MWIFIEX_TIMEOUT_FOR_AP_RESP		0xfffc
-#define MWIFIEX_STATUS_CODE_AUTH_TIMEOUT	2
+#define CONNECT_ERR_AUTH_ERR_STA_FAILURE	0xFFFB
+#define CONNECT_ERR_ASSOC_ERR_TIMEOUT		0xFFFC
+#define CONNECT_ERR_ASSOC_ERR_AUTH_REFUSED	0xFFFD
+#define CONNECT_ERR_AUTH_MSG_UNHANDLED		0xFFFE
+#define CONNECT_ERR_STA_FAILURE			0xFFFF
+
 
 #define CMD_F_HOSTCMD           (1 << 0)
 #define CMD_F_CANCELED          (1 << 1)
@@ -502,6 +512,7 @@
 #define EVENT_EXT_SCAN_REPORT           0x00000058
 #define EVENT_REMAIN_ON_CHAN_EXPIRED    0x0000005f
 #define EVENT_TX_STATUS_REPORT		0x00000074
+#define EVENT_BT_COEX_WLAN_PARA_CHANGE	0X00000076
 
 #define EVENT_ID_MASK                   0xffff
 #define BSS_NUM_MASK                    0xf
@@ -626,7 +637,12 @@
 	__le16 rx_pkt_type;
 	__le16 seq_num;
 	u8 priority;
-	u8 reserved1;
+	u8 rx_rate;
+	s8 snr;
+	s8 nf;
+	u8 ht_info;
+	u8 reserved[3];
+	u8 flags;
 };
 
 struct mwifiex_fw_chan_stats {
@@ -1150,6 +1166,13 @@
 	DOT11H_I = 10,
 };
 
+enum mwifiex_assocmd_failurepoint {
+	MWIFIEX_ASSOC_CMD_SUCCESS = 0,
+	MWIFIEX_ASSOC_CMD_FAILURE_ASSOC,
+	MWIFIEX_ASSOC_CMD_FAILURE_AUTH,
+	MWIFIEX_ASSOC_CMD_FAILURE_JOIN
+};
+
 #define MAX_SNMP_BUF_SIZE   128
 
 struct host_cmd_ds_802_11_snmp_mib {
@@ -1447,6 +1470,18 @@
 	__le16 reason;
 } __packed;
 
+struct mwifiex_ie_types_sta_info {
+	struct mwifiex_ie_types_header header;
+	u8 mac[ETH_ALEN];
+	u8 power_mfg_status;
+	s8 rssi;
+};
+
+struct host_cmd_ds_sta_list {
+	u16 sta_count;
+	u8 tlv[0];
+} __packed;
+
 struct mwifiex_ie_types_pwr_capability {
 	struct mwifiex_ie_types_header header;
 	s8 min_pwr;
@@ -1576,6 +1611,13 @@
 	u8 ext_capab[0];
 } __packed;
 
+struct host_cmd_ds_mem_access {
+	__le16 action;
+	__le16 reserved;
+	__le32 addr;
+	__le32 value;
+};
+
 struct mwifiex_ie_types_qos_info {
 	struct mwifiex_ie_types_header header;
 	u8 qos_info;
@@ -1742,6 +1784,27 @@
 	__le32 sta_ao_timer;
 } __packed;
 
+struct host_cmd_tlv_power_constraint {
+	struct mwifiex_ie_types_header header;
+	u8 constraint;
+} __packed;
+
+struct mwifiex_ie_types_btcoex_scan_time {
+	struct mwifiex_ie_types_header header;
+	u8 coex_scan;
+	u8 reserved;
+	u16 min_scan_time;
+	u16 max_scan_time;
+} __packed;
+
+struct mwifiex_ie_types_btcoex_aggr_win_size {
+	struct mwifiex_ie_types_header header;
+	u8 coex_win_size;
+	u8 tx_win_size;
+	u8 rx_win_size;
+	u8 reserved;
+} __packed;
+
 struct host_cmd_ds_version_ext {
 	u8 version_str_sel;
 	char version_str[128];
@@ -1958,6 +2021,7 @@
 		struct host_cmd_ds_p2p_mode_cfg mode_cfg;
 		struct host_cmd_ds_802_11_ibss_status ibss_coalescing;
 		struct host_cmd_ds_mef_cfg mef_cfg;
+		struct host_cmd_ds_mem_access mem;
 		struct host_cmd_ds_mac_reg_access mac_reg;
 		struct host_cmd_ds_bbp_reg_access bbp_reg;
 		struct host_cmd_ds_rf_reg_access rf_reg;
@@ -1968,6 +2032,7 @@
 		struct host_cmd_ds_802_11_subsc_evt subsc_evt;
 		struct host_cmd_ds_sys_config uap_sys_config;
 		struct host_cmd_ds_sta_deauth sta_deauth;
+		struct host_cmd_ds_sta_list sta_list;
 		struct host_cmd_11ac_vht_cfg vht_cfg;
 		struct host_cmd_ds_coalesce_cfg coalesce_cfg;
 		struct host_cmd_ds_tdls_oper tdls_oper;
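
On the new HOST_CMD_APCMD_STA_LIST plumbing above: host_cmd_ds_sta_list
is a station count followed by a packed run of TLVs, each a
mwifiex_ie_types_sta_info carrying one peer's MAC address, power
management status and RSSI.  A response parser would walk the flexible
array header by header; a sketch of such a walk, using the structures
added here (the function name and bounds handling are illustrative, not
lifted from the driver):

	static void example_parse_sta_list(struct mwifiex_adapter *adapter,
					   struct host_cmd_ds_sta_list *list,
					   int payload_len)
	{
		u8 *pos = list->tlv;
		int left = payload_len - sizeof(*list);
		u16 i;

		for (i = 0; i < list->sta_count; i++) {
			struct mwifiex_ie_types_sta_info *sta = (void *)pos;
			int tlv_len;

			if (left < (int)sizeof(*sta))
				break;		/* truncated response */

			mwifiex_dbg(adapter, INFO, "sta %pM, rssi %d\n",
				    sta->mac, sta->rssi);

			/* step by the TLV's own length, not sizeof(*sta),
			 * in case firmware appends fields later
			 */
			tlv_len = le16_to_cpu(sta->header.len) +
				  sizeof(struct mwifiex_ie_types_header);
			if (tlv_len < (int)sizeof(*sta) || tlv_len > left)
				break;		/* malformed TLV */
			pos += tlv_len;
			left -= tlv_len;
		}
	}
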
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index f3b6ed2..0ba8945 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -320,63 +320,81 @@
 /* This function parses  head and tail IEs, from cfg80211_beacon_data and sets
  * these IE to FW.
  */
-static int mwifiex_uap_set_head_tail_ies(struct mwifiex_private *priv,
-					 struct cfg80211_beacon_data *info)
+static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
+				      struct cfg80211_beacon_data *info)
 {
 	struct mwifiex_ie *gen_ie;
-	struct ieee_types_header *rsn_ie = NULL, *wpa_ie = NULL;
-	struct ieee_types_header *chsw_ie = NULL;
+	struct ieee_types_header *hdr;
+	struct ieee80211_vendor_ie *vendorhdr;
 	u16 gen_idx = MWIFIEX_AUTO_IDX_MASK, ie_len = 0;
-	const u8 *vendor_ie;
+	int left_len, parsed_len = 0;
+
+	if (!info->tail || !info->tail_len)
+		return 0;
 
 	gen_ie = kzalloc(sizeof(*gen_ie), GFP_KERNEL);
 	if (!gen_ie)
 		return -ENOMEM;
+
+	left_len = info->tail_len;
+
+	/* Many IEs are generated in FW by parsing bss configuration.
+	 * Let's not add them here; else we may end up duplicating these IEs
+	 */
+	while (left_len > sizeof(struct ieee_types_header)) {
+		hdr = (void *)(info->tail + parsed_len);
+		switch (hdr->element_id) {
+		case WLAN_EID_SSID:
+		case WLAN_EID_SUPP_RATES:
+		case WLAN_EID_COUNTRY:
+		case WLAN_EID_PWR_CONSTRAINT:
+		case WLAN_EID_EXT_SUPP_RATES:
+		case WLAN_EID_HT_CAPABILITY:
+		case WLAN_EID_HT_OPERATION:
+		case WLAN_EID_VHT_CAPABILITY:
+		case WLAN_EID_VHT_OPERATION:
+		case WLAN_EID_VENDOR_SPECIFIC:
+			break;
+		default:
+			memcpy(gen_ie->ie_buffer + ie_len, hdr,
+			       hdr->len + sizeof(struct ieee_types_header));
+			ie_len += hdr->len + sizeof(struct ieee_types_header);
+			break;
+		}
+		left_len -= hdr->len + sizeof(struct ieee_types_header);
+		parsed_len += hdr->len + sizeof(struct ieee_types_header);
+	}
+
+	/* parse only WPA vendor IE from tail, WMM IE is configured by
+	 * bss_config command
+	 */
+	vendorhdr = (void *)cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+						    WLAN_OUI_TYPE_MICROSOFT_WPA,
+						    info->tail, info->tail_len);
+	if (vendorhdr) {
+		memcpy(gen_ie->ie_buffer + ie_len, vendorhdr,
+		       vendorhdr->len + sizeof(struct ieee_types_header));
+		ie_len += vendorhdr->len + sizeof(struct ieee_types_header);
+	}
+
+	if (!ie_len) {
+		kfree(gen_ie);
+		return 0;
+	}
+
 	gen_ie->ie_index = cpu_to_le16(gen_idx);
 	gen_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON |
 						MGMT_MASK_PROBE_RESP |
 						MGMT_MASK_ASSOC_RESP);
+	gen_ie->ie_length = cpu_to_le16(ie_len);
 
-	if (info->tail && info->tail_len) {
-		rsn_ie = (void *)cfg80211_find_ie(WLAN_EID_RSN,
-						  info->tail, info->tail_len);
-		if (rsn_ie) {
-			memcpy(gen_ie->ie_buffer, rsn_ie, rsn_ie->len + 2);
-			ie_len = rsn_ie->len + 2;
-			gen_ie->ie_length = cpu_to_le16(ie_len);
-		}
-
-		vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
-						    WLAN_OUI_TYPE_MICROSOFT_WPA,
-						    info->tail,
-						    info->tail_len);
-		if (vendor_ie) {
-			wpa_ie = (struct ieee_types_header *)vendor_ie;
-			memcpy(gen_ie->ie_buffer + ie_len,
-			       wpa_ie, wpa_ie->len + 2);
-			ie_len += wpa_ie->len + 2;
-			gen_ie->ie_length = cpu_to_le16(ie_len);
-		}
-
-		chsw_ie = (void *)cfg80211_find_ie(WLAN_EID_CHANNEL_SWITCH,
-						   info->tail, info->tail_len);
-		if (chsw_ie) {
-			memcpy(gen_ie->ie_buffer + ie_len,
-			       chsw_ie, chsw_ie->len + 2);
-			ie_len += chsw_ie->len + 2;
-			gen_ie->ie_length = cpu_to_le16(ie_len);
-		}
+	if (mwifiex_update_uap_custom_ie(priv, gen_ie, &gen_idx, NULL, NULL,
+					 NULL, NULL)) {
+		kfree(gen_ie);
+		return -1;
 	}
 
-	if (rsn_ie || wpa_ie || chsw_ie) {
-		if (mwifiex_update_uap_custom_ie(priv, gen_ie, &gen_idx, NULL,
-						 NULL, NULL, NULL)) {
-			kfree(gen_ie);
-			return -1;
-		}
-		priv->gen_idx = gen_idx;
-	}
-
+	priv->gen_idx = gen_idx;
 	kfree(gen_ie);
 	return 0;
 }
@@ -390,7 +408,7 @@
 {
 	int ret;
 
-	ret = mwifiex_uap_set_head_tail_ies(priv, info);
+	ret = mwifiex_uap_parse_tail_ies(priv, info);
 		return ret;
 
 	return mwifiex_set_mgmt_beacon_data_ies(priv, info);
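
The mwifiex_uap_parse_tail_ies() loop above is the classic
element-by-element walk over a beacon tail: each information element
is an ieee_types_header (id, len) followed by len bytes, and the
switch skips the element IDs the firmware regenerates from bss
configuration so they are not sent down twice.  The bare walk, as a
sketch (mwifiex can rely on cfg80211 having validated the tail it
hands over, so the explicit overrun check here is belt-and-braces
rather than a claim about what the driver must do):

	static void example_walk_ies(const u8 *tail, int tail_len)
	{
		const u8 *pos = tail;
		int left = tail_len;

		while (left >= (int)sizeof(struct ieee_types_header)) {
			const struct ieee_types_header *hdr = (const void *)pos;
			int elen = hdr->len + sizeof(*hdr);

			if (elen > left)
				break;	/* element length overruns the buffer */

			/* dispatch on hdr->element_id here */

			pos += elen;
			left -= elen;
		}
	}
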
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index e12192f..df7fdc0 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -56,7 +56,7 @@
 {
 	struct mwifiex_adapter *adapter = (struct mwifiex_adapter *)data;
 
-	dev_err(adapter->dev, "Firmware wakeup failed\n");
+	mwifiex_dbg(adapter, ERROR, "Firmware wakeup failed\n");
 	adapter->hw_status = MWIFIEX_HW_STATUS_RESET;
 	mwifiex_cancel_all_pending_cmd(adapter);
 
@@ -172,8 +172,9 @@
 	/* Allocate command buffer */
 	ret = mwifiex_alloc_cmd_buffer(adapter);
 	if (ret) {
-		dev_err(adapter->dev, "%s: failed to alloc cmd buffer\n",
-			__func__);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: failed to alloc cmd buffer\n",
+			    __func__);
 		return -1;
 	}
 
@@ -182,8 +183,9 @@
 			      + INTF_HEADER_LEN);
 
 	if (!adapter->sleep_cfm) {
-		dev_err(adapter->dev, "%s: failed to alloc sleep cfm"
-			" cmd buffer\n", __func__);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: failed to alloc sleep cfm\t"
+			    " cmd buffer\n", __func__);
 		return -1;
 	}
 	skb_reserve(adapter->sleep_cfm, INTF_HEADER_LEN);
@@ -417,7 +419,7 @@
 	mwifiex_free_lock_list(adapter);
 
 	/* Free command buffer */
-	dev_dbg(adapter->dev, "info: free cmd buffer\n");
+	mwifiex_dbg(adapter, INFO, "info: free cmd buffer\n");
 	mwifiex_free_cmd_buffer(adapter);
 
 	for (idx = 0; idx < adapter->num_mem_types; idx++) {
@@ -433,6 +435,7 @@
 
 	if (adapter->drv_info_dump) {
 		vfree(adapter->drv_info_dump);
+		adapter->drv_info_dump = NULL;
 		adapter->drv_info_size = 0;
 	}
 
@@ -595,10 +598,11 @@
 	for (i = 0; i < adapter->priv_num; ++i) {
 		head = &adapter->bss_prio_tbl[i].bss_prio_head;
 		lock = &adapter->bss_prio_tbl[i].bss_prio_lock;
-		dev_dbg(adapter->dev, "info: delete BSS priority table,"
-				" bss_type = %d, bss_num = %d, i = %d,"
-				" head = %p\n",
-			      priv->bss_type, priv->bss_num, i, head);
+		mwifiex_dbg(adapter, INFO,
+			    "info: delete BSS priority table,\t"
+			    "bss_type = %d, bss_num = %d, i = %d,\t"
+			    "head = %p\n",
+			    priv->bss_type, priv->bss_num, i, head);
 
 		{
 			spin_lock_irqsave(lock, flags);
@@ -609,9 +613,10 @@
 			list_for_each_entry_safe(bssprio_node, tmp_node, head,
 						 list) {
 				if (bssprio_node->priv == priv) {
-					dev_dbg(adapter->dev, "info: Delete "
-						"node %p, next = %p\n",
-						bssprio_node, tmp_node);
+					mwifiex_dbg(adapter, INFO,
+						    "info: Delete\t"
+						    "node %p, next = %p\n",
+						    bssprio_node, tmp_node);
 					list_del(&bssprio_node->list);
 					kfree(bssprio_node);
 				}
@@ -659,20 +664,23 @@
 	adapter->hw_status = MWIFIEX_HW_STATUS_CLOSING;
 	/* wait for mwifiex_process to complete */
 	if (adapter->mwifiex_processing) {
-		dev_warn(adapter->dev, "main process is still running\n");
+		mwifiex_dbg(adapter, WARN,
+			    "main process is still running\n");
 		return ret;
 	}
 
 	/* cancel current command */
 	if (adapter->curr_cmd) {
-		dev_warn(adapter->dev, "curr_cmd is still in processing\n");
+		mwifiex_dbg(adapter, WARN,
+			    "curr_cmd is still in processing\n");
 		del_timer_sync(&adapter->cmd_timer);
 		mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
 		adapter->curr_cmd = NULL;
 	}
 
 	/* shut down mwifiex */
-	dev_dbg(adapter->dev, "info: shutdown mwifiex...\n");
+	mwifiex_dbg(adapter, MSG,
+		    "info: shutdown mwifiex...\n");
 
 	/* Clean up Tx/Rx queues and delete BSS priority table */
 	for (i = 0; i < adapter->priv_num; i++) {
@@ -741,8 +749,8 @@
 		/* check if firmware is already running */
 		ret = adapter->if_ops.check_fw_status(adapter, poll_num);
 		if (!ret) {
-			dev_notice(adapter->dev,
-				   "WLAN FW already running! Skip FW dnld\n");
+			mwifiex_dbg(adapter, MSG,
+				    "WLAN FW already running! Skip FW dnld\n");
 			return 0;
 		}
 
@@ -750,8 +758,8 @@
 
 		/* check if we are the winner for downloading FW */
 		if (!adapter->winner) {
-			dev_notice(adapter->dev,
-				   "FW already running! Skip FW dnld\n");
+			mwifiex_dbg(adapter, MSG,
+				    "FW already running! Skip FW dnld\n");
 			goto poll_fw;
 		}
 	}
@@ -760,7 +768,8 @@
 		/* Download firmware with helper */
 		ret = adapter->if_ops.prog_fw(adapter, pmfw);
 		if (ret) {
-			dev_err(adapter->dev, "prog_fw failed ret=%#x\n", ret);
+			mwifiex_dbg(adapter, ERROR,
+				    "prog_fw failed ret=%#x\n", ret);
 			return ret;
 		}
 	}
@@ -769,7 +778,8 @@
 	/* Check if the firmware is downloaded successfully or not */
 	ret = adapter->if_ops.check_fw_status(adapter, poll_num);
 	if (ret)
-		dev_err(adapter->dev, "FW failed to be active in time\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "FW failed to be active in time\n");
 
 	return ret;
 }
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index d2b05c3..4f0174c 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -113,6 +113,7 @@
 	u32 sta_ao_timer;
 	u32 ps_sta_ao_timer;
 	u8 qos_info;
+	u8 power_constraint;
 	struct mwifiex_types_wmm_info wmm_info;
 };
 
@@ -189,6 +190,7 @@
 };
 
 struct mwifiex_debug_info {
+	unsigned int debug_mask;
 	u32 int_counter;
 	u32 packets_out[MAX_NUM_TID];
 	u32 tx_buf_size;
@@ -342,6 +344,11 @@
 	u8 value[MAX_EEPROM_DATA];
 };
 
+struct mwifiex_ds_mem_rw {
+	u32 addr;
+	u32 value;
+};
+
 #define IEEE_MAX_IE_SIZE		256
 
 #define MWIFIEX_IE_HDR_SIZE	(sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE)
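
The mwifiex_ds_mem_rw pair added above is the ioctl-layer mirror of
host_cmd_ds_mem_access in fw.h: the debugfs memrw handler fills in
addr/value and hands the struct to mwifiex_send_cmd() with
HostCmd_CMD_MEM_ACCESS, so the command-preparation side presumably
packs it little-endian along these lines (a sketch; the function name
is illustrative):

	static int example_cmd_mem_access(struct host_cmd_ds_command *cmd,
					  u16 cmd_action,
					  struct mwifiex_ds_mem_rw *mem_rw)
	{
		struct host_cmd_ds_mem_access *mem = &cmd->params.mem;

		cmd->command = cpu_to_le16(HostCmd_CMD_MEM_ACCESS);
		cmd->size = cpu_to_le16(sizeof(*mem) + S_DS_GEN);

		/* action selects HostCmd_ACT_GEN_GET or _SET */
		mem->action = cpu_to_le16(cmd_action);
		mem->addr = cpu_to_le32(mem_rw->addr);
		mem->value = cpu_to_le32(mem_rw->value);

		return 0;
	}
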
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 411a6c2..56b024a 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -53,9 +53,9 @@
 	 *   parameter buffer pointer.
 	 */
 	if (priv->gen_ie_buf_len) {
-		dev_dbg(priv->adapter->dev,
-			"info: %s: append generic ie len %d to %p\n",
-			__func__, priv->gen_ie_buf_len, *buffer);
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: %s: append generic ie len %d to %p\n",
+			    __func__, priv->gen_ie_buf_len, *buffer);
 
 		/* Wrap the generic IE buffer with a pass through TLV type */
 		ie_header.type = cpu_to_le16(TLV_TYPE_PASSTHROUGH);
@@ -125,9 +125,9 @@
 
 	tsf_val = cpu_to_le64(bss_desc->timestamp);
 
-	dev_dbg(priv->adapter->dev,
-		"info: %s: TSF offset calc: %016llx - %016llx\n",
-		__func__, bss_desc->timestamp, bss_desc->fw_tsf);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: %s: TSF offset calc: %016llx - %016llx\n",
+		    __func__, bss_desc->timestamp, bss_desc->fw_tsf);
 
 	memcpy(*buffer, &tsf_val, sizeof(tsf_val));
 	*buffer += sizeof(tsf_val);
@@ -152,7 +152,7 @@
 
 	tmp = kmemdup(rate1, rate1_size, GFP_KERNEL);
 	if (!tmp) {
-		dev_err(priv->adapter->dev, "failed to alloc tmp buf\n");
+		mwifiex_dbg(priv->adapter, ERROR, "failed to alloc tmp buf\n");
 		return -ENOMEM;
 	}
 
@@ -169,8 +169,8 @@
 		}
 	}
 
-	dev_dbg(priv->adapter->dev, "info: Tx data rate set to %#x\n",
-		priv->data_rate);
+	mwifiex_dbg(priv->adapter, INFO, "info: Tx data rate set to %#x\n",
+		    priv->data_rate);
 
 	if (!priv->is_data_rate_auto) {
 		while (*ptr) {
@@ -180,9 +180,10 @@
 			}
 			ptr++;
 		}
-		dev_err(priv->adapter->dev, "previously set fixed data rate %#x"
-			" is not compatible with the network\n",
-			priv->data_rate);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "previously set fixed data rate %#x\t"
+			    "is not compatible with the network\n",
+			    priv->data_rate);
 
 		ret = -1;
 		goto done;
@@ -214,8 +215,9 @@
 	if (mwifiex_get_common_rates(priv, out_rates, MWIFIEX_SUPPORTED_RATES,
 				     card_rates, card_rates_size)) {
 		*out_rates_size = 0;
-		dev_err(priv->adapter->dev, "%s: cannot get common rates\n",
-			__func__);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "%s: cannot get common rates\n",
+			    __func__);
 		return -1;
 	}
 
@@ -246,8 +248,9 @@
 	 * parameter buffer pointer.
 	 */
 	if (priv->wps_ie_len) {
-		dev_dbg(priv->adapter->dev, "cmd: append wps ie %d to %p\n",
-			priv->wps_ie_len, *buffer);
+		mwifiex_dbg(priv->adapter, CMD,
+			    "cmd: append wps ie %d to %p\n",
+			    priv->wps_ie_len, *buffer);
 
 		/* Wrap the generic IE buffer with a pass through TLV type */
 		ie_header.type = cpu_to_le16(TLV_TYPE_MGMT_IE);
@@ -292,8 +295,9 @@
 	 *   parameter buffer pointer.
 	 */
 	if (priv->wapi_ie_len) {
-		dev_dbg(priv->adapter->dev, "cmd: append wapi ie %d to %p\n",
-			priv->wapi_ie_len, *buffer);
+		mwifiex_dbg(priv->adapter, CMD,
+			    "cmd: append wapi ie %d to %p\n",
+			    priv->wapi_ie_len, *buffer);
 
 		/* Wrap the generic IE buffer with a pass through TLV type */
 		ie_header.type = cpu_to_le16(TLV_TYPE_WAPI_IE);
@@ -453,8 +457,8 @@
 	rates_tlv->header.len = cpu_to_le16((u16) rates_size);
 	memcpy(rates_tlv->rates, rates, rates_size);
 	pos += sizeof(rates_tlv->header) + rates_size;
-	dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: rates size = %d\n",
-		rates_size);
+	mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_CMD: rates size = %d\n",
+		    rates_size);
 
 	/* Add the Authentication type to be used for Auth frames */
 	auth_tlv = (struct mwifiex_ie_types_auth_type *) pos;
@@ -487,14 +491,14 @@
 		       sizeof(struct mwifiex_chan_scan_param_set));
 		chan_tlv->chan_scan_param[0].chan_number =
 			(bss_desc->phy_param_set.ds_param_set.current_chan);
-		dev_dbg(priv->adapter->dev, "info: Assoc: TLV Chan = %d\n",
-			chan_tlv->chan_scan_param[0].chan_number);
+		mwifiex_dbg(priv->adapter, INFO, "info: Assoc: TLV Chan = %d\n",
+			    chan_tlv->chan_scan_param[0].chan_number);
 
 		chan_tlv->chan_scan_param[0].radio_type =
 			mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
 
-		dev_dbg(priv->adapter->dev, "info: Assoc: TLV Band = %d\n",
-			chan_tlv->chan_scan_param[0].radio_type);
+		mwifiex_dbg(priv->adapter, INFO, "info: Assoc: TLV Band = %d\n",
+			    chan_tlv->chan_scan_param[0].radio_type);
 		pos += sizeof(chan_tlv->header) +
 			sizeof(struct mwifiex_chan_scan_param_set);
 	}
@@ -544,13 +548,31 @@
 		tmp_cap &= ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
 
 	tmp_cap &= CAPINFO_MASK;
-	dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
-		tmp_cap, CAPINFO_MASK);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: ASSOC_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
+		    tmp_cap, CAPINFO_MASK);
 	assoc->cap_info_bitmap = cpu_to_le16(tmp_cap);
 
 	return 0;
 }
 
+static const char *assoc_failure_reason_to_str(u16 cap_info)
+{
+	switch (cap_info) {
+	case CONNECT_ERR_AUTH_ERR_STA_FAILURE:
+		return "CONNECT_ERR_AUTH_ERR_STA_FAILURE";
+	case CONNECT_ERR_AUTH_MSG_UNHANDLED:
+		return "CONNECT_ERR_AUTH_MSG_UNHANDLED";
+	case CONNECT_ERR_ASSOC_ERR_TIMEOUT:
+		return "CONNECT_ERR_ASSOC_ERR_TIMEOUT";
+	case CONNECT_ERR_ASSOC_ERR_AUTH_REFUSED:
+		return "CONNECT_ERR_ASSOC_ERR_AUTH_REFUSED";
+	case CONNECT_ERR_STA_FAILURE:
+		return "CONNECT_ERR_STA_FAILURE";
+	}
+
+	return "Unknown connect failure";
+}
 /*
  * Association firmware command response handler
  *
@@ -621,29 +643,48 @@
 	struct ieee_types_assoc_rsp *assoc_rsp;
 	struct mwifiex_bssdescriptor *bss_desc;
 	bool enable_data = true;
-	u16 cap_info, status_code;
+	u16 cap_info, status_code, aid;
 
 	assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
 
 	cap_info = le16_to_cpu(assoc_rsp->cap_info_bitmap);
 	status_code = le16_to_cpu(assoc_rsp->status_code);
+	aid = le16_to_cpu(assoc_rsp->a_id);
+
+	if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
+		dev_err(priv->adapter->dev,
+			"invalid AID value 0x%x; bits 15:14 not set\n",
+			aid);
+
+	aid &= ~(BIT(15) | BIT(14));
 
 	priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN,
 				   sizeof(priv->assoc_rsp_buf));
 
 	memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
 
+	assoc_rsp->a_id = cpu_to_le16(aid);
+
 	if (status_code) {
 		priv->adapter->dbg.num_cmd_assoc_failure++;
-		dev_err(priv->adapter->dev,
-			"ASSOC_RESP: failed, status code=%d err=%#x a_id=%#x\n",
-			status_code, cap_info, le16_to_cpu(assoc_rsp->a_id));
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "ASSOC_RESP: failed,\t"
+			    "status code=%d err=%#x a_id=%#x\n",
+			    status_code, cap_info,
+			    le16_to_cpu(assoc_rsp->a_id));
 
-		if (cap_info == MWIFIEX_TIMEOUT_FOR_AP_RESP) {
-			if (status_code == MWIFIEX_STATUS_CODE_AUTH_TIMEOUT)
+		mwifiex_dbg(priv->adapter, ERROR, "assoc failure: reason %s\n",
+			    assoc_failure_reason_to_str(cap_info));
+		if (cap_info == CONNECT_ERR_ASSOC_ERR_TIMEOUT) {
+			if (status_code == MWIFIEX_ASSOC_CMD_FAILURE_AUTH) {
 				ret = WLAN_STATUS_AUTH_TIMEOUT;
-			else
+				mwifiex_dbg(priv->adapter, ERROR,
+					    "ASSOC_RESP: AUTH timeout\n");
+			} else {
 				ret = WLAN_STATUS_UNSPECIFIED_FAILURE;
+				mwifiex_dbg(priv->adapter, ERROR,
+					    "ASSOC_RESP: UNSPECIFIED failure\n");
+			}
 		} else {
 			ret = status_code;
 		}
@@ -661,8 +702,8 @@
 	/* Set the attempted BSSID Index to current */
 	bss_desc = priv->attempted_bss_desc;
 
-	dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: %s\n",
-		bss_desc->ssid.ssid);
+	mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: %s\n",
+		    bss_desc->ssid.ssid);
 
 	/* Make a copy of current BSSID descriptor */
 	memcpy(&priv->curr_bss_params.bss_descriptor,
@@ -692,8 +733,9 @@
 			= ((bss_desc->wmm_ie.qos_info_bitmap &
 				IEEE80211_WMM_IE_AP_QOSINFO_UAPSD) ? 1 : 0);
 
-	dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: curr_pkt_filter is %#x\n",
-		priv->curr_pkt_filter);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: ASSOC_RESP: curr_pkt_filter is %#x\n",
+		    priv->curr_pkt_filter);
 	if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
 		priv->wpa_is_gtk_set = false;
 
@@ -709,8 +751,8 @@
 	}
 
 	if (enable_data)
-		dev_dbg(priv->adapter->dev,
-			"info: post association, re-enabling data flow\n");
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: post association, re-enabling data flow\n");
 
 	/* Reset SNR/NF/RSSI values */
 	priv->data_rssi_last = 0;
@@ -728,7 +770,7 @@
 
 	priv->adapter->dbg.num_cmd_assoc_success++;
 
-	dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: associated\n");
+	mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: associated\n");
 
 	/* Add the ra_list here for infra mode as there will be only 1 ra
 	   always */
@@ -815,8 +857,8 @@
 
 	memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len);
 
-	dev_dbg(adapter->dev, "info: ADHOC_S_CMD: SSID = %s\n",
-		adhoc_start->ssid);
+	mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n",
+		    adhoc_start->ssid);
 
 	memset(bss_desc->ssid.ssid, 0, IEEE80211_MAX_SSID_LEN);
 	memcpy(bss_desc->ssid.ssid, req_ssid->ssid, req_ssid->ssid_len);
@@ -848,12 +890,14 @@
 	}
 
 	if (!priv->adhoc_channel) {
-		dev_err(adapter->dev, "ADHOC_S_CMD: adhoc_channel cannot be 0\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "ADHOC_S_CMD: adhoc_channel cannot be 0\n");
 		return -1;
 	}
 
-	dev_dbg(adapter->dev, "info: ADHOC_S_CMD: creating ADHOC on channel %d\n",
-		priv->adhoc_channel);
+	mwifiex_dbg(adapter, INFO,
+		    "info: ADHOC_S_CMD: creating ADHOC on channel %d\n",
+		    priv->adhoc_channel);
 
 	priv->curr_bss_params.bss_descriptor.channel = priv->adhoc_channel;
 	priv->curr_bss_params.band = adapter->adhoc_start_band;
@@ -885,13 +929,14 @@
 	/* Set up privacy in bss_desc */
 	if (priv->sec_info.encryption_mode) {
 		/* Ad-Hoc capability privacy on */
-		dev_dbg(adapter->dev,
-			"info: ADHOC_S_CMD: wep_status set privacy to WEP\n");
+		mwifiex_dbg(adapter, INFO,
+			    "info: ADHOC_S_CMD: wep_status set privacy to WEP\n");
 		bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
 		tmp_cap |= WLAN_CAPABILITY_PRIVACY;
 	} else {
-		dev_dbg(adapter->dev, "info: ADHOC_S_CMD: wep_status NOT set,"
-				" setting privacy to ACCEPT ALL\n");
+		mwifiex_dbg(adapter, INFO,
+			    "info: ADHOC_S_CMD: wep_status NOT set,\t"
+			    "setting privacy to ACCEPT ALL\n");
 		bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
 	}
 
@@ -902,8 +947,8 @@
 		if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
 				     HostCmd_ACT_GEN_SET, 0,
 				     &priv->curr_pkt_filter, false)) {
-			dev_err(adapter->dev,
-				"ADHOC_S_CMD: G Protection config failed\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "ADHOC_S_CMD: G Protection config failed\n");
 			return -1;
 		}
 	}
@@ -918,10 +963,10 @@
 	memcpy(&priv->curr_bss_params.data_rates,
 	       &adhoc_start->data_rate, priv->curr_bss_params.num_of_rates);
 
-	dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%4ph\n",
-		adhoc_start->data_rate);
+	mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: rates=%4ph\n",
+		    adhoc_start->data_rate);
 
-	dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
+	mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
 
 	if (IS_SUPPORT_MULTI_BANDS(adapter)) {
 		/* Append a channel TLV */
@@ -935,8 +980,8 @@
 		chan_tlv->chan_scan_param[0].chan_number =
 			(u8) priv->curr_bss_params.bss_descriptor.channel;
 
-		dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Chan = %d\n",
-			chan_tlv->chan_scan_param[0].chan_number);
+		mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: TLV Chan = %d\n",
+			    chan_tlv->chan_scan_param[0].chan_number);
 
 		chan_tlv->chan_scan_param[0].radio_type
 		       = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
@@ -951,8 +996,8 @@
 				chan_tlv->chan_scan_param[0].radio_type |=
 					(IEEE80211_HT_PARAM_CHA_SEC_BELOW << 4);
 		}
-		dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Band = %d\n",
-			chan_tlv->chan_scan_param[0].radio_type);
+		mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: TLV Band = %d\n",
+			    chan_tlv->chan_scan_param[0].radio_type);
 		pos += sizeof(chan_tlv->header) +
 			sizeof(struct mwifiex_chan_scan_param_set);
 		cmd_append_size +=
@@ -1074,8 +1119,8 @@
 		if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
 				     HostCmd_ACT_GEN_SET, 0,
 				     &curr_pkt_filter, false)) {
-			dev_err(priv->adapter->dev,
-				"ADHOC_J_CMD: G Protection config failed\n");
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "ADHOC_J_CMD: G Protection config failed\n");
 			return -1;
 		}
 	}
@@ -1106,14 +1151,15 @@
 
 	tmp_cap &= CAPINFO_MASK;
 
-	dev_dbg(priv->adapter->dev,
-		"info: ADHOC_J_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
-		tmp_cap, CAPINFO_MASK);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: ADHOC_J_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
+		    tmp_cap, CAPINFO_MASK);
 
 	/* Information on BSSID descriptor passed to FW */
-	dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: BSSID=%pM, SSID='%s'\n",
-		adhoc_join->bss_descriptor.bssid,
-		adhoc_join->bss_descriptor.ssid);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: ADHOC_J_CMD: BSSID=%pM, SSID='%s'\n",
+		    adhoc_join->bss_descriptor.bssid,
+		    adhoc_join->bss_descriptor.ssid);
 
 	for (i = 0; i < MWIFIEX_SUPPORTED_RATES &&
 		    bss_desc->supported_rates[i]; i++)
@@ -1149,14 +1195,14 @@
 		       sizeof(struct mwifiex_chan_scan_param_set));
 		chan_tlv->chan_scan_param[0].chan_number =
 			(bss_desc->phy_param_set.ds_param_set.current_chan);
-		dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Chan=%d\n",
-			chan_tlv->chan_scan_param[0].chan_number);
+		mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_J_CMD: TLV Chan=%d\n",
+			    chan_tlv->chan_scan_param[0].chan_number);
 
 		chan_tlv->chan_scan_param[0].radio_type =
 			mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
 
-		dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Band=%d\n",
-			chan_tlv->chan_scan_param[0].radio_type);
+		mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_J_CMD: TLV Band=%d\n",
+			    chan_tlv->chan_scan_param[0].radio_type);
 		pos += sizeof(chan_tlv->header) +
 				sizeof(struct mwifiex_chan_scan_param_set);
 		cmd_append_size += sizeof(chan_tlv->header) +
@@ -1210,7 +1256,7 @@
 	/* Join result code 0 --> SUCCESS */
 	reason_code = le16_to_cpu(resp->result);
 	if (reason_code) {
-		dev_err(priv->adapter->dev, "ADHOC_RESP: failed\n");
+		mwifiex_dbg(priv->adapter, ERROR, "ADHOC_RESP: failed\n");
 		if (priv->media_connected)
 			mwifiex_reset_connect_state(priv, reason_code);
 
@@ -1225,8 +1271,8 @@
 	priv->media_connected = true;
 
 	if (le16_to_cpu(resp->command) == HostCmd_CMD_802_11_AD_HOC_START) {
-		dev_dbg(priv->adapter->dev, "info: ADHOC_S_RESP %s\n",
-			bss_desc->ssid.ssid);
+		mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_S_RESP %s\n",
+			    bss_desc->ssid.ssid);
 
 		/* Update the created network descriptor with the new BSSID */
 		memcpy(bss_desc->mac_address,
@@ -1238,8 +1284,9 @@
 		 * Now the join cmd should be successful.
 		 * If BSSID has changed use SSID to compare instead of BSSID
 		 */
-		dev_dbg(priv->adapter->dev, "info: ADHOC_J_RESP %s\n",
-			bss_desc->ssid.ssid);
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: ADHOC_J_RESP %s\n",
+			    bss_desc->ssid.ssid);
 
 		/*
 		 * Make a copy of current BSSID descriptor, only needed for
@@ -1252,10 +1299,10 @@
 		priv->adhoc_state = ADHOC_JOINED;
 	}
 
-	dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: channel = %d\n",
-		priv->adhoc_channel);
-	dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: BSSID = %pM\n",
-		priv->curr_bss_params.bss_descriptor.mac_address);
+	mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_RESP: channel = %d\n",
+		    priv->adhoc_channel);
+	mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_RESP: BSSID = %pM\n",
+		    priv->curr_bss_params.bss_descriptor.mac_address);
 
 	if (!netif_carrier_ok(priv->netdev))
 		netif_carrier_on(priv->netdev);
@@ -1317,12 +1364,12 @@
 mwifiex_adhoc_start(struct mwifiex_private *priv,
 		    struct cfg80211_ssid *adhoc_ssid)
 {
-	dev_dbg(priv->adapter->dev, "info: Adhoc Channel = %d\n",
-		priv->adhoc_channel);
-	dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n",
-		priv->curr_bss_params.bss_descriptor.channel);
-	dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %d\n",
-		priv->curr_bss_params.band);
+	mwifiex_dbg(priv->adapter, INFO, "info: Adhoc Channel = %d\n",
+		    priv->adhoc_channel);
+	mwifiex_dbg(priv->adapter, INFO, "info: curr_bss_params.channel = %d\n",
+		    priv->curr_bss_params.bss_descriptor.channel);
+	mwifiex_dbg(priv->adapter, INFO, "info: curr_bss_params.band = %d\n",
+		    priv->curr_bss_params.band);
 
 	if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
 	    priv->adapter->config_bands & BAND_AAC)
@@ -1343,14 +1390,16 @@
 int mwifiex_adhoc_join(struct mwifiex_private *priv,
 		       struct mwifiex_bssdescriptor *bss_desc)
 {
-	dev_dbg(priv->adapter->dev, "info: adhoc join: curr_bss ssid =%s\n",
-		priv->curr_bss_params.bss_descriptor.ssid.ssid);
-	dev_dbg(priv->adapter->dev, "info: adhoc join: curr_bss ssid_len =%u\n",
-		priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
-	dev_dbg(priv->adapter->dev, "info: adhoc join: ssid =%s\n",
-		bss_desc->ssid.ssid);
-	dev_dbg(priv->adapter->dev, "info: adhoc join: ssid_len =%u\n",
-		bss_desc->ssid.ssid_len);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: adhoc join: curr_bss ssid =%s\n",
+		    priv->curr_bss_params.bss_descriptor.ssid.ssid);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: adhoc join: curr_bss ssid_len =%u\n",
+		    priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
+	mwifiex_dbg(priv->adapter, INFO, "info: adhoc join: ssid =%s\n",
+		    bss_desc->ssid.ssid);
+	mwifiex_dbg(priv->adapter, INFO, "info: adhoc join: ssid_len =%u\n",
+		    bss_desc->ssid.ssid_len);
 
 	/* Check if the requested SSID is already joined */
 	if (priv->curr_bss_params.bss_descriptor.ssid.ssid_len &&
@@ -1358,8 +1407,9 @@
 			      &priv->curr_bss_params.bss_descriptor.ssid) &&
 	    (priv->curr_bss_params.bss_descriptor.bss_mode ==
 							NL80211_IFTYPE_ADHOC)) {
-		dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: new ad-hoc SSID"
-			" is the same as current; not attempting to re-join\n");
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: ADHOC_J_CMD: new ad-hoc SSID\t"
+			    "is the same as current; not attempting to re-join\n");
 		return -1;
 	}
 
@@ -1370,10 +1420,12 @@
 	else
 		mwifiex_set_ba_params(priv);
 
-	dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n",
-		priv->curr_bss_params.bss_descriptor.channel);
-	dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %c\n",
-		priv->curr_bss_params.band);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: curr_bss_params.channel = %d\n",
+		    priv->curr_bss_params.bss_descriptor.channel);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: curr_bss_params.band = %c\n",
+		    priv->curr_bss_params.band);
 
 	return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_JOIN,
 				HostCmd_ACT_GEN_SET, 0, bss_desc, true);
@@ -1421,7 +1473,7 @@
 		ret = mwifiex_deauthenticate_infra(priv, mac);
 		if (ret)
 			cfg80211_disconnected(priv->netdev, 0, NULL, 0,
-					      GFP_KERNEL);
+					      true, GFP_KERNEL);
 		break;
 	case NL80211_IFTYPE_ADHOC:
 		return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_STOP,
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 03a95c7..3ba4e0e 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -24,6 +24,10 @@
 
 #define VERSION	"1.0"
 
+static unsigned int debug_mask = MWIFIEX_DEFAULT_DEBUG_MASK;
+module_param(debug_mask, uint, 0);
+MODULE_PARM_DESC(debug_mask, "bitmap for debug flags");
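+/*
+ * debug_mask is a bitmap of the MWIFIEX_DBG_* flags defined in main.h.
+ * For example (illustrative value), loading the module with
+ *	modprobe mwifiex debug_mask=0x17
+ * keeps the default MSG | FATAL | ERROR (0x7) and also enables CMD (0x10).
+ */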
+
 const char driver_version[] = "mwifiex " VERSION " (%s) ";
 static char *cal_data_cfg;
 module_param(cal_data_cfg, charp, 0);
@@ -63,6 +67,7 @@
 
 	/* Save interface specific operations in adapter */
 	memmove(&adapter->if_ops, if_ops, sizeof(struct mwifiex_if_ops));
+	adapter->debug_mask = debug_mask;
 
 	/* card specific initialization has been deferred until now .. */
 	if (adapter->if_ops.init_if)
@@ -89,7 +94,8 @@
 	return 0;
 
 error:
-	dev_dbg(adapter->dev, "info: leave mwifiex_register with error\n");
+	mwifiex_dbg(adapter, ERROR,
+		    "info: leave mwifiex_register with error\n");
 
 	for (i = 0; i < adapter->priv_num; i++)
 		kfree(adapter->priv[i]);
@@ -231,11 +237,10 @@
 		goto exit_main_proc;
 	} else {
 		adapter->mwifiex_processing = true;
+		spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
 	}
 process_start:
 	do {
-		adapter->more_task_flag = false;
-		spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
 		if ((adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING) ||
 		    (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY))
 			break;
@@ -275,7 +280,6 @@
 			adapter->pm_wakeup_fw_try = true;
 			mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
 			adapter->if_ops.wakeup(adapter);
-			spin_lock_irqsave(&adapter->main_proc_lock, flags);
 			continue;
 		}
 
@@ -335,7 +339,6 @@
 		    (adapter->ps_state == PS_STATE_PRE_SLEEP) ||
 		    (adapter->ps_state == PS_STATE_SLEEP_CFM) ||
 		    adapter->tx_lock_flag){
-			spin_lock_irqsave(&adapter->main_proc_lock, flags);
 			continue;
 		}
 
@@ -386,12 +389,14 @@
 			}
 			break;
 		}
-		spin_lock_irqsave(&adapter->main_proc_lock, flags);
 	} while (true);
 
 	spin_lock_irqsave(&adapter->main_proc_lock, flags);
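+	/*
+	 * A caller that found mwifiex_processing already true has only set
+	 * more_task_flag; re-check it under main_proc_lock and restart the
+	 * loop so that the queued-up work is not lost.
+	 */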
-	if (adapter->more_task_flag)
+	if (adapter->more_task_flag) {
+		adapter->more_task_flag = false;
+		spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
 		goto process_start;
+	}
 	adapter->mwifiex_processing = false;
 	spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
 
@@ -455,8 +460,8 @@
 	struct wireless_dev *wdev;
 
 	if (!firmware) {
-		dev_err(adapter->dev,
-			"Failed to get firmware %s\n", adapter->fw_name);
+		mwifiex_dbg(adapter, ERROR,
+			    "Failed to get firmware %s\n", adapter->fw_name);
 		goto err_dnld_fw;
 	}
 
@@ -472,13 +477,13 @@
 	if (ret == -1)
 		goto err_dnld_fw;
 
-	dev_notice(adapter->dev, "WLAN FW is active\n");
+	mwifiex_dbg(adapter, MSG, "WLAN FW is active\n");
 
 	if (cal_data_cfg) {
 		if ((request_firmware(&adapter->cal_data, cal_data_cfg,
 				      adapter->dev)) < 0)
-			dev_err(adapter->dev,
-				"Cal data request_firmware() failed\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "Cal data request_firmware() failed\n");
 	}
 
 	/* enable host interrupt after fw dnld is successful */
@@ -503,12 +508,14 @@
 
 	priv = adapter->priv[MWIFIEX_BSS_ROLE_STA];
 	if (mwifiex_register_cfg80211(adapter)) {
-		dev_err(adapter->dev, "cannot register with cfg80211\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "cannot register with cfg80211\n");
 		goto err_init_fw;
 	}
 
 	if (mwifiex_init_channel_scan_gap(adapter)) {
-		dev_err(adapter->dev, "could not init channel stats table\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "could not init channel stats table\n");
 		goto err_init_fw;
 	}
 
@@ -522,7 +529,8 @@
 	wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", NET_NAME_ENUM,
 					NL80211_IFTYPE_STATION, NULL, NULL);
 	if (IS_ERR(wdev)) {
-		dev_err(adapter->dev, "cannot create default STA interface\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "cannot create default STA interface\n");
 		rtnl_unlock();
 		goto err_add_intf;
 	}
@@ -531,7 +539,8 @@
 		wdev = mwifiex_add_virtual_intf(adapter->wiphy, "uap%d", NET_NAME_ENUM,
 						NL80211_IFTYPE_AP, NULL, NULL);
 		if (IS_ERR(wdev)) {
-			dev_err(adapter->dev, "cannot create AP interface\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "cannot create AP interface\n");
 			rtnl_unlock();
 			goto err_add_intf;
 		}
@@ -542,8 +551,8 @@
 						NL80211_IFTYPE_P2P_CLIENT, NULL,
 						NULL);
 		if (IS_ERR(wdev)) {
-			dev_err(adapter->dev,
-				"cannot create p2p client interface\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "cannot create p2p client interface\n");
 			rtnl_unlock();
 			goto err_add_intf;
 		}
@@ -551,7 +560,7 @@
 	rtnl_unlock();
 
 	mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
-	dev_notice(adapter->dev, "driver_version = %s\n", fmt);
+	mwifiex_dbg(adapter, MSG, "driver_version = %s\n", fmt);
 	goto done;
 
 err_add_intf:
@@ -561,7 +570,8 @@
 	if (adapter->if_ops.disable_int)
 		adapter->if_ops.disable_int(adapter);
 err_dnld_fw:
-	pr_debug("info: %s: unregister device\n", __func__);
+	mwifiex_dbg(adapter, ERROR,
+		    "info: %s: unregister device\n", __func__);
 	if (adapter->if_ops.unregister_dev)
 		adapter->if_ops.unregister_dev(adapter);
 
@@ -602,8 +612,8 @@
 				      adapter->dev, GFP_KERNEL, adapter,
 				      mwifiex_fw_dpc);
 	if (ret < 0)
-		dev_err(adapter->dev,
-			"request_firmware_nowait() returned error %d\n", ret);
+		mwifiex_dbg(adapter, ERROR,
+			    "request_firmware_nowait error %d\n", ret);
 	return ret;
 }
 
@@ -629,7 +639,8 @@
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
 	if (priv->scan_request) {
-		dev_dbg(priv->adapter->dev, "aborting scan on ndo_stop\n");
+		mwifiex_dbg(priv->adapter, INFO,
+			    "aborting scan on ndo_stop\n");
 		cfg80211_scan_done(priv->scan_request, 1);
 		priv->scan_request = NULL;
 		priv->scan_aborting = true;
@@ -650,7 +661,8 @@
 		txq = netdev_get_tx_queue(priv->netdev, index);
 		if (!netif_tx_queue_stopped(txq)) {
 			netif_tx_stop_queue(txq);
-			dev_dbg(priv->adapter->dev, "stop queue: %d\n", index);
+			mwifiex_dbg(priv->adapter, DATA,
+				    "stop queue: %d\n", index);
 		}
 	}
 
@@ -715,8 +727,9 @@
 	struct mwifiex_txinfo *tx_info;
 	bool multicast;
 
-	dev_dbg(priv->adapter->dev, "data: %lu BSS(%d-%d): Data <= kernel\n",
-		jiffies, priv->bss_type, priv->bss_num);
+	mwifiex_dbg(priv->adapter, DATA,
+		    "data: %lu BSS(%d-%d): Data <= kernel\n",
+		    jiffies, priv->bss_type, priv->bss_num);
 
 	if (priv->adapter->surprise_removed) {
 		kfree_skb(skb);
@@ -724,28 +737,31 @@
 		return 0;
 	}
 	if (!skb->len || (skb->len > ETH_FRAME_LEN)) {
-		dev_err(priv->adapter->dev, "Tx: bad skb len %d\n", skb->len);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Tx: bad skb len %d\n", skb->len);
 		kfree_skb(skb);
 		priv->stats.tx_dropped++;
 		return 0;
 	}
 	if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
-		dev_dbg(priv->adapter->dev,
-			"data: Tx: insufficient skb headroom %d\n",
-			skb_headroom(skb));
+		mwifiex_dbg(priv->adapter, DATA,
+			    "data: Tx: insufficient skb headroom %d\n",
+			    skb_headroom(skb));
 		/* Insufficient skb headroom - allocate a new skb */
 		new_skb =
 			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
 		if (unlikely(!new_skb)) {
-			dev_err(priv->adapter->dev, "Tx: cannot alloca new_skb\n");
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "Tx: cannot allocate new_skb\n");
 			kfree_skb(skb);
 			priv->stats.tx_dropped++;
 			return 0;
 		}
 		kfree_skb(skb);
 		skb = new_skb;
-		dev_dbg(priv->adapter->dev, "info: new skb headroomd %d\n",
-			skb_headroom(skb));
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: new skb headroom %d\n",
+			    skb_headroom(skb));
 	}
 
 	tx_info = MWIFIEX_SKB_TXCB(skb);
@@ -803,8 +819,8 @@
 	if (!ret)
 		memcpy(priv->netdev->dev_addr, priv->curr_addr, ETH_ALEN);
 	else
-		dev_err(priv->adapter->dev,
-			"set mac address failed: ret=%d\n", ret);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "set mac address failed: ret=%d\n", ret);
 
 	memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
 
@@ -842,20 +858,22 @@
 
 	priv->num_tx_timeout++;
 	priv->tx_timeout_cnt++;
-	dev_err(priv->adapter->dev,
-		"%lu : Tx timeout(#%d), bss_type-num = %d-%d\n",
-		jiffies, priv->tx_timeout_cnt, priv->bss_type, priv->bss_num);
+	mwifiex_dbg(priv->adapter, ERROR,
+		    "%lu : Tx timeout(#%d), bss_type-num = %d-%d\n",
+		    jiffies, priv->tx_timeout_cnt, priv->bss_type,
+		    priv->bss_num);
 	mwifiex_set_trans_start(dev);
 
 	if (priv->tx_timeout_cnt > TX_TIMEOUT_THRESHOLD &&
 	    priv->adapter->if_ops.card_reset) {
-		dev_err(priv->adapter->dev,
-			"tx_timeout_cnt exceeds threshold. Triggering card reset!\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "tx_timeout_cnt exceeds threshold.\t"
+			    "Triggering card reset!\n");
 		priv->adapter->if_ops.card_reset(priv->adapter);
 	}
 }
 
-void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter)
+void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
 {
 	void *p;
 	char drv_version[64];
@@ -868,10 +886,11 @@
 
 	if (adapter->drv_info_dump) {
 		vfree(adapter->drv_info_dump);
+		adapter->drv_info_dump = NULL;
 		adapter->drv_info_size = 0;
 	}
 
-	dev_info(adapter->dev, "=== DRIVER INFO DUMP START===\n");
+	mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump start===\n");
 
 	adapter->drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX);
 
@@ -939,12 +958,12 @@
 	}
 
 	if (adapter->iface_type == MWIFIEX_SDIO) {
-		p += sprintf(p, "\n=== SDIO register DUMP===\n");
+		p += sprintf(p, "\n=== SDIO register dump===\n");
 		if (adapter->if_ops.reg_dump)
 			p += adapter->if_ops.reg_dump(adapter, p);
 	}
 
-	p += sprintf(p, "\n=== MORE DEBUG INFORMATION\n");
+	p += sprintf(p, "\n=== more debug information\n");
 	debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL);
 	if (debug_info) {
 		for (i = 0; i < adapter->priv_num; i++) {
@@ -959,9 +978,99 @@
 	}
 
 	adapter->drv_info_size = p - adapter->drv_info_dump;
-	dev_info(adapter->dev, "=== DRIVER INFO DUMP END===\n");
+	mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n");
 }
-EXPORT_SYMBOL_GPL(mwifiex_dump_drv_info);
+EXPORT_SYMBOL_GPL(mwifiex_drv_info_dump);
+
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter)
+{
+	u8 idx, *dump_data, *fw_dump_ptr;
+	u32 dump_len;
+
+	dump_len = (strlen("========Start dump driverinfo========\n") +
+		       adapter->drv_info_size +
+		       strlen("\n========End dump========\n"));
+
+	for (idx = 0; idx < adapter->num_mem_types; idx++) {
+		struct memory_type_mapping *entry =
+				&adapter->mem_type_mapping_tbl[idx];
+
+		if (entry->mem_ptr) {
+			dump_len += (strlen("========Start dump ") +
+					strlen(entry->mem_name) +
+					strlen("========\n") +
+					(entry->mem_size + 1) +
+					strlen("\n========End dump========\n"));
+		}
+	}
+
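+	/* dump_len mirrors the strcpy()/memcpy() sequence performed below,
+	 * so the buffer is sized for the begin/end markers plus every
+	 * captured memory region.
+	 */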
+	dump_data = vzalloc(dump_len + 1);
+	if (!dump_data)
+		goto done;
+
+	fw_dump_ptr = dump_data;
+
+	/* Dump all the memory data into a single file; a userspace script
+	 * will later split it into multiple files.
+	 */
+	mwifiex_dbg(adapter, MSG,
+		    "== mwifiex dump information to /sys/class/devcoredump start\n");
+
+	strcpy(fw_dump_ptr, "========Start dump driverinfo========\n");
+	fw_dump_ptr += strlen("========Start dump driverinfo========\n");
+	memcpy(fw_dump_ptr, adapter->drv_info_dump, adapter->drv_info_size);
+	fw_dump_ptr += adapter->drv_info_size;
+	strcpy(fw_dump_ptr, "\n========End dump========\n");
+	fw_dump_ptr += strlen("\n========End dump========\n");
+
+	for (idx = 0; idx < adapter->num_mem_types; idx++) {
+		struct memory_type_mapping *entry =
+					&adapter->mem_type_mapping_tbl[idx];
+
+		if (entry->mem_ptr) {
+			strcpy(fw_dump_ptr, "========Start dump ");
+			fw_dump_ptr += strlen("========Start dump ");
+
+			strcpy(fw_dump_ptr, entry->mem_name);
+			fw_dump_ptr += strlen(entry->mem_name);
+
+			strcpy(fw_dump_ptr, "========\n");
+			fw_dump_ptr += strlen("========\n");
+
+			memcpy(fw_dump_ptr, entry->mem_ptr, entry->mem_size);
+			fw_dump_ptr += entry->mem_size;
+
+			strcpy(fw_dump_ptr, "\n========End dump========\n");
+			fw_dump_ptr += strlen("\n========End dump========\n");
+		}
+	}
+
+	/* The device dump data will be freed by the devcoredump release
+	 * function after a 5 minute timeout.
+	 */
+	dev_coredumpv(adapter->dev, dump_data, dump_len, GFP_KERNEL);
+	mwifiex_dbg(adapter, MSG,
+		    "== mwifiex dump information to /sys/class/devcoredump end\n");
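+	/* The dump is then readable from userspace via the devcoredump
+	 * sysfs entry (illustrative path): /sys/class/devcoredump/devcd<n>/data
+	 */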
+
+done:
+	for (idx = 0; idx < adapter->num_mem_types; idx++) {
+		struct memory_type_mapping *entry =
+			&adapter->mem_type_mapping_tbl[idx];
+
+		if (entry->mem_ptr) {
+			vfree(entry->mem_ptr);
+			entry->mem_ptr = NULL;
+		}
+		entry->mem_size = 0;
+	}
+
+	if (adapter->drv_info_dump) {
+		vfree(adapter->drv_info_dump);
+		adapter->drv_info_dump = NULL;
+		adapter->drv_info_size = 0;
+	}
+}
+EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump);
 
 /*
  * CFG802.11 network device handler for statistics retrieval.
@@ -1230,21 +1339,24 @@
 		}
 	}
 
-	dev_dbg(adapter->dev, "cmd: calling mwifiex_shutdown_drv...\n");
+	mwifiex_dbg(adapter, CMD,
+		    "cmd: calling mwifiex_shutdown_drv...\n");
 	adapter->init_wait_q_woken = false;
 
 	if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
 		wait_event_interruptible(adapter->init_wait_q,
 					 adapter->init_wait_q_woken);
-	dev_dbg(adapter->dev, "cmd: mwifiex_shutdown_drv done\n");
+	mwifiex_dbg(adapter, CMD,
+		    "cmd: mwifiex_shutdown_drv done\n");
 	if (atomic_read(&adapter->rx_pending) ||
 	    atomic_read(&adapter->tx_pending) ||
 	    atomic_read(&adapter->cmd_pending)) {
-		dev_err(adapter->dev, "rx_pending=%d, tx_pending=%d, "
-		       "cmd_pending=%d\n",
-		       atomic_read(&adapter->rx_pending),
-		       atomic_read(&adapter->tx_pending),
-		       atomic_read(&adapter->cmd_pending));
+		mwifiex_dbg(adapter, ERROR,
+			    "rx_pending=%d, tx_pending=%d,\t"
+			    "cmd_pending=%d\n",
+			    atomic_read(&adapter->rx_pending),
+			    atomic_read(&adapter->tx_pending),
+			    atomic_read(&adapter->cmd_pending));
 	}
 
 	for (i = 0; i < adapter->priv_num; i++) {
@@ -1264,11 +1376,13 @@
 	wiphy_free(adapter->wiphy);
 
 	/* Unregister device */
-	dev_dbg(adapter->dev, "info: unregister device\n");
+	mwifiex_dbg(adapter, INFO,
+		    "info: unregister device\n");
 	if (adapter->if_ops.unregister_dev)
 		adapter->if_ops.unregister_dev(adapter);
 	/* Free adapter structure */
-	dev_dbg(adapter->dev, "info: free adapter\n");
+	mwifiex_dbg(adapter, INFO,
+		    "info: free adapter\n");
 	mwifiex_free_adapter(adapter);
 
 exit_remove:
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index fe12560..ae98b5b 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -36,6 +36,7 @@
 #include <linux/of.h>
 #include <linux/idr.h>
 #include <linux/inetdevice.h>
+#include <linux/devcoredump.h>
 
 #include "decl.h"
 #include "ioctl.h"
@@ -147,6 +148,54 @@
 /* Address alignment */
 #define MWIFIEX_ALIGN_ADDR(p, a) (((long)(p) + (a) - 1) & ~((a) - 1))
 
+/**
+ * enum MWIFIEX_DEBUG_LEVEL - Marvell WiFi driver debug message levels
+ */
+enum MWIFIEX_DEBUG_LEVEL {
+	MWIFIEX_DBG_MSG		= 0x00000001,
+	MWIFIEX_DBG_FATAL	= 0x00000002,
+	MWIFIEX_DBG_ERROR	= 0x00000004,
+	MWIFIEX_DBG_DATA	= 0x00000008,
+	MWIFIEX_DBG_CMD		= 0x00000010,
+	MWIFIEX_DBG_EVENT	= 0x00000020,
+	MWIFIEX_DBG_INTR	= 0x00000040,
+	MWIFIEX_DBG_IOCTL	= 0x00000080,
+
+	MWIFIEX_DBG_MPA_D	= 0x00008000,
+	MWIFIEX_DBG_DAT_D	= 0x00010000,
+	MWIFIEX_DBG_CMD_D	= 0x00020000,
+	MWIFIEX_DBG_EVT_D	= 0x00040000,
+	MWIFIEX_DBG_FW_D	= 0x00080000,
+	MWIFIEX_DBG_IF_D	= 0x00100000,
+
+	MWIFIEX_DBG_ENTRY	= 0x10000000,
+	MWIFIEX_DBG_WARN	= 0x20000000,
+	MWIFIEX_DBG_INFO	= 0x40000000,
+	MWIFIEX_DBG_DUMP	= 0x80000000,
+
+	MWIFIEX_DBG_ANY		= 0xffffffff
+};
+
+#define MWIFIEX_DEFAULT_DEBUG_MASK	(MWIFIEX_DBG_MSG | \
+					MWIFIEX_DBG_FATAL | \
+					MWIFIEX_DBG_ERROR)
+
+#define mwifiex_dbg(adapter, dbg_mask, fmt, args...)		\
+do {								\
+	if ((adapter)->debug_mask & MWIFIEX_DBG_##dbg_mask)	\
+		if ((adapter)->dev)				\
+			dev_info((adapter)->dev, fmt, ## args);	\
+} while (0)
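+
+/*
+ * Example (illustrative format string and arguments):
+ *	mwifiex_dbg(adapter, CMD, "cmd: DNLD_CMD: %#x\n", cmd_no);
+ * reaches dev_info() only when MWIFIEX_DBG_CMD is set in
+ * adapter->debug_mask and adapter->dev has been populated.
+ */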
+
+#define DEBUG_DUMP_DATA_MAX_LEN		128
+#define mwifiex_dbg_dump(adapter, dbg_mask, str, buf, len)	\
+do {								\
+	if ((adapter)->debug_mask & MWIFIEX_DBG_##dbg_mask)	\
+		print_hex_dump(KERN_DEBUG, str,			\
+			       DUMP_PREFIX_OFFSET, 16, 1,	\
+			       buf, len, false);		\
+} while (0)
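+
+/*
+ * Example (illustrative): hex-dump at most DEBUG_DUMP_DATA_MAX_LEN bytes
+ * of a TX packet when data-dump debugging is enabled:
+ *	mwifiex_dbg_dump(adapter, DAT_D, "tx pkt:", skb->data,
+ *			 min_t(unsigned int, skb->len,
+ *			       DEBUG_DUMP_DATA_MAX_LEN));
+ */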
+
 struct mwifiex_dbg {
 	u32 num_cmd_host_to_card_failure;
 	u32 num_cmd_sleep_cfm_host_to_card_failure;
@@ -451,7 +500,7 @@
 };
 
 enum mwifiex_iface_work_flags {
-	MWIFIEX_IFACE_WORK_FW_DUMP,
+	MWIFIEX_IFACE_WORK_DEVICE_DUMP,
 	MWIFIEX_IFACE_WORK_CARD_RESET,
 };
 
@@ -593,6 +642,7 @@
 	u8 del_list_idx;
 	bool hs2_enabled;
 	struct mwifiex_uap_bss_param bss_cfg;
+	struct cfg80211_chan_def bss_chandef;
 	struct station_parameters *sta_params;
 	struct sk_buff_head tdls_txq;
 	u8 check_tdls_tx;
@@ -611,6 +661,7 @@
 	struct delayed_work dfs_chan_sw_work;
 	struct cfg80211_beacon_data beacon_after;
 	struct mwifiex_11h_intf_state state_11h;
+	struct mwifiex_ds_mem_rw mem_rw;
 };
 
 
@@ -690,6 +741,18 @@
 	struct ieee80211_vht_operation vhtoper;
 };
 
+struct mwifiex_station_stats {
+	u64 last_rx;
+	s8 rssi;
+	u64 rx_bytes;
+	u64 tx_bytes;
+	u32 rx_packets;
+	u32 tx_packets;
+	u32 tx_failed;
+	u8 last_tx_rate;
+	u8 last_tx_htinfo;
+};
+
 /* This is AP/TDLS specific structure which stores information
  * about associated/peer STA
  */
@@ -704,6 +767,7 @@
 	u16 max_amsdu;
 	u8 tdls_status;
 	struct mwifiex_tdls_capab tdls_cap;
+	struct mwifiex_station_stats stats;
 };
 
 struct mwifiex_auto_tdls_peer {
@@ -740,8 +804,8 @@
 	int (*init_fw_port) (struct mwifiex_adapter *);
 	int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
 	void (*card_reset) (struct mwifiex_adapter *);
-	void (*fw_dump)(struct mwifiex_adapter *);
 	int (*reg_dump)(struct mwifiex_adapter *, char *);
+	void (*device_dump)(struct mwifiex_adapter *);
 	int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
 	void (*iface_work)(struct work_struct *work);
 	void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter);
@@ -750,6 +814,7 @@
 
 struct mwifiex_adapter {
 	u8 iface_type;
+	unsigned int debug_mask;
 	struct mwifiex_iface_comb iface_limit;
 	struct mwifiex_iface_comb curr_iface_comb;
 	struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
@@ -900,7 +965,6 @@
 	u8 key_api_major_ver, key_api_minor_ver;
 	struct memory_type_mapping *mem_type_mapping_tbl;
 	u8 num_mem_types;
-	u8 curr_mem_idx;
 	void *drv_info_dump;
 	u32 drv_info_size;
 	bool scan_chan_gap_enabled;
@@ -909,6 +973,12 @@
 	u32 num_in_chan_stats;
 	int survey_idx;
 	bool auto_tdls;
+	u8 coex_scan;
+	u8 coex_min_scan_time;
+	u8 coex_max_scan_time;
+	u8 coex_win_size;
+	u8 coex_tx_win_size;
+	u8 coex_rx_win_size;
 };
 
 void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1085,6 +1155,9 @@
 void mwifiex_set_vht_params(struct mwifiex_private *priv,
 			    struct mwifiex_uap_bss_param *bss_cfg,
 			    struct cfg80211_ap_settings *params);
+void mwifiex_set_tpc_params(struct mwifiex_private *priv,
+			    struct mwifiex_uap_bss_param *bss_cfg,
+			    struct cfg80211_ap_settings *params);
 void mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
 			   struct cfg80211_ap_settings *params);
 void mwifiex_set_vht_width(struct mwifiex_private *priv,
@@ -1095,6 +1168,11 @@
 		       struct mwifiex_uap_bss_param *bss_cfg,
 		       struct cfg80211_ap_settings *params);
 void mwifiex_set_ba_params(struct mwifiex_private *priv);
+
+void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *pmadapter);
+void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
+					     struct sk_buff *event_skb);
+
 void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv);
 int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
 				struct host_cmd_ds_command *cmd,
@@ -1332,6 +1410,7 @@
 					struct mwifiex_bssdescriptor *bss_desc);
 
 u8 mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type);
+u8 mwifiex_sec_chan_offset_to_chan_type(u8 second_chan_offset);
 
 struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
 					      const char *name,
@@ -1349,7 +1428,8 @@
 			 struct cfg80211_beacon_data *data);
 int mwifiex_del_mgmt_ies(struct mwifiex_private *priv);
 u8 *mwifiex_11d_code_2_region(u8 code);
-void mwifiex_uap_set_channel(struct mwifiex_uap_bss_param *bss_cfg,
+void mwifiex_uap_set_channel(struct mwifiex_private *priv,
+			     struct mwifiex_uap_bss_param *bss_cfg,
 			     struct cfg80211_chan_def chandef);
 int mwifiex_config_start_uap(struct mwifiex_private *priv,
 			     struct mwifiex_uap_bss_param *bss_cfg);
@@ -1423,6 +1503,8 @@
 void mwifiex_dfs_cac_work_queue(struct work_struct *work);
 void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work);
 void mwifiex_abort_cac(struct mwifiex_private *priv);
+int mwifiex_stop_radar_detection(struct mwifiex_private *priv,
+				 struct cfg80211_chan_def *chandef);
 int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv,
 				      struct sk_buff *skb);
 
@@ -1434,10 +1516,12 @@
 u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
 			    u8 rx_rate, u8 ht_info);
 
-void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter);
+void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter);
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter);
 void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
 void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
-
+void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter);
+void mwifiex_11n_delba(struct mwifiex_private *priv, int tid);
 #ifdef CONFIG_DEBUG_FS
 void mwifiex_debugfs_init(void);
 void mwifiex_debugfs_remove(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index bcc7751..77b9055 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -57,7 +57,7 @@
 
 	mapping.addr = pci_map_single(card->dev, skb->data, size, flags);
 	if (pci_dma_mapping_error(card->dev, mapping.addr)) {
-		dev_err(adapter->dev, "failed to map pci memory!\n");
+		mwifiex_dbg(adapter, ERROR, "failed to map pci memory!\n");
 		return -1;
 	}
 	mapping.len = size;
@@ -89,8 +89,9 @@
 
 	if (card->sleep_cookie_vbase) {
 		cookie_addr = (u32 *)card->sleep_cookie_vbase;
-		dev_dbg(adapter->dev, "info: ACCESS_HW: sleep cookie=0x%x\n",
-			*cookie_addr);
+		mwifiex_dbg(adapter, INFO,
+			    "info: ACCESS_HW: sleep cookie=0x%x\n",
+			    *cookie_addr);
 		if (*cookie_addr == FW_AWAKE_COOKIE)
 			return true;
 	}
@@ -164,7 +165,8 @@
 	adapter = card->adapter;
 
 	if (!adapter->is_suspended) {
-		dev_warn(adapter->dev, "Device already resumed\n");
+		mwifiex_dbg(adapter, WARN,
+			    "Device already resumed\n");
 		return 0;
 	}
 
@@ -361,16 +363,16 @@
 		sleep_cookie = *(u32 *)buffer;
 
 		if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) {
-			dev_dbg(adapter->dev,
-				"sleep cookie found at count %d\n", count);
+			mwifiex_dbg(adapter, INFO,
+				    "sleep cookie found at count %d\n", count);
 			break;
 		}
 		usleep_range(20, 30);
 	}
 
 	if (count >= max_delay_loop_cnt)
-		dev_dbg(adapter->dev,
-			"max count reached while accessing sleep cookie\n");
+		mwifiex_dbg(adapter, INFO,
+			    "max count reached while accessing sleep cookie\n");
 }
 
 /* This function wakes up the card by reading fw_status register. */
@@ -380,20 +382,23 @@
 	struct pcie_service_card *card = adapter->card;
 	const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
-	dev_dbg(adapter->dev, "event: Wakeup device...\n");
+	mwifiex_dbg(adapter, EVENT,
+		    "event: Wakeup device...\n");
 
 	if (reg->sleep_cookie)
 		mwifiex_pcie_dev_wakeup_delay(adapter);
 
 	/* Reading fw_status register will wakeup device */
 	if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status)) {
-		dev_warn(adapter->dev, "Reading fw_status register failed\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "Reading fw_status register failed\n");
 		return -1;
 	}
 
 	if (reg->sleep_cookie) {
 		mwifiex_pcie_dev_wakeup_delay(adapter);
-		dev_dbg(adapter->dev, "PCIE wakeup: Setting PS_STATE_AWAKE\n");
+		mwifiex_dbg(adapter, INFO,
+			    "PCIE wakeup: Setting PS_STATE_AWAKE\n");
 		adapter->ps_state = PS_STATE_AWAKE;
 	}
 
@@ -407,7 +412,8 @@
  */
 static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
 {
-	dev_dbg(adapter->dev, "cmd: Wakeup device completed\n");
+	mwifiex_dbg(adapter, CMD,
+		    "cmd: Wakeup device completed\n");
 
 	return 0;
 }
@@ -423,7 +429,8 @@
 	if (mwifiex_pcie_ok_to_access_hw(adapter)) {
 		if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK,
 				      0x00000000)) {
-			dev_warn(adapter->dev, "Disable host interrupt failed\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "Disable host interrupt failed\n");
 			return -1;
 		}
 	}
@@ -443,7 +450,8 @@
 		/* Simply write the mask to the register */
 		if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK,
 				      HOST_INTR_MASK)) {
-			dev_warn(adapter->dev, "Enable host interrupt failed\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "Enable host interrupt failed\n");
 			return -1;
 		}
 	}
@@ -499,8 +507,8 @@
 		skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
 						  GFP_KERNEL | GFP_DMA);
 		if (!skb) {
-			dev_err(adapter->dev,
-				"Unable to allocate skb for RX ring.\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "Unable to allocate skb for RX ring.\n");
 			kfree(card->rxbd_ring_vbase);
 			return -ENOMEM;
 		}
@@ -512,10 +520,10 @@
 
 		buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
 
-		dev_dbg(adapter->dev,
-			"info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
-			skb, skb->len, skb->data, (u32)buf_pa,
-			(u32)((u64)buf_pa >> 32));
+		mwifiex_dbg(adapter, INFO,
+			    "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
+			    skb, skb->len, skb->data, (u32)buf_pa,
+			    (u32)((u64)buf_pa >> 32));
 
 		card->rx_buf_list[i] = skb;
 		if (reg->pfu_enabled) {
@@ -556,8 +564,8 @@
 		/* Allocate skb here so that firmware can DMA data from it */
 		skb = dev_alloc_skb(MAX_EVENT_SIZE);
 		if (!skb) {
-			dev_err(adapter->dev,
-				"Unable to allocate skb for EVENT buf.\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "Unable to allocate skb for EVENT buf.\n");
 			kfree(card->evtbd_ring_vbase);
 			return -ENOMEM;
 		}
@@ -569,10 +577,10 @@
 
 		buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
 
-		dev_dbg(adapter->dev,
-			"info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
-			skb, skb->len, skb->data, (u32)buf_pa,
-			(u32)((u64)buf_pa >> 32));
+		mwifiex_dbg(adapter, EVENT,
+			    "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
+			    skb, skb->len, skb->data, (u32)buf_pa,
+			    (u32)((u64)buf_pa >> 32));
 
 		card->evt_buf_list[i] = skb;
 		card->evtbd_ring[i] = (void *)(card->evtbd_ring_vbase +
@@ -715,21 +723,23 @@
 		card->txbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
 				       MWIFIEX_MAX_TXRX_BD;
 
-	dev_dbg(adapter->dev, "info: txbd_ring: Allocating %d bytes\n",
-		card->txbd_ring_size);
+	mwifiex_dbg(adapter, INFO,
+		    "info: txbd_ring: Allocating %d bytes\n",
+		    card->txbd_ring_size);
 	card->txbd_ring_vbase = pci_alloc_consistent(card->dev,
 						     card->txbd_ring_size,
 						     &card->txbd_ring_pbase);
 	if (!card->txbd_ring_vbase) {
-		dev_err(adapter->dev,
-			"allocate consistent memory (%d bytes) failed!\n",
-			card->txbd_ring_size);
+		mwifiex_dbg(adapter, ERROR,
+			    "allocate consistent memory (%d bytes) failed!\n",
+			    card->txbd_ring_size);
 		return -ENOMEM;
 	}
-	dev_dbg(adapter->dev,
-		"info: txbd_ring - base: %p, pbase: %#x:%x, len: %x\n",
-		card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase,
-		(u32)((u64)card->txbd_ring_pbase >> 32), card->txbd_ring_size);
+	mwifiex_dbg(adapter, DATA,
+		    "info: txbd_ring - base: %p, pbase: %#x:%x, len: %x\n",
+		    card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase,
+		    (u32)((u64)card->txbd_ring_pbase >> 32),
+		    card->txbd_ring_size);
 
 	return mwifiex_init_txq_ring(adapter);
 }
@@ -777,23 +787,24 @@
 		card->rxbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
 				       MWIFIEX_MAX_TXRX_BD;
 
-	dev_dbg(adapter->dev, "info: rxbd_ring: Allocating %d bytes\n",
-		card->rxbd_ring_size);
+	mwifiex_dbg(adapter, INFO,
+		    "info: rxbd_ring: Allocating %d bytes\n",
+		    card->rxbd_ring_size);
 	card->rxbd_ring_vbase = pci_alloc_consistent(card->dev,
 						     card->rxbd_ring_size,
 						     &card->rxbd_ring_pbase);
 	if (!card->rxbd_ring_vbase) {
-		dev_err(adapter->dev,
-			"allocate consistent memory (%d bytes) failed!\n",
-			card->rxbd_ring_size);
+		mwifiex_dbg(adapter, ERROR,
+			    "allocate consistent memory (%d bytes) failed!\n",
+			    card->rxbd_ring_size);
 		return -ENOMEM;
 	}
 
-	dev_dbg(adapter->dev,
-		"info: rxbd_ring - base: %p, pbase: %#x:%x, len: %#x\n",
-		card->rxbd_ring_vbase, (u32)card->rxbd_ring_pbase,
-		(u32)((u64)card->rxbd_ring_pbase >> 32),
-		card->rxbd_ring_size);
+	mwifiex_dbg(adapter, DATA,
+		    "info: rxbd_ring - base: %p, pbase: %#x:%x, len: %#x\n",
+		    card->rxbd_ring_vbase, (u32)card->rxbd_ring_pbase,
+		    (u32)((u64)card->rxbd_ring_pbase >> 32),
+		    card->rxbd_ring_size);
 
 	return mwifiex_init_rxq_ring(adapter);
 }
@@ -840,23 +851,24 @@
 	card->evtbd_ring_size = sizeof(struct mwifiex_evt_buf_desc) *
 				MWIFIEX_MAX_EVT_BD;
 
-	dev_dbg(adapter->dev, "info: evtbd_ring: Allocating %d bytes\n",
+	mwifiex_dbg(adapter, INFO,
+		    "info: evtbd_ring: Allocating %d bytes\n",
-		card->evtbd_ring_size);
+		    card->evtbd_ring_size);
 	card->evtbd_ring_vbase = pci_alloc_consistent(card->dev,
 						      card->evtbd_ring_size,
 						      &card->evtbd_ring_pbase);
 	if (!card->evtbd_ring_vbase) {
-		dev_err(adapter->dev,
-			"allocate consistent memory (%d bytes) failed!\n",
-			card->evtbd_ring_size);
+		mwifiex_dbg(adapter, ERROR,
+			    "allocate consistent memory (%d bytes) failed!\n",
+			    card->evtbd_ring_size);
 		return -ENOMEM;
 	}
 
-	dev_dbg(adapter->dev,
-		"info: CMDRSP/EVT bd_ring - base: %p pbase: %#x:%x len: %#x\n",
-		card->evtbd_ring_vbase, (u32)card->evtbd_ring_pbase,
-		(u32)((u64)card->evtbd_ring_pbase >> 32),
-		card->evtbd_ring_size);
+	mwifiex_dbg(adapter, EVENT,
+		    "info: CMDRSP/EVT bd_ring - base: %p pbase: %#x:%x len: %#x\n",
+		    card->evtbd_ring_vbase, (u32)card->evtbd_ring_pbase,
+		    (u32)((u64)card->evtbd_ring_pbase >> 32),
+		    card->evtbd_ring_size);
 
 	return mwifiex_pcie_init_evt_ring(adapter);
 }
@@ -895,8 +907,8 @@
 	/* Allocate memory for receiving command response data */
 	skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE);
 	if (!skb) {
-		dev_err(adapter->dev,
-			"Unable to allocate skb for command response data.\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "Unable to allocate skb for command response data.\n");
 		return -ENOMEM;
 	}
 	skb_put(skb, MWIFIEX_UPLD_SIZE);
@@ -944,14 +956,16 @@
 	card->sleep_cookie_vbase = pci_alloc_consistent(card->dev, sizeof(u32),
 						     &card->sleep_cookie_pbase);
 	if (!card->sleep_cookie_vbase) {
-		dev_err(adapter->dev, "pci_alloc_consistent failed!\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "pci_alloc_consistent failed!\n");
 		return -ENOMEM;
 	}
 	/* Init val of Sleep Cookie */
 	*(u32 *)card->sleep_cookie_vbase = FW_AWAKE_COOKIE;
 
-	dev_dbg(adapter->dev, "alloc_scook: sleep cookie=0x%x\n",
-		*((u32 *)card->sleep_cookie_vbase));
+	mwifiex_dbg(adapter, INFO,
+		    "alloc_scook: sleep cookie=0x%x\n",
+		    *((u32 *)card->sleep_cookie_vbase));
 
 	return 0;
 }
@@ -993,8 +1007,8 @@
 		 */
 		if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
 				      CPU_INTR_DNLD_RDY)) {
-			dev_err(adapter->dev,
-				"failed to assert dnld-rdy interrupt.\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "failed to assert dnld-rdy interrupt.\n");
 			return -1;
 		}
 	}
@@ -1018,13 +1032,14 @@
 
 	/* Read the TX ring read pointer set by firmware */
 	if (mwifiex_read_reg(adapter, reg->tx_rdptr, &rdptr)) {
-		dev_err(adapter->dev,
-			"SEND COMP: failed to read reg->tx_rdptr\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "SEND COMP: failed to read reg->tx_rdptr\n");
 		return -1;
 	}
 
-	dev_dbg(adapter->dev, "SEND COMP: rdptr_prev=0x%x, rdptr=0x%x\n",
-		card->txbd_rdptr, rdptr);
+	mwifiex_dbg(adapter, DATA,
+		    "SEND COMP: rdptr_prev=0x%x, rdptr=0x%x\n",
+		    card->txbd_rdptr, rdptr);
 
 	num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr;
 	/* free from previous txbd_rdptr to current txbd_rdptr */
@@ -1038,9 +1053,9 @@
 		skb = card->tx_buf_list[wrdoneidx];
 
 		if (skb) {
-			dev_dbg(adapter->dev,
-				"SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
-				skb, wrdoneidx);
+			mwifiex_dbg(adapter, DATA,
+				    "SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
+				    skb, wrdoneidx);
 			mwifiex_unmap_pci_memory(adapter, skb,
 						 PCI_DMA_TODEVICE);
 
@@ -1112,8 +1127,9 @@
 	__le16 *tmp;
 
 	if (!(skb->data && skb->len)) {
-		dev_err(adapter->dev, "%s(): invalid parameter <%p, %#x>\n",
-			__func__, skb->data, skb->len);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s(): invalid parameter <%p, %#x>\n",
+			    __func__, skb->data, skb->len);
 		return -1;
 	}
 
@@ -1121,7 +1137,8 @@
 		mwifiex_pm_wakeup_card(adapter);
 
 	num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr;
-	dev_dbg(adapter->dev, "info: SEND DATA: <Rd: %#x, Wr: %#x>\n",
+	mwifiex_dbg(adapter, DATA,
+		    "info: SEND DATA: <Rd: %#x, Wr: %#x>\n",
-		card->txbd_rdptr, card->txbd_wrptr);
+		    card->txbd_rdptr, card->txbd_wrptr);
 	if (mwifiex_pcie_txbd_not_full(card)) {
 		u8 *payload;
@@ -1175,39 +1192,40 @@
 		/* Write the TX ring write pointer in to reg->tx_wrptr */
 		if (mwifiex_write_reg(adapter, reg->tx_wrptr,
 				      card->txbd_wrptr | rx_val)) {
-			dev_err(adapter->dev,
-				"SEND DATA: failed to write reg->tx_wrptr\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "SEND DATA: failed to write reg->tx_wrptr\n");
 			ret = -1;
 			goto done_unmap;
 		}
 		if ((mwifiex_pcie_txbd_not_full(card)) &&
 		    tx_param->next_pkt_len) {
 			/* have more packets and TxBD still can hold more */
-			dev_dbg(adapter->dev,
-				"SEND DATA: delay dnld-rdy interrupt.\n");
+			mwifiex_dbg(adapter, DATA,
+				    "SEND DATA: delay dnld-rdy interrupt.\n");
 			adapter->data_sent = false;
 		} else {
 			/* Send the TX ready interrupt */
 			if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
 					      CPU_INTR_DNLD_RDY)) {
-				dev_err(adapter->dev,
-					"SEND DATA: failed to assert dnld-rdy interrupt.\n");
+				mwifiex_dbg(adapter, ERROR,
+					    "SEND DATA: failed to assert dnld-rdy interrupt.\n");
 				ret = -1;
 				goto done_unmap;
 			}
 		}
-		dev_dbg(adapter->dev, "info: SEND DATA: Updated <Rd: %#x, Wr: "
-			"%#x> and sent packet to firmware successfully\n",
-			card->txbd_rdptr, card->txbd_wrptr);
+		mwifiex_dbg(adapter, DATA,
+			    "info: SEND DATA: Updated <Rd: %#x, Wr:\t"
+			    "%#x> and sent packet to firmware successfully\n",
+			    card->txbd_rdptr, card->txbd_wrptr);
 	} else {
-		dev_dbg(adapter->dev,
-			"info: TX Ring full, can't send packets to fw\n");
+		mwifiex_dbg(adapter, DATA,
+			    "info: TX Ring full, can't send packets to fw\n");
 		adapter->data_sent = true;
 		/* Send the TX ready interrupt */
 		if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
 				      CPU_INTR_DNLD_RDY))
-			dev_err(adapter->dev,
-				"SEND DATA: failed to assert door-bell intr\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "SEND DATA: failed to assert door-bell intr\n");
 		return -EBUSY;
 	}
 
@@ -1243,8 +1261,8 @@
 
 	/* Read the RX ring Write pointer set by firmware */
 	if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) {
-		dev_err(adapter->dev,
-			"RECV DATA: failed to read reg->rx_wrptr\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "RECV DATA: failed to read reg->rx_wrptr\n");
 		ret = -1;
 		goto done;
 	}
@@ -1277,15 +1295,15 @@
 		rx_len = le16_to_cpu(pkt_len);
 		if (WARN_ON(rx_len <= INTF_HEADER_LEN ||
 			    rx_len > MWIFIEX_RX_DATA_BUF_SIZE)) {
-			dev_err(adapter->dev,
-				"Invalid RX len %d, Rd=%#x, Wr=%#x\n",
-				rx_len, card->rxbd_rdptr, wrptr);
+			mwifiex_dbg(adapter, ERROR,
+				    "Invalid RX len %d, Rd=%#x, Wr=%#x\n",
+				    rx_len, card->rxbd_rdptr, wrptr);
 			dev_kfree_skb_any(skb_data);
 		} else {
 			skb_put(skb_data, rx_len);
-			dev_dbg(adapter->dev,
-				"info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n",
-				card->rxbd_rdptr, wrptr, rx_len);
+			mwifiex_dbg(adapter, DATA,
+				    "info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n",
+				    card->rxbd_rdptr, wrptr, rx_len);
 			skb_pull(skb_data, INTF_HEADER_LEN);
 			if (adapter->rx_work_enabled) {
 				skb_queue_tail(&adapter->rx_data_q, skb_data);
@@ -1299,8 +1317,8 @@
 		skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
 						      GFP_KERNEL | GFP_DMA);
 		if (!skb_tmp) {
-			dev_err(adapter->dev,
-				"Unable to allocate skb.\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "Unable to allocate skb.\n");
 			return -ENOMEM;
 		}
 
@@ -1311,9 +1329,9 @@
 
 		buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp);
 
-		dev_dbg(adapter->dev,
-			"RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n",
-			skb_tmp, rd_index);
+		mwifiex_dbg(adapter, INFO,
+			    "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n",
+			    skb_tmp, rd_index);
 		card->rx_buf_list[rd_index] = skb_tmp;
 
 		if (reg->pfu_enabled) {
@@ -1336,28 +1354,29 @@
 					     reg->rx_rollover_ind) ^
 					     reg->rx_rollover_ind);
 		}
-		dev_dbg(adapter->dev, "info: RECV DATA: <Rd: %#x, Wr: %#x>\n",
-			card->rxbd_rdptr, wrptr);
+		mwifiex_dbg(adapter, DATA,
+			    "info: RECV DATA: <Rd: %#x, Wr: %#x>\n",
+			    card->rxbd_rdptr, wrptr);
 
 		tx_val = card->txbd_wrptr & reg->tx_wrap_mask;
 		/* Write the RX ring read pointer in to reg->rx_rdptr */
 		if (mwifiex_write_reg(adapter, reg->rx_rdptr,
 				      card->rxbd_rdptr | tx_val)) {
-			dev_err(adapter->dev,
-				"RECV DATA: failed to write reg->rx_rdptr\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "RECV DATA: failed to write reg->rx_rdptr\n");
 			ret = -1;
 			goto done;
 		}
 
 		/* Read the RX ring Write pointer set by firmware */
 		if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) {
-			dev_err(adapter->dev,
-				"RECV DATA: failed to read reg->rx_wrptr\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "RECV DATA: failed to read reg->rx_wrptr\n");
 			ret = -1;
 			goto done;
 		}
-		dev_dbg(adapter->dev,
-			"info: RECV DATA: Rcvd packet from fw successfully\n");
+		mwifiex_dbg(adapter, DATA,
+			    "info: RECV DATA: Rcvd packet from fw successfully\n");
 		card->rxbd_wrptr = wrptr;
 	}
 
@@ -1376,9 +1395,9 @@
 	const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
 	if (!(skb->data && skb->len)) {
-		dev_err(adapter->dev,
-			"Invalid parameter in %s <%p. len %d>\n",
-			__func__, skb->data, skb->len);
+		mwifiex_dbg(adapter, ERROR,
+			    "Invalid parameter in %s <%p. len %d>\n",
+			    __func__, skb->data, skb->len);
 		return -1;
 	}
 
@@ -1391,9 +1410,9 @@
 	 * address scratch register
 	 */
 	if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)buf_pa)) {
-		dev_err(adapter->dev,
-			"%s: failed to write download command to boot code.\n",
-			__func__);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: failed to write download command to boot code.\n",
+			    __func__);
 		mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
 		return -1;
 	}
@@ -1403,18 +1422,18 @@
 	 */
 	if (mwifiex_write_reg(adapter, reg->cmd_addr_hi,
 			      (u32)((u64)buf_pa >> 32))) {
-		dev_err(adapter->dev,
-			"%s: failed to write download command to boot code.\n",
-			__func__);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: failed to write download command to boot code.\n",
+			    __func__);
 		mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
 		return -1;
 	}
 
 	/* Write the command length to cmd_size scratch register */
 	if (mwifiex_write_reg(adapter, reg->cmd_size, skb->len)) {
-		dev_err(adapter->dev,
-			"%s: failed to write command len to cmd_size scratch reg\n",
-			__func__);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: failed to write command len to cmd_size scratch reg\n",
+			    __func__);
 		mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
 		return -1;
 	}
@@ -1422,8 +1441,8 @@
 	/* Ring the door bell */
 	if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
 			      CPU_INTR_DOOR_BELL)) {
-		dev_err(adapter->dev,
-			"%s: failed to assert door-bell intr\n", __func__);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: failed to assert door-bell intr\n", __func__);
 		mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
 		return -1;
 	}
@@ -1443,8 +1462,8 @@
 	/* Write the RX ring read pointer in to reg->rx_rdptr */
 	if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr |
 			      tx_wrap)) {
-		dev_err(adapter->dev,
-			"RECV DATA: failed to write reg->rx_rdptr\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "RECV DATA: failed to write reg->rx_rdptr\n");
 		return -1;
 	}
 	return 0;
@@ -1462,15 +1481,16 @@
 	u8 *payload = (u8 *)skb->data;
 
 	if (!(skb->data && skb->len)) {
-		dev_err(adapter->dev, "Invalid parameter in %s <%p, %#x>\n",
-			__func__, skb->data, skb->len);
+		mwifiex_dbg(adapter, ERROR,
+			    "Invalid parameter in %s <%p, %#x>\n",
+			    __func__, skb->data, skb->len);
 		return -1;
 	}
 
 	/* Make sure a command response buffer is available */
 	if (!card->cmdrsp_buf) {
-		dev_err(adapter->dev,
-			"No response buffer available, send command failed\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "No response buffer available, send command failed\n");
 		return -EBUSY;
 	}
 
@@ -1503,8 +1523,8 @@
 		   address */
 		if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo,
 				      (u32)cmdrsp_buf_pa)) {
-			dev_err(adapter->dev,
-				"Failed to write download cmd to boot code.\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "Failed to write download cmd to boot code.\n");
 			ret = -1;
 			goto done;
 		}
@@ -1512,8 +1532,8 @@
 		   address */
 		if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi,
 				      (u32)((u64)cmdrsp_buf_pa >> 32))) {
-			dev_err(adapter->dev,
-				"Failed to write download cmd to boot code.\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "Failed to write download cmd to boot code.\n");
 			ret = -1;
 			goto done;
 		}
@@ -1523,16 +1543,16 @@
 	/* Write the lower 32bits of the physical address to reg->cmd_addr_lo */
 	if (mwifiex_write_reg(adapter, reg->cmd_addr_lo,
 			      (u32)cmd_buf_pa)) {
-		dev_err(adapter->dev,
-			"Failed to write download cmd to boot code.\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "Failed to write download cmd to boot code.\n");
 		ret = -1;
 		goto done;
 	}
 	/* Write the upper 32bits of the physical address to reg->cmd_addr_hi */
 	if (mwifiex_write_reg(adapter, reg->cmd_addr_hi,
 			      (u32)((u64)cmd_buf_pa >> 32))) {
-		dev_err(adapter->dev,
-			"Failed to write download cmd to boot code.\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "Failed to write download cmd to boot code.\n");
 		ret = -1;
 		goto done;
 	}
@@ -1540,8 +1560,8 @@
 	/* Write the command length to reg->cmd_size */
 	if (mwifiex_write_reg(adapter, reg->cmd_size,
 			      card->cmd_buf->len)) {
-		dev_err(adapter->dev,
-			"Failed to write cmd len to reg->cmd_size\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "Failed to write cmd len to reg->cmd_size\n");
 		ret = -1;
 		goto done;
 	}
@@ -1549,8 +1569,8 @@
 	/* Ring the door bell */
 	if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
 			      CPU_INTR_DOOR_BELL)) {
-		dev_err(adapter->dev,
-			"Failed to assert door-bell intr\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "Failed to assert door-bell intr\n");
 		ret = -1;
 		goto done;
 	}
@@ -1574,7 +1594,8 @@
 	u16 rx_len;
 	__le16 pkt_len;
 
-	dev_dbg(adapter->dev, "info: Rx CMD Response\n");
+	mwifiex_dbg(adapter, CMD,
+		    "info: Rx CMD Response\n");
 
 	mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
 
@@ -1598,8 +1619,8 @@
 			if (mwifiex_write_reg(adapter,
 					      PCIE_CPU_INT_EVENT,
 					      CPU_INTR_SLEEP_CFM_DONE)) {
-				dev_warn(adapter->dev,
-					 "Write register failed\n");
+				mwifiex_dbg(adapter, ERROR,
+					    "Write register failed\n");
 				return -1;
 			}
 			mwifiex_delay_for_sleep_cookie(adapter,
@@ -1608,8 +1629,8 @@
 			       mwifiex_pcie_ok_to_access_hw(adapter))
 				usleep_range(50, 60);
 		} else {
-			dev_err(adapter->dev,
-				"There is no command but got cmdrsp\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "There is no command but got cmdrsp\n");
 		}
 		memcpy(adapter->upld_buf, skb->data,
 		       min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
@@ -1628,15 +1649,15 @@
 		   will prevent firmware from writing to the same response
 		   buffer again. */
 		if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, 0)) {
-			dev_err(adapter->dev,
-				"cmd_done: failed to clear cmd_rsp_addr_lo\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "cmd_done: failed to clear cmd_rsp_addr_lo\n");
 			return -1;
 		}
 		/* Write the upper 32bits of the cmdrsp buffer physical
 		   address */
 		if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, 0)) {
-			dev_err(adapter->dev,
-				"cmd_done: failed to clear cmd_rsp_addr_hi\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "cmd_done: failed to clear cmd_rsp_addr_hi\n");
 			return -1;
 		}
 	}
@@ -1678,25 +1699,28 @@
 		mwifiex_pm_wakeup_card(adapter);
 
 	if (adapter->event_received) {
-		dev_dbg(adapter->dev, "info: Event being processed, "
-			"do not process this interrupt just yet\n");
+		mwifiex_dbg(adapter, EVENT,
+			    "info: Event being processed,\t"
+			    "do not process this interrupt just yet\n");
 		return 0;
 	}
 
 	if (rdptr >= MWIFIEX_MAX_EVT_BD) {
-		dev_dbg(adapter->dev, "info: Invalid read pointer...\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "info: Invalid read pointer...\n");
 		return -1;
 	}
 
 	/* Read the event ring write pointer set by firmware */
 	if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) {
-		dev_err(adapter->dev,
-			"EventReady: failed to read reg->evt_wrptr\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "EventReady: failed to read reg->evt_wrptr\n");
 		return -1;
 	}
 
-	dev_dbg(adapter->dev, "info: EventReady: Initial <Rd: 0x%x, Wr: 0x%x>",
-		card->evtbd_rdptr, wrptr);
+	mwifiex_dbg(adapter, EVENT,
+		    "info: EventReady: Initial <Rd: 0x%x, Wr: 0x%x>",
+		    card->evtbd_rdptr, wrptr);
 	if (((wrptr & MWIFIEX_EVTBD_MASK) != (card->evtbd_rdptr
 					      & MWIFIEX_EVTBD_MASK)) ||
 	    ((wrptr & reg->evt_rollover_ind) ==
@@ -1705,7 +1729,8 @@
 		__le16 data_len = 0;
 		u16 evt_len;
 
-		dev_dbg(adapter->dev, "info: Read Index: %d\n", rdptr);
+		mwifiex_dbg(adapter, INFO,
+			    "info: Read Index: %d\n", rdptr);
 		skb_cmd = card->evt_buf_list[rdptr];
 		mwifiex_unmap_pci_memory(adapter, skb_cmd, PCI_DMA_FROMDEVICE);
 
@@ -1721,9 +1746,10 @@
 		   len is 2 bytes followed by type which is 2 bytes */
 		memcpy(&data_len, skb_cmd->data, sizeof(__le16));
 		evt_len = le16_to_cpu(data_len);
-
+		skb_trim(skb_cmd, evt_len);
 		skb_pull(skb_cmd, INTF_HEADER_LEN);
-		dev_dbg(adapter->dev, "info: Event length: %d\n", evt_len);
+		mwifiex_dbg(adapter, EVENT,
+			    "info: Event length: %d\n", evt_len);
 
 		if ((evt_len > 0) && (evt_len  < MAX_EVENT_SIZE))
 			memcpy(adapter->event_body, skb_cmd->data +
@@ -1740,8 +1766,8 @@
 	} else {
 		if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
 				      CPU_INTR_EVENT_DONE)) {
-			dev_warn(adapter->dev,
-				 "Write register failed\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "Write register failed\n");
 			return -1;
 		}
 	}
@@ -1766,15 +1792,16 @@
 		return 0;
 
 	if (rdptr >= MWIFIEX_MAX_EVT_BD) {
-		dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n",
-			rdptr);
+		mwifiex_dbg(adapter, ERROR,
+			    "event_complete: Invalid rdptr 0x%x\n",
+			    rdptr);
 		return -EINVAL;
 	}
 
 	/* Read the event ring write pointer set by firmware */
 	if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) {
-		dev_err(adapter->dev,
-			"event_complete: failed to read reg->evt_wrptr\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "event_complete: failed to read reg->evt_wrptr\n");
 		return -1;
 	}
 
@@ -1791,9 +1818,9 @@
 		desc->flags = 0;
 		skb = NULL;
 	} else {
-		dev_dbg(adapter->dev,
-			"info: ERROR: buf still valid at index %d, <%p, %p>\n",
-			rdptr, card->evt_buf_list[rdptr], skb);
+		mwifiex_dbg(adapter, ERROR,
+			    "info: ERROR: buf still valid at index %d, <%p, %p>\n",
+			    rdptr, card->evt_buf_list[rdptr], skb);
 	}
 
 	if ((++card->evtbd_rdptr & MWIFIEX_EVTBD_MASK) == MWIFIEX_MAX_EVT_BD) {
@@ -1802,18 +1829,20 @@
 					reg->evt_rollover_ind);
 	}
 
-	dev_dbg(adapter->dev, "info: Updated <Rd: 0x%x, Wr: 0x%x>",
-		card->evtbd_rdptr, wrptr);
+	mwifiex_dbg(adapter, EVENT,
+		    "info: Updated <Rd: 0x%x, Wr: 0x%x>",
+		    card->evtbd_rdptr, wrptr);
 
 	/* Write the event ring read pointer in to reg->evt_rdptr */
 	if (mwifiex_write_reg(adapter, reg->evt_rdptr,
 			      card->evtbd_rdptr)) {
-		dev_err(adapter->dev,
-			"event_complete: failed to read reg->evt_rdptr\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "event_complete: failed to read reg->evt_rdptr\n");
 		return -1;
 	}
 
-	dev_dbg(adapter->dev, "info: Check Events Again\n");
+	mwifiex_dbg(adapter, EVENT,
+		    "info: Check Events Again\n");
 	ret = mwifiex_pcie_process_event_ready(adapter);
 
 	return ret;
@@ -1840,17 +1869,18 @@
 	const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
 	if (!firmware || !firmware_len) {
-		dev_err(adapter->dev,
-			"No firmware image found! Terminating download\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "No firmware image found! Terminating download\n");
 		return -1;
 	}
 
-	dev_dbg(adapter->dev, "info: Downloading FW image (%d bytes)\n",
-		firmware_len);
+	mwifiex_dbg(adapter, INFO,
+		    "info: Downloading FW image (%d bytes)\n",
+		    firmware_len);
 
 	if (mwifiex_pcie_disable_host_int(adapter)) {
-		dev_err(adapter->dev,
-			"%s: Disabling interrupts failed.\n", __func__);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: Disabling interrupts failed.\n", __func__);
 		return -1;
 	}
 
@@ -1872,8 +1902,8 @@
 			ret = mwifiex_read_reg(adapter, reg->cmd_size,
 					       &len);
 			if (ret) {
-				dev_warn(adapter->dev,
-					 "Failed reading len from boot code\n");
+				mwifiex_dbg(adapter, FATAL,
+					    "Failed reading len from boot code\n");
 				goto done;
 			}
 			if (len)
@@ -1884,8 +1914,9 @@
 		if (!len) {
 			break;
 		} else if (len > MWIFIEX_UPLD_SIZE) {
-			pr_err("FW download failure @ %d, invalid length %d\n",
-			       offset, len);
+			mwifiex_dbg(adapter, ERROR,
+				    "FW download failure @ %d, invalid length %d\n",
+				    offset, len);
 			ret = -1;
 			goto done;
 		}
@@ -1895,14 +1926,16 @@
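+		/*
+		 * Bit 0 of the length reported by the boot code flags a CRC
+		 * error in the last block: clear the bit and resend the same
+		 * block, giving up after MAX_WRITE_IOMEM_RETRY attempts.
+		 */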
 		if (len & BIT(0)) {
 			block_retry_cnt++;
 			if (block_retry_cnt > MAX_WRITE_IOMEM_RETRY) {
-				pr_err("FW download failure @ %d, over max "
-				       "retry count\n", offset);
+				mwifiex_dbg(adapter, ERROR,
+					    "FW download failure @ %d, over max\t"
+					    "retry count\n", offset);
 				ret = -1;
 				goto done;
 			}
-			dev_err(adapter->dev, "FW CRC error indicated by the "
-				"helper: len = 0x%04X, txlen = %d\n",
-				len, txlen);
+			mwifiex_dbg(adapter, ERROR,
+				    "FW CRC error indicated by the\t"
+				    "helper: len = 0x%04X, txlen = %d\n",
+				    len, txlen);
 			len &= ~BIT(0);
 			/* Setting this to 0 to resend from same offset */
 			txlen = 0;
@@ -1913,7 +1946,7 @@
 			if (firmware_len - offset < txlen)
 				txlen = firmware_len - offset;
 
-			dev_dbg(adapter->dev, ".");
+			mwifiex_dbg(adapter, INFO, ".");
 
 			tx_blocks = (txlen + card->pcie.blksz_fw_dl - 1) /
 				    card->pcie.blksz_fw_dl;
@@ -1927,8 +1960,8 @@
 
 		/* Send the boot command to device */
 		if (mwifiex_pcie_send_boot_cmd(adapter, skb)) {
-			dev_err(adapter->dev,
-				"Failed to send firmware download command\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "Failed to send firmware download command\n");
 			ret = -1;
 			goto done;
 		}
@@ -1937,9 +1970,10 @@
 		do {
 			if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS,
 					     &ireg_intr)) {
-				dev_err(adapter->dev, "%s: Failed to read "
-					"interrupt status during fw dnld.\n",
-					__func__);
+				mwifiex_dbg(adapter, ERROR,
+					    "%s: Failed to read\t"
+					    "interrupt status during fw dnld.\n",
+					    __func__);
 				mwifiex_unmap_pci_memory(adapter, skb,
 							 PCI_DMA_TODEVICE);
 				ret = -1;
@@ -1953,8 +1987,8 @@
 		offset += txlen;
 	} while (true);
 
-	dev_notice(adapter->dev,
-		   "info: FW download over, size %d bytes\n", offset);
+	mwifiex_dbg(adapter, MSG,
+		    "info: FW download over, size %d bytes\n", offset);
 
 	ret = 0;
 
@@ -1980,15 +2014,17 @@
-	/* Mask spurios interrupts */
+	/* Mask spurious interrupts */
 	if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS_MASK,
 			      HOST_INTR_MASK)) {
-		dev_warn(adapter->dev, "Write register failed\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "Write register failed\n");
 		return -1;
 	}
 
-	dev_dbg(adapter->dev, "Setting driver ready signature\n");
+	mwifiex_dbg(adapter, INFO,
+		    "Setting driver ready signature\n");
 	if (mwifiex_write_reg(adapter, reg->drv_rdy,
 			      FIRMWARE_READY_PCIE)) {
-		dev_err(adapter->dev,
-			"Failed to write driver ready signature\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "Failed to write driver ready signature\n");
 		return -1;
 	}
 
@@ -2015,12 +2051,13 @@
 				     &winner_status))
 			ret = -1;
 		else if (!winner_status) {
-			dev_err(adapter->dev, "PCI-E is the winner\n");
+			mwifiex_dbg(adapter, INFO,
+				    "PCI-E is the winner\n");
 			adapter->winner = 1;
 		} else {
-			dev_err(adapter->dev,
-				"PCI-E is not the winner <%#x,%d>, exit dnld\n",
-				ret, adapter->winner);
+			mwifiex_dbg(adapter, ERROR,
+				    "PCI-E is not the winner <%#x,%d>, exit dnld\n",
+				    ret, adapter->winner);
 		}
 	}
 
@@ -2039,7 +2076,7 @@
 		return;
 
 	if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &pcie_ireg)) {
-		dev_warn(adapter->dev, "Read register failed\n");
+		mwifiex_dbg(adapter, ERROR, "Read register failed\n");
 		return;
 	}
 
@@ -2050,7 +2087,8 @@
 		/* Clear the pending interrupts */
 		if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS,
 				      ~pcie_ireg)) {
-			dev_warn(adapter->dev, "Write register failed\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "Write register failed\n");
 			return;
 		}
 		spin_lock_irqsave(&adapter->int_lock, flags);
@@ -2133,21 +2171,24 @@
 	while (pcie_ireg & HOST_INTR_MASK) {
 		if (pcie_ireg & HOST_INTR_DNLD_DONE) {
 			pcie_ireg &= ~HOST_INTR_DNLD_DONE;
-			dev_dbg(adapter->dev, "info: TX DNLD Done\n");
+			mwifiex_dbg(adapter, INTR,
+				    "info: TX DNLD Done\n");
 			ret = mwifiex_pcie_send_data_complete(adapter);
 			if (ret)
 				return ret;
 		}
 		if (pcie_ireg & HOST_INTR_UPLD_RDY) {
 			pcie_ireg &= ~HOST_INTR_UPLD_RDY;
-			dev_dbg(adapter->dev, "info: Rx DATA\n");
+			mwifiex_dbg(adapter, INTR,
+				    "info: Rx DATA\n");
 			ret = mwifiex_pcie_process_recv_data(adapter);
 			if (ret)
 				return ret;
 		}
 		if (pcie_ireg & HOST_INTR_EVENT_RDY) {
 			pcie_ireg &= ~HOST_INTR_EVENT_RDY;
-			dev_dbg(adapter->dev, "info: Rx EVENT\n");
+			mwifiex_dbg(adapter, INTR,
+				    "info: Rx EVENT\n");
 			ret = mwifiex_pcie_process_event_ready(adapter);
 			if (ret)
 				return ret;
@@ -2156,8 +2197,8 @@
 		if (pcie_ireg & HOST_INTR_CMD_DONE) {
 			pcie_ireg &= ~HOST_INTR_CMD_DONE;
 			if (adapter->cmd_sent) {
-				dev_dbg(adapter->dev,
-					"info: CMD sent Interrupt\n");
+				mwifiex_dbg(adapter, INTR,
+					    "info: CMD sent Interrupt\n");
 				adapter->cmd_sent = false;
 			}
 			/* Handle command response */
@@ -2169,8 +2210,8 @@
 		if (mwifiex_pcie_ok_to_access_hw(adapter)) {
 			if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS,
 					     &pcie_ireg)) {
-				dev_warn(adapter->dev,
-					 "Read register failed\n");
+				mwifiex_dbg(adapter, ERROR,
+					    "Read register failed\n");
 				return -1;
 			}
 
@@ -2178,16 +2219,17 @@
 				if (mwifiex_write_reg(adapter,
 						      PCIE_HOST_INT_STATUS,
 						      ~pcie_ireg)) {
-					dev_warn(adapter->dev,
-						 "Write register failed\n");
+					mwifiex_dbg(adapter, ERROR,
+						    "Write register failed\n");
 					return -1;
 				}
 			}
 
 		}
 	}
-	dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
-		adapter->cmd_sent, adapter->data_sent);
+	mwifiex_dbg(adapter, INTR,
+		    "info: cmd_sent=%d data_sent=%d\n",
+		    adapter->cmd_sent, adapter->data_sent);
 	if (adapter->ps_state != PS_STATE_SLEEP)
 		mwifiex_pcie_enable_host_int(adapter);
 
@@ -2209,7 +2251,8 @@
 				     struct mwifiex_tx_param *tx_param)
 {
 	if (!skb) {
-		dev_err(adapter->dev, "Passed NULL skb to %s\n", __func__);
+		mwifiex_dbg(adapter, ERROR,
+			    "Passed NULL skb to %s\n", __func__);
 		return -1;
 	}
 
@@ -2232,7 +2275,8 @@
 
 	ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, FW_DUMP_HOST_READY);
 	if (ret) {
-		dev_err(adapter->dev, "PCIE write err\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "PCIE write err\n");
 		return RDWR_STATUS_FAILURE;
 	}
 
@@ -2243,24 +2287,25 @@
 		if (doneflag && ctrl_data == doneflag)
 			return RDWR_STATUS_DONE;
 		if (ctrl_data != FW_DUMP_HOST_READY) {
-			dev_info(adapter->dev,
-				 "The ctrl reg was changed, re-try again!\n");
+			mwifiex_dbg(adapter, WARN,
+				    "The ctrl reg was changed, re-try again!\n");
 			ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl,
 						FW_DUMP_HOST_READY);
 			if (ret) {
-				dev_err(adapter->dev, "PCIE write err\n");
+				mwifiex_dbg(adapter, ERROR,
+					    "PCIE write err\n");
 				return RDWR_STATUS_FAILURE;
 			}
 		}
 		usleep_range(100, 200);
 	}
 
-	dev_err(adapter->dev, "Fail to pull ctrl_data\n");
+	mwifiex_dbg(adapter, ERROR, "Fail to pull ctrl_data\n");
 	return RDWR_STATUS_FAILURE;
 }
 
 /* This function dumps firmware memory to a file */
-static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
+static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
 {
 	struct pcie_service_card *card = adapter->card;
 	const struct mwifiex_pcie_card_reg *creg = card->pcie.reg;
@@ -2269,7 +2314,6 @@
 	enum rdwr_status stat;
 	u32 memory_size;
 	int ret;
-	static char *env[] = { "DRIVER=mwifiex_pcie", "EVENT=fw_dump", NULL };
 
 	if (!card->pcie.can_dump_fw)
 		return;
@@ -2284,12 +2328,12 @@
 		entry->mem_size = 0;
 	}
 
-	dev_info(adapter->dev, "== mwifiex firmware dump start ==\n");
+	mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump start ==\n");
 
 	/* Read the number of the memories which will dump */
 	stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
 	if (stat == RDWR_STATUS_FAILURE)
-		goto done;
+		return;
 
 	reg = creg->fw_dump_start;
 	mwifiex_read_reg_byte(adapter, reg, &dump_num);
@@ -2300,7 +2344,7 @@
 
 		stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
 		if (stat == RDWR_STATUS_FAILURE)
-			goto done;
+			return;
 
 		memory_size = 0;
 		reg = creg->fw_dump_start;
@@ -2311,36 +2355,36 @@
 		}
 
 		if (memory_size == 0) {
-			dev_info(adapter->dev, "Firmware dump Finished!\n");
+			mwifiex_dbg(adapter, MSG, "Firmware dump Finished!\n");
 			ret = mwifiex_write_reg(adapter, creg->fw_dump_ctrl,
 						FW_DUMP_READ_DONE);
 			if (ret) {
-				dev_err(adapter->dev, "PCIE write err\n");
-				goto done;
+				mwifiex_dbg(adapter, ERROR, "PCIE write err\n");
+				return;
 			}
 			break;
 		}
 
-		dev_info(adapter->dev,
-			 "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
+		mwifiex_dbg(adapter, DUMP,
+			    "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
 		entry->mem_ptr = vmalloc(memory_size + 1);
 		entry->mem_size = memory_size;
 		if (!entry->mem_ptr) {
-			dev_err(adapter->dev,
-				"Vmalloc %s failed\n", entry->mem_name);
-			goto done;
+			mwifiex_dbg(adapter, ERROR,
+				    "Vmalloc %s failed\n", entry->mem_name);
+			return;
 		}
 		dbg_ptr = entry->mem_ptr;
 		end_ptr = dbg_ptr + memory_size;
 
 		doneflag = entry->done_flag;
-		dev_info(adapter->dev, "Start %s output, please wait...\n",
-			 entry->mem_name);
+		mwifiex_dbg(adapter, DUMP, "Start %s output, please wait...\n",
+			    entry->mem_name);
 
 		do {
 			stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
 			if (RDWR_STATUS_FAILURE == stat)
-				goto done;
+				return;
 
 			reg_start = creg->fw_dump_start;
 			reg_end = creg->fw_dump_end;
@@ -2349,46 +2393,49 @@
 				if (dbg_ptr < end_ptr) {
 					dbg_ptr++;
 				} else {
-					dev_err(adapter->dev,
-						"Allocated buf not enough\n");
-					goto done;
+					mwifiex_dbg(adapter, ERROR,
+						    "Allocated buf not enough\n");
+					return;
 				}
 			}
 
 			if (stat != RDWR_STATUS_DONE)
 				continue;
 
-			dev_info(adapter->dev, "%s done: size=0x%tx\n",
-				 entry->mem_name, dbg_ptr - entry->mem_ptr);
+			mwifiex_dbg(adapter, DUMP,
+				    "%s done: size=0x%tx\n",
+				    entry->mem_name, dbg_ptr - entry->mem_ptr);
 			break;
 		} while (true);
 	}
-	dev_info(adapter->dev, "== mwifiex firmware dump end ==\n");
+	mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump end ==\n");
+}
 
-	kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env);
-
-done:
-	adapter->curr_mem_idx = 0;
+static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter)
+{
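+	/* Device dump sequence: collect driver state, pull the firmware
+	 * memory regions over PCIe, then upload the combined dump.
+	 */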
+	mwifiex_drv_info_dump(adapter);
+	mwifiex_pcie_fw_dump(adapter);
+	mwifiex_upload_device_dump(adapter);
 }
 
 static unsigned long iface_work_flags;
 static struct mwifiex_adapter *save_adapter;
 static void mwifiex_pcie_work(struct work_struct *work)
 {
-	if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
+	if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
 			       &iface_work_flags))
-		mwifiex_pcie_fw_dump_work(save_adapter);
+		mwifiex_pcie_device_dump_work(save_adapter);
 }
 
 static DECLARE_WORK(pcie_work, mwifiex_pcie_work);
 /* This function dumps FW information */
-static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
+static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter)
 {
 	save_adapter = adapter;
-	if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags))
+	if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags))
 		return;
 
-	set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags);
+	set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags);
 
 	schedule_work(&pcie_work);
 }
@@ -2418,45 +2465,50 @@
 
 	pci_set_master(pdev);
 
-	dev_dbg(adapter->dev, "try set_consistent_dma_mask(32)\n");
+	mwifiex_dbg(adapter, INFO,
+		    "try set_consistent_dma_mask(32)\n");
 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (ret) {
-		dev_err(adapter->dev, "set_dma_mask(32) failed\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "set_dma_mask(32) failed\n");
 		goto err_set_dma_mask;
 	}
 
 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (ret) {
-		dev_err(adapter->dev, "set_consistent_dma_mask(64) failed\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "set_consistent_dma_mask(64) failed\n");
 		goto err_set_dma_mask;
 	}
 
 	ret = pci_request_region(pdev, 0, DRV_NAME);
 	if (ret) {
-		dev_err(adapter->dev, "req_reg(0) error\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "req_reg(0) error\n");
 		goto err_req_region0;
 	}
 	card->pci_mmap = pci_iomap(pdev, 0, 0);
 	if (!card->pci_mmap) {
-		dev_err(adapter->dev, "iomap(0) error\n");
+		mwifiex_dbg(adapter, ERROR, "iomap(0) error\n");
 		ret = -EIO;
 		goto err_iomap0;
 	}
 	ret = pci_request_region(pdev, 2, DRV_NAME);
 	if (ret) {
-		dev_err(adapter->dev, "req_reg(2) error\n");
+		mwifiex_dbg(adapter, ERROR, "req_reg(2) error\n");
 		goto err_req_region2;
 	}
 	card->pci_mmap1 = pci_iomap(pdev, 2, 0);
 	if (!card->pci_mmap1) {
-		dev_err(adapter->dev, "iomap(2) error\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "iomap(2) error\n");
 		ret = -EIO;
 		goto err_iomap2;
 	}
 
-	dev_dbg(adapter->dev,
-		"PCI memory map Virt0: %p PCI memory map Virt2: %p\n",
-		card->pci_mmap, card->pci_mmap1);
+	mwifiex_dbg(adapter, INFO,
+		    "PCI memory map Virt0: %p PCI memory map Virt2: %p\n",
+		    card->pci_mmap, card->pci_mmap1);
 
 	card->cmdrsp_buf = NULL;
 	ret = mwifiex_pcie_create_txbd_ring(adapter);
@@ -2521,10 +2573,11 @@
 	const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
 	if (user_rmmod) {
-		dev_dbg(adapter->dev, "Clearing driver ready signature\n");
+		mwifiex_dbg(adapter, INFO,
+			    "Clearing driver ready signature\n");
 		if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000))
-			dev_err(adapter->dev,
-				"Failed to write driver not-ready signature\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "Failed to write driver not-ready signature\n");
 	}
 
 	if (pdev) {
@@ -2555,7 +2608,8 @@
 	ret = request_irq(pdev->irq, mwifiex_pcie_interrupt, IRQF_SHARED,
 			  "MRVL_PCIE", pdev);
 	if (ret) {
-		pr_err("request_irq failed: ret=%d\n", ret);
+		mwifiex_dbg(adapter, ERROR,
+			    "request_irq failed: ret=%d\n", ret);
 		adapter->card = NULL;
 		return -1;
 	}
@@ -2582,7 +2636,8 @@
 	const struct mwifiex_pcie_card_reg *reg;
 
 	if (card) {
-		dev_dbg(adapter->dev, "%s(): calling free_irq()\n", __func__);
+		mwifiex_dbg(adapter, INFO,
+			    "%s(): calling free_irq()\n", __func__);
 		free_irq(card->dev->irq, card->dev);
 
 		reg = card->pcie.reg;
@@ -2617,7 +2672,7 @@
 	.cleanup_mpa_buf =		NULL,
 	.init_fw_port =			mwifiex_pcie_init_fw_port,
 	.clean_pcie_ring =		mwifiex_clean_pcie_ring_buf,
-	.fw_dump =			mwifiex_pcie_fw_dump,
+	.device_dump =			mwifiex_pcie_device_dump,
 };
 
 /*
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 0ffdb7c..baf9715 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -241,20 +241,21 @@
 	    * LinkSys WRT54G && bss_desc->privacy
 	    */
 	 ) {
-		dev_dbg(priv->adapter->dev, "info: %s: WPA:"
-			" wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s "
-			"EncMode=%#x privacy=%#x\n", __func__,
-			(bss_desc->bcn_wpa_ie) ?
-			(*(bss_desc->bcn_wpa_ie)).
-			vend_hdr.element_id : 0,
-			(bss_desc->bcn_rsn_ie) ?
-			(*(bss_desc->bcn_rsn_ie)).
-			ieee_hdr.element_id : 0,
-			(priv->sec_info.wep_enabled) ? "e" : "d",
-			(priv->sec_info.wpa_enabled) ? "e" : "d",
-			(priv->sec_info.wpa2_enabled) ? "e" : "d",
-			priv->sec_info.encryption_mode,
-			bss_desc->privacy);
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: %s: WPA:\t"
+			    "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t"
+			    "EncMode=%#x privacy=%#x\n", __func__,
+			    (bss_desc->bcn_wpa_ie) ?
+			    (*bss_desc->bcn_wpa_ie).
+			    vend_hdr.element_id : 0,
+			    (bss_desc->bcn_rsn_ie) ?
+			    (*bss_desc->bcn_rsn_ie).
+			    ieee_hdr.element_id : 0,
+			    (priv->sec_info.wep_enabled) ? "e" : "d",
+			    (priv->sec_info.wpa_enabled) ? "e" : "d",
+			    (priv->sec_info.wpa2_enabled) ? "e" : "d",
+			    priv->sec_info.encryption_mode,
+			    bss_desc->privacy);
 		return true;
 	}
 	return false;
@@ -277,20 +278,21 @@
 		 * Privacy bit may NOT be set in some APs like
 		 * LinkSys WRT54G && bss_desc->privacy
 		 */
-		dev_dbg(priv->adapter->dev, "info: %s: WPA2: "
-			" wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s "
-			"EncMode=%#x privacy=%#x\n", __func__,
-			(bss_desc->bcn_wpa_ie) ?
-			(*(bss_desc->bcn_wpa_ie)).
-			vend_hdr.element_id : 0,
-			(bss_desc->bcn_rsn_ie) ?
-			(*(bss_desc->bcn_rsn_ie)).
-			ieee_hdr.element_id : 0,
-			(priv->sec_info.wep_enabled) ? "e" : "d",
-			(priv->sec_info.wpa_enabled) ? "e" : "d",
-			(priv->sec_info.wpa2_enabled) ? "e" : "d",
-			priv->sec_info.encryption_mode,
-			bss_desc->privacy);
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: %s: WPA2:\t"
+			    "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t"
+			    "EncMode=%#x privacy=%#x\n", __func__,
+			    (bss_desc->bcn_wpa_ie) ?
+			    (*bss_desc->bcn_wpa_ie).
+			    vend_hdr.element_id : 0,
+			    (bss_desc->bcn_rsn_ie) ?
+			    (*bss_desc->bcn_rsn_ie).
+			    ieee_hdr.element_id : 0,
+			    (priv->sec_info.wep_enabled) ? "e" : "d",
+			    (priv->sec_info.wpa_enabled) ? "e" : "d",
+			    (priv->sec_info.wpa2_enabled) ? "e" : "d",
+			    priv->sec_info.encryption_mode,
+			    bss_desc->privacy);
 		return true;
 	}
 	return false;
@@ -333,18 +335,19 @@
 	    ((!bss_desc->bcn_rsn_ie) ||
 	     ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
 	    priv->sec_info.encryption_mode && bss_desc->privacy) {
-		dev_dbg(priv->adapter->dev, "info: %s: dynamic "
-			"WEP: wpa_ie=%#x wpa2_ie=%#x "
-			"EncMode=%#x privacy=%#x\n",
-			__func__,
-			(bss_desc->bcn_wpa_ie) ?
-			(*(bss_desc->bcn_wpa_ie)).
-			vend_hdr.element_id : 0,
-			(bss_desc->bcn_rsn_ie) ?
-			(*(bss_desc->bcn_rsn_ie)).
-			ieee_hdr.element_id : 0,
-			priv->sec_info.encryption_mode,
-			bss_desc->privacy);
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: %s: dynamic\t"
+			    "WEP: wpa_ie=%#x wpa2_ie=%#x\t"
+			    "EncMode=%#x privacy=%#x\n",
+			    __func__,
+			    (bss_desc->bcn_wpa_ie) ?
+			    (*bss_desc->bcn_wpa_ie).
+			    vend_hdr.element_id : 0,
+			    (bss_desc->bcn_rsn_ie) ?
+			    (*bss_desc->bcn_rsn_ie).
+			    ieee_hdr.element_id : 0,
+			    priv->sec_info.encryption_mode,
+			    bss_desc->privacy);
 		return true;
 	}
 	return false;
@@ -383,19 +386,20 @@
 		return 0;
 
 	if (priv->wps.session_enable) {
-		dev_dbg(adapter->dev,
-			"info: return success directly in WPS period\n");
+		mwifiex_dbg(adapter, IOCTL,
+			    "info: return success directly in WPS period\n");
 		return 0;
 	}
 
 	if (bss_desc->chan_sw_ie_present) {
-		dev_err(adapter->dev,
-			"Don't connect to AP with WLAN_EID_CHANNEL_SWITCH\n");
+		mwifiex_dbg(adapter, INFO,
+			    "Don't connect to AP with WLAN_EID_CHANNEL_SWITCH\n");
 		return -1;
 	}
 
 	if (mwifiex_is_bss_wapi(priv, bss_desc)) {
-		dev_dbg(adapter->dev, "info: return success for WAPI AP\n");
+		mwifiex_dbg(adapter, INFO,
+			    "info: return success for WAPI AP\n");
 		return 0;
 	}
 
@@ -405,7 +409,8 @@
 			return 0;
 		} else if (mwifiex_is_bss_static_wep(priv, bss_desc)) {
 			/* Static WEP enabled */
-			dev_dbg(adapter->dev, "info: Disable 11n in WEP mode.\n");
+			mwifiex_dbg(adapter, INFO,
+				    "info: Disable 11n in WEP mode.\n");
 			bss_desc->disable_11n = true;
 			return 0;
 		} else if (mwifiex_is_bss_wpa(priv, bss_desc)) {
@@ -418,9 +423,9 @@
 
 				if (mwifiex_is_wpa_oui_present
 						(bss_desc, CIPHER_SUITE_TKIP)) {
-					dev_dbg(adapter->dev,
-						"info: Disable 11n if AES "
-						"is not supported by AP\n");
+					mwifiex_dbg(adapter, INFO,
+						    "info: Disable 11n if AES\t"
+						    "is not supported by AP\n");
 					bss_desc->disable_11n = true;
 				} else {
 					return -1;
@@ -437,9 +442,9 @@
 
 				if (mwifiex_is_rsn_oui_present
 						(bss_desc, CIPHER_SUITE_TKIP)) {
-					dev_dbg(adapter->dev,
-						"info: Disable 11n if AES "
-						"is not supported by AP\n");
+					mwifiex_dbg(adapter, INFO,
+						    "info: Disable 11n if AES\t"
+						    "is not supported by AP\n");
 					bss_desc->disable_11n = true;
 				} else {
 					return -1;
@@ -455,17 +460,18 @@
 		}
 
 		/* Security doesn't match */
-		dev_dbg(adapter->dev,
-			"info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s "
-			"WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n", __func__,
-			(bss_desc->bcn_wpa_ie) ?
-			(*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id : 0,
-			(bss_desc->bcn_rsn_ie) ?
-			(*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id : 0,
-			(priv->sec_info.wep_enabled) ? "e" : "d",
-			(priv->sec_info.wpa_enabled) ? "e" : "d",
-			(priv->sec_info.wpa2_enabled) ? "e" : "d",
-			priv->sec_info.encryption_mode, bss_desc->privacy);
+		mwifiex_dbg(adapter, ERROR,
+			    "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s\t"
+			    "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n",
+			    __func__,
+			    (bss_desc->bcn_wpa_ie) ?
+			    (*bss_desc->bcn_wpa_ie).vend_hdr.element_id : 0,
+			    (bss_desc->bcn_rsn_ie) ?
+			    (*bss_desc->bcn_rsn_ie).ieee_hdr.element_id : 0,
+			    (priv->sec_info.wep_enabled) ? "e" : "d",
+			    (priv->sec_info.wpa_enabled) ? "e" : "d",
+			    (priv->sec_info.wpa2_enabled) ? "e" : "d",
+			    priv->sec_info.encryption_mode, bss_desc->privacy);
 		return -1;
 	}
 
@@ -560,7 +566,8 @@
 	else
 		rates_size = mwifiex_get_supported_rates(priv, rates);
 
-	dev_dbg(priv->adapter->dev, "info: SCAN_CMD: Rates size = %d\n",
+	mwifiex_dbg(priv->adapter, CMD,
+		    "info: SCAN_CMD: Rates size = %d\n",
 		rates_size);
 	rates_tlv = (struct mwifiex_ie_types_rates_param_set *)tlv_pos;
 	rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
@@ -600,9 +607,9 @@
 	u8 radio_type;
 
 	if (!scan_cfg_out || !chan_tlv_out || !scan_chan_list) {
-		dev_dbg(priv->adapter->dev,
-			"info: Scan: Null detect: %p, %p, %p\n",
-		       scan_cfg_out, chan_tlv_out, scan_chan_list);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "info: Scan: Null detect: %p, %p, %p\n",
+			    scan_cfg_out, chan_tlv_out, scan_chan_list);
 		return -1;
 	}
 
@@ -645,16 +652,16 @@
 			}
 
 			radio_type = tmp_chan_list->radio_type;
-			dev_dbg(priv->adapter->dev,
-				"info: Scan: Chan(%3d), Radio(%d),"
-				" Mode(%d, %d), Dur(%d)\n",
-				tmp_chan_list->chan_number,
-				tmp_chan_list->radio_type,
-				tmp_chan_list->chan_scan_mode_bitmap
-				& MWIFIEX_PASSIVE_SCAN,
-				(tmp_chan_list->chan_scan_mode_bitmap
-				 & MWIFIEX_DISABLE_CHAN_FILT) >> 1,
-				le16_to_cpu(tmp_chan_list->max_scan_time));
+			mwifiex_dbg(priv->adapter, INFO,
+				    "info: Scan: Chan(%3d), Radio(%d),\t"
+				    "Mode(%d, %d), Dur(%d)\n",
+				    tmp_chan_list->chan_number,
+				    tmp_chan_list->radio_type,
+				    tmp_chan_list->chan_scan_mode_bitmap
+				    & MWIFIEX_PASSIVE_SCAN,
+				    (tmp_chan_list->chan_scan_mode_bitmap
+				    & MWIFIEX_DISABLE_CHAN_FILT) >> 1,
+				    le16_to_cpu(tmp_chan_list->max_scan_time));
 
 			/* Copy the current channel TLV to the command being
 			   prepared */
@@ -718,9 +725,11 @@
 		/* The total scan time should be less than scan command timeout
 		   value */
 		if (total_scan_time > MWIFIEX_MAX_TOTAL_SCAN_TIME) {
-			dev_err(priv->adapter->dev, "total scan time %dms"
-				" is over limit (%dms), scan skipped\n",
-				total_scan_time, MWIFIEX_MAX_TOTAL_SCAN_TIME);
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "total scan time %dms\t"
+				    "is over limit (%dms), scan skipped\n",
+				    total_scan_time,
+				    MWIFIEX_MAX_TOTAL_SCAN_TIME);
 			ret = -1;
 			break;
 		}
@@ -905,9 +914,10 @@
 			tlv_pos += (sizeof(wildcard_ssid_tlv->header)
 				+ le16_to_cpu(wildcard_ssid_tlv->header.len));
 
-			dev_dbg(adapter->dev, "info: scan: ssid[%d]: %s, %d\n",
-				i, wildcard_ssid_tlv->ssid,
-				wildcard_ssid_tlv->max_ssid_length);
+			mwifiex_dbg(adapter, INFO,
+				    "info: scan: ssid[%d]: %s, %d\n",
+				    i, wildcard_ssid_tlv->ssid,
+				    wildcard_ssid_tlv->max_ssid_length);
 
 			/* Empty wildcard ssid with a maxlen will match many or
 			   potentially all SSIDs (maxlen == 32), therefore do
@@ -928,8 +938,9 @@
 			*filtered_scan = true;
 
 		if (user_scan_in->scan_chan_gap) {
-			dev_dbg(adapter->dev, "info: scan: channel gap = %d\n",
-				user_scan_in->scan_chan_gap);
+			mwifiex_dbg(adapter, INFO,
+				    "info: scan: channel gap = %d\n",
+				    user_scan_in->scan_chan_gap);
 			*max_chan_per_scan =
 					MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN;
 
@@ -961,8 +972,9 @@
 	   add tlv */
 	if (num_probes) {
 
-		dev_dbg(adapter->dev, "info: scan: num_probes = %d\n",
-			num_probes);
+		mwifiex_dbg(adapter, INFO,
+			    "info: scan: num_probes = %d\n",
+			    num_probes);
 
 		num_probes_tlv = (struct mwifiex_ie_types_num_probes *) tlv_pos;
 		num_probes_tlv->header.type = cpu_to_le16(TLV_TYPE_NUMPROBES);
@@ -1003,7 +1015,8 @@
 
 	if (user_scan_in && user_scan_in->chan_list[0].chan_number) {
 
-		dev_dbg(adapter->dev, "info: Scan: Using supplied channel list\n");
+		mwifiex_dbg(adapter, INFO,
+			    "info: Scan: Using supplied channel list\n");
 
 		for (chan_idx = 0;
 		     chan_idx < MWIFIEX_USER_SCAN_CHAN_MAX &&
@@ -1056,13 +1069,13 @@
 		    (user_scan_in->chan_list[0].chan_number ==
 		     priv->curr_bss_params.bss_descriptor.channel)) {
 			*scan_current_only = true;
-			dev_dbg(adapter->dev,
-				"info: Scan: Scanning current channel only\n");
+			mwifiex_dbg(adapter, INFO,
+				    "info: Scan: Scanning current channel only\n");
 		}
 		chan_num = chan_idx;
 	} else {
-		dev_dbg(adapter->dev,
-			"info: Scan: Creating full region channel list\n");
+		mwifiex_dbg(adapter, INFO,
+			    "info: Scan: Creating full region channel list\n");
 		chan_num = mwifiex_scan_create_channel_list(priv, user_scan_in,
 							    scan_chan_list,
 							    *filtered_scan);
@@ -1094,8 +1107,9 @@
 	tlv_buf_left = tlv_buf_size;
 	*tlv_data = NULL;
 
-	dev_dbg(adapter->dev, "info: SCAN_RESP: tlv_buf_size = %d\n",
-		tlv_buf_size);
+	mwifiex_dbg(adapter, INFO,
+		    "info: SCAN_RESP: tlv_buf_size = %d\n",
+		    tlv_buf_size);
 
 	while (tlv_buf_left >= sizeof(struct mwifiex_ie_types_header)) {
 
@@ -1103,26 +1117,31 @@
 		tlv_len = le16_to_cpu(current_tlv->header.len);
 
 		if (sizeof(tlv->header) + tlv_len > tlv_buf_left) {
-			dev_err(adapter->dev, "SCAN_RESP: TLV buffer corrupt\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "SCAN_RESP: TLV buffer corrupt\n");
 			break;
 		}
 
 		if (req_tlv_type == tlv_type) {
 			switch (tlv_type) {
 			case TLV_TYPE_TSFTIMESTAMP:
-				dev_dbg(adapter->dev, "info: SCAN_RESP: TSF "
-					"timestamp TLV, len = %d\n", tlv_len);
+				mwifiex_dbg(adapter, INFO,
+					    "info: SCAN_RESP: TSF\t"
+					    "timestamp TLV, len = %d\n",
+					    tlv_len);
 				*tlv_data = current_tlv;
 				break;
 			case TLV_TYPE_CHANNELBANDLIST:
-				dev_dbg(adapter->dev, "info: SCAN_RESP: channel"
-					" band list TLV, len = %d\n", tlv_len);
+				mwifiex_dbg(adapter, INFO,
+					    "info: SCAN_RESP: channel\t"
+					    "band list TLV, len = %d\n",
+					    tlv_len);
 				*tlv_data = current_tlv;
 				break;
 			default:
-				dev_err(adapter->dev,
-					"SCAN_RESP: unhandled TLV = %d\n",
-				       tlv_type);
+				mwifiex_dbg(adapter, ERROR,
+					    "SCAN_RESP: unhandled TLV = %d\n",
+					    tlv_type);
 				/* Give up, this seems corrupted */
 				return;
 			}
@@ -1177,8 +1196,9 @@
 		total_ie_len = element_len + sizeof(struct ieee_types_header);
 
 		if (bytes_left < total_ie_len) {
-			dev_err(adapter->dev, "err: InterpretIE: in processing"
-				" IE, bytes left < IE length\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "err: InterpretIE: in processing\t"
+				    "IE, bytes left < IE length\n");
 			return -1;
 		}
 		switch (element_id) {
@@ -1186,9 +1206,9 @@
 			bss_entry->ssid.ssid_len = element_len;
 			memcpy(bss_entry->ssid.ssid, (current_ptr + 2),
 			       element_len);
-			dev_dbg(adapter->dev,
-				"info: InterpretIE: ssid: %-32s\n",
-				bss_entry->ssid.ssid);
+			mwifiex_dbg(adapter, INFO,
+				    "info: InterpretIE: ssid: %-32s\n",
+				    bss_entry->ssid.ssid);
 			break;
 
 		case WLAN_EID_SUPP_RATES:
@@ -1419,19 +1439,20 @@
 	unsigned long flags;
 
 	if (adapter->scan_processing) {
-		dev_err(adapter->dev, "cmd: Scan already in process...\n");
+		mwifiex_dbg(adapter, WARN,
+			    "cmd: Scan already in process...\n");
 		return -EBUSY;
 	}
 
 	if (priv->scan_block) {
-		dev_err(adapter->dev,
-			"cmd: Scan is blocked during association...\n");
+		mwifiex_dbg(adapter, WARN,
+			    "cmd: Scan is blocked during association...\n");
 		return -EBUSY;
 	}
 
 	if (adapter->surprise_removed || adapter->is_cmd_timedout) {
-		dev_err(adapter->dev,
-			"Ignore scan. Card removed or firmware in bad state\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "Ignore scan. Card removed or firmware in bad state\n");
 		return -EFAULT;
 	}
 
@@ -1478,7 +1499,8 @@
 
 			/* Perform internal scan synchronously */
 			if (!priv->scan_request) {
-				dev_dbg(adapter->dev, "wait internal scan\n");
+				mwifiex_dbg(adapter, INFO,
+					    "wait internal scan\n");
 				mwifiex_wait_queue_complete(adapter, cmd_node);
 			}
 		} else {
@@ -1553,8 +1575,8 @@
 			ret = mwifiex_is_network_compatible(priv, bss_desc,
 							    priv->bss_mode);
 			if (ret)
-				dev_err(priv->adapter->dev,
-					"Incompatible network settings\n");
+				mwifiex_dbg(priv->adapter, ERROR,
+					    "Incompatible network settings\n");
 			break;
 		default:
 			ret = 0;
@@ -1656,7 +1678,8 @@
 	 */
 	if (curr_bcn_bytes < ETH_ALEN + sizeof(u8) +
 	    sizeof(struct mwifiex_fixed_bcn_param)) {
-		dev_err(adapter->dev, "InterpretIE: not enough bytes left\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "InterpretIE: not enough bytes left\n");
 		return -EFAULT;
 	}
 
@@ -1669,7 +1692,8 @@
 		rssi = (-rssi) * 100;		/* Convert dBm to mBm */
 		current_ptr += sizeof(u8);
 		curr_bcn_bytes -= sizeof(u8);
-		dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
+		mwifiex_dbg(adapter, INFO,
+			    "info: InterpretIE: RSSI=%d\n", rssi);
 	} else {
 		rssi = rssi_val;
 	}
@@ -1682,14 +1706,16 @@
 	beacon_period = le16_to_cpu(bcn_param->beacon_period);
 
 	cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
-	dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
-		cap_info_bitmap);
+	mwifiex_dbg(adapter, INFO,
+		    "info: InterpretIE: capabilities=0x%X\n",
+		    cap_info_bitmap);
 
 	/* Rest of the current buffer are IE's */
 	ie_buf = current_ptr;
 	ie_len = curr_bcn_bytes;
-	dev_dbg(adapter->dev, "info: InterpretIE: IELength for this AP = %d\n",
-		curr_bcn_bytes);
+	mwifiex_dbg(adapter, INFO,
+		    "info: InterpretIE: IELength for this AP = %d\n",
+		    curr_bcn_bytes);
 
 	while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
 		u8 element_id, element_len;
@@ -1698,8 +1724,8 @@
 		element_len = *(current_ptr + 1);
 		if (curr_bcn_bytes < element_len +
 				sizeof(struct ieee_types_header)) {
-			dev_err(adapter->dev,
-				"%s: bytes left < IE length\n", __func__);
+			mwifiex_dbg(adapter, ERROR,
+				    "%s: bytes left < IE length\n", __func__);
 			return -EFAULT;
 		}
 		if (element_id == WLAN_EID_DS_PARAMS) {
@@ -1719,8 +1745,8 @@
 
 		/* Skip entry if on csa closed channel */
 		if (channel == priv->csa_chan) {
-			dev_dbg(adapter->dev,
-				"Dropping entry on csa closed channel\n");
+			mwifiex_dbg(adapter, WARN,
+				    "Dropping entry on csa closed channel\n");
 			return 0;
 		}
 
@@ -1751,7 +1777,7 @@
 			cfg80211_put_bss(priv->wdev.wiphy, bss);
 		}
 	} else {
-		dev_dbg(adapter->dev, "missing BSS channel IE\n");
+		mwifiex_dbg(adapter, WARN, "missing BSS channel IE\n");
 	}
 
 	return 0;
@@ -1765,7 +1791,8 @@
 	if (adapter->curr_cmd->wait_q_enabled) {
 		adapter->cmd_wait_q.status = 0;
 		if (!priv->scan_request) {
-			dev_dbg(adapter->dev, "complete internal scan\n");
+			mwifiex_dbg(adapter, INFO,
+				    "complete internal scan\n");
 			mwifiex_complete_cmd(adapter, adapter->curr_cmd);
 		}
 	}
@@ -1788,12 +1815,14 @@
 			mwifiex_complete_scan(priv);
 
 		if (priv->scan_request) {
-			dev_dbg(adapter->dev, "info: notifying scan done\n");
+			mwifiex_dbg(adapter, INFO,
+				    "info: notifying scan done\n");
 			cfg80211_scan_done(priv->scan_request, 0);
 			priv->scan_request = NULL;
 		} else {
 			priv->scan_aborting = false;
-			dev_dbg(adapter->dev, "info: scan already aborted\n");
+			mwifiex_dbg(adapter, INFO,
+				    "info: scan already aborted\n");
 		}
 	} else if ((priv->scan_aborting && !priv->scan_request) ||
 		   priv->scan_block) {
@@ -1809,12 +1838,14 @@
 		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
 
 		if (priv->scan_request) {
-			dev_dbg(adapter->dev, "info: aborting scan\n");
+			mwifiex_dbg(adapter, INFO,
+				    "info: aborting scan\n");
 			cfg80211_scan_done(priv->scan_request, 1);
 			priv->scan_request = NULL;
 		} else {
 			priv->scan_aborting = false;
-			dev_dbg(adapter->dev, "info: scan already aborted\n");
+			mwifiex_dbg(adapter, INFO,
+				    "info: scan already aborted\n");
 		}
 	} else {
 		/* Get scan command from scan_pending_q and put to
@@ -1877,8 +1908,9 @@
 
 
 	if (scan_rsp->number_of_sets > MWIFIEX_MAX_AP) {
-		dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
-			scan_rsp->number_of_sets);
+		mwifiex_dbg(adapter, ERROR,
+			    "SCAN_RESP: too many AP returned (%d)\n",
+			    scan_rsp->number_of_sets);
 		ret = -1;
 		goto check_next_scan;
 	}
@@ -1887,14 +1919,15 @@
 	mwifiex_11h_get_csa_closed_channel(priv);
 
 	bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
-	dev_dbg(adapter->dev, "info: SCAN_RESP: bss_descript_size %d\n",
-		bytes_left);
+	mwifiex_dbg(adapter, INFO,
+		    "info: SCAN_RESP: bss_descript_size %d\n",
+		    bytes_left);
 
 	scan_resp_size = le16_to_cpu(resp->size);
 
-	dev_dbg(adapter->dev,
-		"info: SCAN_RESP: returned %d APs before parsing\n",
-		scan_rsp->number_of_sets);
+	mwifiex_dbg(adapter, INFO,
+		    "info: SCAN_RESP: returned %d APs before parsing\n",
+		    scan_rsp->number_of_sets);
 
 	bss_info = scan_rsp->bss_desc_and_tlv_buffer;
 
@@ -2007,13 +2040,13 @@
 				       le16_to_cpu(fw_chan_stats->cca_scan_dur);
 		chan_stats.cca_busy_dur =
 				       le16_to_cpu(fw_chan_stats->cca_busy_dur);
-		dev_dbg(adapter->dev,
-			"chan=%d, noise=%d, total_network=%d scan_duration=%d, busy_duration=%d\n",
-			chan_stats.chan_num,
-			chan_stats.noise,
-			chan_stats.total_bss,
-			chan_stats.cca_scan_dur,
-			chan_stats.cca_busy_dur);
+		mwifiex_dbg(adapter, INFO,
+			    "chan=%d, noise=%d, total_network=%d scan_duration=%d, busy_duration=%d\n",
+			    chan_stats.chan_num,
+			    chan_stats.noise,
+			    chan_stats.total_bss,
+			    chan_stats.cca_scan_dur,
+			    chan_stats.cca_busy_dur);
 		memcpy(&adapter->chan_stats[adapter->survey_idx++], &chan_stats,
 		       sizeof(struct mwifiex_chan_stats));
 		fw_chan_stats++;
@@ -2035,7 +2068,7 @@
 	unsigned long cmd_flags, scan_flags;
 	bool complete_scan = false;
 
-	dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n");
+	mwifiex_dbg(adapter, INFO, "info: EXT scan returns successfully\n");
 
 	ext_scan_resp = &resp->params.ext_scan;
 
@@ -2048,8 +2081,8 @@
 		len = le16_to_cpu(tlv->len);
 
 		if (buf_left < (sizeof(struct mwifiex_ie_types_header) + len)) {
-			dev_err(adapter->dev,
-				"error processing scan response TLVs");
+			mwifiex_dbg(adapter, ERROR,
+				    "error processing scan response TLVs");
 			break;
 		}
 
@@ -2075,8 +2108,8 @@
 			cmd_ptr = (void *)cmd_node->cmd_skb->data;
 			if (le16_to_cpu(cmd_ptr->command) ==
 			    HostCmd_CMD_802_11_SCAN_EXT) {
-				dev_dbg(priv->adapter->dev,
-					"Scan pending in command pending list");
+				mwifiex_dbg(adapter, INFO,
+					    "Scan pending in command pending list");
 				complete_scan = false;
 				break;
 			}
@@ -2114,17 +2147,20 @@
 	u16 scan_resp_size = le16_to_cpu(event_scan->buf_size);
 
 	if (num_of_set > MWIFIEX_MAX_AP) {
-		dev_err(adapter->dev,
-			"EXT_SCAN: Invalid number of AP returned (%d)!!\n",
-			num_of_set);
+		mwifiex_dbg(adapter, ERROR,
+			    "EXT_SCAN: Invalid number of AP returned (%d)!!\n",
+			    num_of_set);
 		ret = -1;
 		goto check_next_scan;
 	}
 
 	bytes_left = scan_resp_size;
-	dev_dbg(adapter->dev,
-		"EXT_SCAN: size %d, returned %d APs...",
-		scan_resp_size, num_of_set);
+	mwifiex_dbg(adapter, INFO,
+		    "EXT_SCAN: size %d, returned %d APs...",
+		    scan_resp_size, num_of_set);
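+	/* Hex-dump the raw event buffer (event header plus scan response
+	 * TLVs) at CMD_D level so the full EXT_SCAN payload can be inspected.
+	 */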
+	mwifiex_dbg_dump(adapter, CMD_D, "EXT_SCAN buffer:", buf,
+			 scan_resp_size +
+			 sizeof(struct mwifiex_event_scan_result));
 
 	tlv = (struct mwifiex_ie_types_data *)scan_resp;
 
@@ -2132,7 +2168,8 @@
 		type = le16_to_cpu(tlv->header.type);
 		len = le16_to_cpu(tlv->header.len);
 		if (bytes_left < sizeof(struct mwifiex_ie_types_header) + len) {
-			dev_err(adapter->dev, "EXT_SCAN: Error bytes left < TLV length\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "EXT_SCAN: Error bytes left < TLV length\n");
 			break;
 		}
 		scan_rsp_tlv = NULL;
@@ -2158,8 +2195,9 @@
 			len = le16_to_cpu(tlv->header.len);
 			if (bytes_left_for_tlv <
 			    sizeof(struct mwifiex_ie_types_header) + len) {
-				dev_err(adapter->dev,
-					"EXT_SCAN: Error in processing TLV, bytes left < TLV length\n");
+				mwifiex_dbg(adapter, ERROR,
+					    "EXT_SCAN: Error in processing TLV,\t"
+					    "bytes left < TLV length\n");
 				scan_rsp_tlv = NULL;
 				bytes_left_for_tlv = 0;
 				continue;
@@ -2199,8 +2237,8 @@
 		if (scan_info_tlv) {
 			rssi = (s32)(s16)(le16_to_cpu(scan_info_tlv->rssi));
 			rssi *= 100;           /* Convert dBm to mBm */
-			dev_dbg(adapter->dev,
-				"info: InterpretIE: RSSI=%d\n", rssi);
+			mwifiex_dbg(adapter, INFO,
+				    "info: InterpretIE: RSSI=%d\n", rssi);
 			fw_tsf = le64_to_cpu(scan_info_tlv->tsf);
 			radio_type = &scan_info_tlv->radio_type;
 		} else {
@@ -2271,13 +2309,14 @@
 	struct mwifiex_user_scan_cfg *scan_cfg;
 
 	if (adapter->scan_processing) {
-		dev_err(adapter->dev, "cmd: Scan already in process...\n");
+		mwifiex_dbg(adapter, WARN,
+			    "cmd: Scan already in process...\n");
 		return -EBUSY;
 	}
 
 	if (priv->scan_block) {
-		dev_err(adapter->dev,
-			"cmd: Scan is blocked during association...\n");
+		mwifiex_dbg(adapter, WARN,
+			    "cmd: Scan is blocked during association...\n");
 		return -EBUSY;
 	}
 
@@ -2309,8 +2348,9 @@
 	int ret;
 
 	if (down_interruptible(&priv->async_sem)) {
-		dev_err(priv->adapter->dev, "%s: acquire semaphore\n",
-			__func__);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "%s: acquire semaphore fail\n",
+			    __func__);
 		return -1;
 	}
 
@@ -2400,8 +2440,9 @@
 
 	memcpy(priv->curr_bcn_buf, curr_bss->beacon_buf,
 	       curr_bss->beacon_buf_size);
-	dev_dbg(priv->adapter->dev, "info: current beacon saved %d\n",
-		priv->curr_bcn_size);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: current beacon saved %d\n",
+		    priv->curr_bcn_size);
 
 	curr_bss->beacon_buf = priv->curr_bcn_buf;
 
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index d10320f..a0b121f 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -166,7 +166,8 @@
 	adapter = card->adapter;
 
 	if (!adapter->is_suspended) {
-		dev_warn(adapter->dev, "device already resumed\n");
+		mwifiex_dbg(adapter, WARN,
+			    "device already resumed\n");
 		return 0;
 	}
 
@@ -191,8 +192,6 @@
 	struct mwifiex_adapter *adapter;
 	struct mwifiex_private *priv;
 
-	pr_debug("info: SDIO func num=%d\n", func->num);
-
 	card = sdio_get_drvdata(func);
 	if (!card)
 		return;
@@ -201,6 +200,8 @@
 	if (!adapter || !adapter->priv_num)
 		return;
 
+	mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num);
+
 	if (user_rmmod) {
 		if (adapter->is_suspended)
 			mwifiex_sdio_resume(adapter->dev);
@@ -257,12 +258,14 @@
 
 	/* Enable the Host Sleep */
 	if (!mwifiex_enable_hs(adapter)) {
-		dev_err(adapter->dev, "cmd: failed to suspend\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "cmd: failed to suspend\n");
 		adapter->hs_enabling = false;
 		return -EFAULT;
 	}
 
-	dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n");
+	mwifiex_dbg(adapter, INFO,
+		    "cmd: suspend with MMC_PM_KEEP_POWER\n");
 	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
 
 	/* Indicate device suspended */
@@ -386,8 +389,8 @@
 	u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
 
 	if (adapter->is_suspended) {
-		dev_err(adapter->dev,
-			"%s: not allowed while suspended\n", __func__);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: not allowed while suspended\n", __func__);
 		return -1;
 	}
 
@@ -434,7 +437,8 @@
  */
 static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
 {
-	dev_dbg(adapter->dev, "event: wakeup device...\n");
+	mwifiex_dbg(adapter, EVENT,
+		    "event: wakeup device...\n");
 
 	return mwifiex_write_reg(adapter, CONFIGURATION_REG, HOST_POWER_UP);
 }
@@ -446,7 +450,8 @@
  */
 static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
 {
-	dev_dbg(adapter->dev, "cmd: wakeup device completed\n");
+	mwifiex_dbg(adapter, EVENT,
+		    "cmd: wakeup device completed\n");
 
 	return mwifiex_write_reg(adapter, CONFIGURATION_REG, 0);
 }
@@ -524,7 +529,8 @@
 	else
 		return -1;
 cont:
-	pr_debug("info: SDIO FUNC1 IO port: %#x\n", adapter->ioport);
+	mwifiex_dbg(adapter, INFO,
+		    "info: SDIO FUNC1 IO port: %#x\n", adapter->ioport);
 
 	/* Set Host interrupt reset to read to clear */
 	if (!mwifiex_read_reg(adapter, card->reg->host_int_rsr_reg, &reg))
@@ -556,10 +562,12 @@
 		ret = mwifiex_write_data_sync(adapter, payload, pkt_len, port);
 		if (ret) {
 			i++;
-			dev_err(adapter->dev, "host_to_card, write iomem"
-					" (%d) failed: %d\n", i, ret);
+			mwifiex_dbg(adapter, ERROR,
+				    "host_to_card, write iomem\t"
+				    "(%d) failed: %d\n", i, ret);
 			if (mwifiex_write_reg(adapter, CONFIGURATION_REG, 0x04))
-				dev_err(adapter->dev, "write CFG reg failed\n");
+				mwifiex_dbg(adapter, ERROR,
+					    "write CFG reg failed\n");
 
 			ret = -1;
 			if (i > MAX_WRITE_IOMEM_RETRY)
@@ -584,7 +592,8 @@
 	const struct mwifiex_sdio_card_reg *reg = card->reg;
 	u32 rd_bitmap = card->mp_rd_bitmap;
 
-	dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%08x\n", rd_bitmap);
+	mwifiex_dbg(adapter, DATA,
+		    "data: mp_rd_bitmap=0x%08x\n", rd_bitmap);
 
 	if (card->supports_sdio_new_mode) {
 		if (!(rd_bitmap & reg->data_port_mask))
@@ -598,8 +607,9 @@
 	    (card->mp_rd_bitmap & CTRL_PORT_MASK)) {
 		card->mp_rd_bitmap &= (u32) (~CTRL_PORT_MASK);
 		*port = CTRL_PORT;
-		dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%08x\n",
-			*port, card->mp_rd_bitmap);
+		mwifiex_dbg(adapter, DATA,
+			    "data: port=%d mp_rd_bitmap=0x%08x\n",
+			    *port, card->mp_rd_bitmap);
 		return 0;
 	}
 
@@ -613,9 +623,9 @@
 	if (++card->curr_rd_port == card->max_ports)
 		card->curr_rd_port = reg->start_rd_port;
 
-	dev_dbg(adapter->dev,
-		"data: port=%d mp_rd_bitmap=0x%08x -> 0x%08x\n",
-		*port, rd_bitmap, card->mp_rd_bitmap);
+	mwifiex_dbg(adapter, DATA,
+		    "data: port=%d mp_rd_bitmap=0x%08x -> 0x%08x\n",
+		    *port, rd_bitmap, card->mp_rd_bitmap);
 
 	return 0;
 }
@@ -633,7 +643,8 @@
 	const struct mwifiex_sdio_card_reg *reg = card->reg;
 	u32 wr_bitmap = card->mp_wr_bitmap;
 
-	dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%08x\n", wr_bitmap);
+	mwifiex_dbg(adapter, DATA,
+		    "data: mp_wr_bitmap=0x%08x\n", wr_bitmap);
 
 	if (!(wr_bitmap & card->mp_data_port_mask)) {
 		adapter->data_sent = true;
@@ -651,15 +662,16 @@
 	}
 
 	if ((card->has_control_mask) && (*port == CTRL_PORT)) {
-		dev_err(adapter->dev,
-			"invalid data port=%d cur port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
-			*port, card->curr_wr_port, wr_bitmap,
-			card->mp_wr_bitmap);
+		mwifiex_dbg(adapter, ERROR,
+			    "invalid data port=%d cur port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
+			    *port, card->curr_wr_port, wr_bitmap,
+			    card->mp_wr_bitmap);
 		return -1;
 	}
 
-	dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
-		*port, wr_bitmap, card->mp_wr_bitmap);
+	mwifiex_dbg(adapter, DATA,
+		    "data: port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
+		    *port, wr_bitmap, card->mp_wr_bitmap);
 
 	return 0;
 }
@@ -683,7 +695,8 @@
 		usleep_range(10, 20);
 	}
 
-	dev_err(adapter->dev, "poll card status failed, tries = %d\n", tries);
+	mwifiex_dbg(adapter, ERROR,
+		    "poll card status failed, tries = %d\n", tries);
 
 	return -1;
 }
@@ -738,7 +751,7 @@
 	if (mwifiex_read_data_sync(adapter, card->mp_regs,
 				   card->reg->max_mp_regs,
 				   REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) {
-		dev_err(adapter->dev, "read mp_regs failed\n");
+		mwifiex_dbg(adapter, ERROR, "read mp_regs failed\n");
 		return;
 	}
 
@@ -751,7 +764,8 @@
 		 *	UP_LD_CMD_PORT_HOST_INT_STATUS
 		 * Clear the interrupt status register
 		 */
-		dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg);
+		mwifiex_dbg(adapter, INTR,
+			    "int: sdio_ireg = %#x\n", sdio_ireg);
 		spin_lock_irqsave(&adapter->int_lock, flags);
 		adapter->int_status |= sdio_ireg;
 		spin_unlock_irqrestore(&adapter->int_lock, flags);
@@ -802,7 +816,8 @@
 	/* Request the SDIO IRQ */
 	ret = sdio_claim_irq(func, mwifiex_sdio_interrupt);
 	if (ret) {
-		dev_err(adapter->dev, "claim irq failed: ret=%d\n", ret);
+		mwifiex_dbg(adapter, ERROR,
+			    "claim irq failed: ret=%d\n", ret);
 		goto out;
 	}
 
@@ -810,7 +825,8 @@
 	ret = mwifiex_write_reg_locked(func, card->reg->host_int_mask_reg,
 				       card->reg->host_int_enable);
 	if (ret) {
-		dev_err(adapter->dev, "enable host interrupt failed\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "enable host interrupt failed\n");
 		sdio_release_irq(func);
 	}
 
@@ -830,22 +846,25 @@
 	u32 nb;
 
 	if (!buffer) {
-		dev_err(adapter->dev, "%s: buffer is NULL\n", __func__);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: buffer is NULL\n", __func__);
 		return -1;
 	}
 
 	ret = mwifiex_read_data_sync(adapter, buffer, npayload, ioport, 1);
 
 	if (ret) {
-		dev_err(adapter->dev, "%s: read iomem failed: %d\n", __func__,
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: read iomem failed: %d\n", __func__,
 			ret);
 		return -1;
 	}
 
 	nb = le16_to_cpu(*(__le16 *) (buffer));
 	if (nb > npayload) {
-		dev_err(adapter->dev, "%s: invalid packet, nb=%d npayload=%d\n",
-			__func__, nb, npayload);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: invalid packet, nb=%d npayload=%d\n",
+			    __func__, nb, npayload);
 		return -1;
 	}
 
@@ -877,13 +896,14 @@
 	u32 i = 0;
 
 	if (!firmware_len) {
-		dev_err(adapter->dev,
-			"firmware image not found! Terminating download\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "firmware image not found! Terminating download\n");
 		return -1;
 	}
 
-	dev_dbg(adapter->dev, "info: downloading FW image (%d bytes)\n",
-		firmware_len);
+	mwifiex_dbg(adapter, INFO,
+		    "info: downloading FW image (%d bytes)\n",
+		    firmware_len);
 
 	/* Assume that the allocated buffer is 8-byte aligned */
 	fwbuf = kzalloc(MWIFIEX_UPLD_SIZE, GFP_KERNEL);
@@ -897,8 +917,9 @@
 		ret = mwifiex_sdio_poll_card_status(adapter, CARD_IO_READY |
 						    DN_LD_CARD_RDY);
 		if (ret) {
-			dev_err(adapter->dev, "FW download with helper:"
-				" poll status timeout @ %d\n", offset);
+			mwifiex_dbg(adapter, ERROR,
+				    "FW download with helper:\t"
+				    "poll status timeout @ %d\n", offset);
 			goto done;
 		}
 
@@ -910,19 +931,19 @@
 			ret = mwifiex_read_reg(adapter, reg->base_0_reg,
 					       &base0);
 			if (ret) {
-				dev_err(adapter->dev,
-					"dev BASE0 register read failed: "
-					"base0=%#04X(%d). Terminating dnld\n",
-					base0, base0);
+				mwifiex_dbg(adapter, ERROR,
+					    "dev BASE0 register read failed:\t"
+					    "base0=%#04X(%d). Terminating dnld\n",
+					    base0, base0);
 				goto done;
 			}
 			ret = mwifiex_read_reg(adapter, reg->base_1_reg,
 					       &base1);
 			if (ret) {
-				dev_err(adapter->dev,
-					"dev BASE1 register read failed: "
-					"base1=%#04X(%d). Terminating dnld\n",
-					base1, base1);
+				mwifiex_dbg(adapter, ERROR,
+					    "dev BASE1 register read failed:\t"
+					    "base1=%#04X(%d). Terminating dnld\n",
+					    base1, base1);
 				goto done;
 			}
 			len = (u16) (((base1 & 0xff) << 8) | (base0 & 0xff));
@@ -936,9 +957,9 @@
 		if (!len) {
 			break;
 		} else if (len > MWIFIEX_UPLD_SIZE) {
-			dev_err(adapter->dev,
-				"FW dnld failed @ %d, invalid length %d\n",
-				offset, len);
+			mwifiex_dbg(adapter, ERROR,
+				    "FW dnld failed @ %d, invalid length %d\n",
+				    offset, len);
 			ret = -1;
 			goto done;
 		}
@@ -948,14 +969,15 @@
 		if (len & BIT(0)) {
 			i++;
 			if (i > MAX_WRITE_IOMEM_RETRY) {
-				dev_err(adapter->dev,
-					"FW dnld failed @ %d, over max retry\n",
-					offset);
+				mwifiex_dbg(adapter, ERROR,
+					    "FW dnld failed @ %d, over max retry\n",
+					    offset);
 				ret = -1;
 				goto done;
 			}
-			dev_err(adapter->dev, "CRC indicated by the helper:"
-				" len = 0x%04X, txlen = %d\n", len, txlen);
+			mwifiex_dbg(adapter, ERROR,
+				    "CRC indicated by the helper:\t"
+				    "len = 0x%04X, txlen = %d\n", len, txlen);
 			len &= ~BIT(0);
 			/* Setting this to 0 to resend from same offset */
 			txlen = 0;
@@ -978,11 +1000,12 @@
 					      MWIFIEX_SDIO_BLOCK_SIZE,
 					      adapter->ioport);
 		if (ret) {
-			dev_err(adapter->dev,
-				"FW download, write iomem (%d) failed @ %d\n",
-				i, offset);
+			mwifiex_dbg(adapter, ERROR,
+				    "FW download, write iomem (%d) failed @ %d\n",
+				    i, offset);
 			if (mwifiex_write_reg(adapter, CONFIGURATION_REG, 0x04))
-				dev_err(adapter->dev, "write CFG reg failed\n");
+				mwifiex_dbg(adapter, ERROR,
+					    "write CFG reg failed\n");
 
 			ret = -1;
 			goto done;
@@ -991,8 +1014,8 @@
 		offset += txlen;
 	} while (true);
 
-	dev_notice(adapter->dev,
-		   "info: FW download over, size %d bytes\n", offset);
+	mwifiex_dbg(adapter, MSG,
+		    "info: FW download over, size %d bytes\n", offset);
 
 	ret = 0;
 done:
@@ -1066,18 +1089,20 @@
 		blk_num = *(data + BLOCK_NUMBER_OFFSET);
 		blk_size = adapter->sdio_rx_block_size * blk_num;
 		if (blk_size > total_pkt_len) {
-			dev_err(adapter->dev, "%s: error in pkt,\t"
-				"blk_num=%d, blk_size=%d, total_pkt_len=%d\n",
-				__func__, blk_num, blk_size, total_pkt_len);
+			mwifiex_dbg(adapter, ERROR,
+				    "%s: error in blk_size,\t"
+				    "blk_num=%d, blk_size=%d, total_pkt_len=%d\n",
+				    __func__, blk_num, blk_size, total_pkt_len);
 			break;
 		}
 		pkt_len = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET));
 		pkt_type = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET +
 					 2));
 		if ((pkt_len + SDIO_HEADER_OFFSET) > blk_size) {
-			dev_err(adapter->dev, "%s: error in pkt,\t"
-				"pkt_len=%d, blk_size=%d\n",
-				__func__, pkt_len, blk_size);
+			mwifiex_dbg(adapter, ERROR,
+				    "%s: error in pkt_len,\t"
+				    "pkt_len=%d, blk_size=%d\n",
+				    __func__, pkt_len, blk_size);
 			break;
 		}
 		skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len,
@@ -1116,7 +1141,8 @@
 
 	switch (upld_typ) {
 	case MWIFIEX_TYPE_AGGR_DATA:
-		dev_dbg(adapter->dev, "info: --- Rx: Aggr Data packet ---\n");
+		mwifiex_dbg(adapter, INFO,
+			    "info: --- Rx: Aggr Data packet ---\n");
 		rx_info = MWIFIEX_SKB_RXCB(skb);
 		rx_info->buf_type = MWIFIEX_TYPE_AGGR_DATA;
 		if (adapter->rx_work_enabled) {
@@ -1130,7 +1156,8 @@
 		break;
 
 	case MWIFIEX_TYPE_DATA:
-		dev_dbg(adapter->dev, "info: --- Rx: Data packet ---\n");
+		mwifiex_dbg(adapter, DATA,
+			    "info: --- Rx: Data packet ---\n");
 		if (adapter->rx_work_enabled) {
 			skb_queue_tail(&adapter->rx_data_q, skb);
 			adapter->data_received = true;
@@ -1141,7 +1168,8 @@
 		break;
 
 	case MWIFIEX_TYPE_CMD:
-		dev_dbg(adapter->dev, "info: --- Rx: Cmd Response ---\n");
+		mwifiex_dbg(adapter, CMD,
+			    "info: --- Rx: Cmd Response ---\n");
 		/* take care of curr_cmd = NULL case */
 		if (!adapter->curr_cmd) {
 			cmd_buf = adapter->upld_buf;
@@ -1163,7 +1191,8 @@
 		break;
 
 	case MWIFIEX_TYPE_EVENT:
-		dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
+		mwifiex_dbg(adapter, EVENT,
+			    "info: --- Rx: Event ---\n");
 		adapter->event_cause = le32_to_cpu(*(__le32 *) skb->data);
 
 		if ((skb->len > 0) && (skb->len  < MAX_EVENT_SIZE))
@@ -1178,7 +1207,8 @@
 		break;
 
 	default:
-		dev_err(adapter->dev, "unknown upload type %#x\n", upld_typ);
+		mwifiex_dbg(adapter, ERROR,
+			    "unknown upload type %#x\n", upld_typ);
 		dev_kfree_skb_any(skb);
 		break;
 	}
@@ -1210,16 +1240,18 @@
 
 	if ((card->has_control_mask) && (port == CTRL_PORT)) {
 		/* Read the command Resp without aggr */
-		dev_dbg(adapter->dev, "info: %s: no aggregation for cmd "
-			"response\n", __func__);
+		mwifiex_dbg(adapter, CMD,
+			    "info: %s: no aggregation for cmd\t"
+			    "response\n", __func__);
 
 		f_do_rx_cur = 1;
 		goto rx_curr_single;
 	}
 
 	if (!card->mpa_rx.enabled) {
-		dev_dbg(adapter->dev, "info: %s: rx aggregation disabled\n",
-			__func__);
+		mwifiex_dbg(adapter, WARN,
+			    "info: %s: rx aggregation disabled\n",
+			    __func__);
 
 		f_do_rx_cur = 1;
 		goto rx_curr_single;
@@ -1230,7 +1262,8 @@
 	    (card->has_control_mask && (card->mp_rd_bitmap &
 					(~((u32) CTRL_PORT_MASK))))) {
 		/* Some more data RX pending */
-		dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__);
+		mwifiex_dbg(adapter, INFO,
+			    "info: %s: not last packet\n", __func__);
 
 		if (MP_RX_AGGR_IN_PROGRESS(card)) {
 			if (MP_RX_AGGR_BUF_HAS_ROOM(card, rx_len)) {
@@ -1247,7 +1280,8 @@
 
 	} else {
 		/* No more data RX pending */
-		dev_dbg(adapter->dev, "info: %s: last packet\n", __func__);
+		mwifiex_dbg(adapter, INFO,
+			    "info: %s: last packet\n", __func__);
 
 		if (MP_RX_AGGR_IN_PROGRESS(card)) {
 			f_do_rx_aggr = 1;
@@ -1262,14 +1296,16 @@
 	}
 
 	if (f_aggr_cur) {
-		dev_dbg(adapter->dev, "info: current packet aggregation\n");
+		mwifiex_dbg(adapter, INFO,
+			    "info: current packet aggregation\n");
 		/* Curr pkt can be aggregated */
 		mp_rx_aggr_setup(card, rx_len, port);
 
 		if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) ||
 		    mp_rx_aggr_port_limit_reached(card)) {
-			dev_dbg(adapter->dev, "info: %s: aggregated packet "
-				"limit reached\n", __func__);
+			mwifiex_dbg(adapter, INFO,
+				    "info: %s: aggregated packet\t"
+				    "limit reached\n", __func__);
 			/* No more pkts allowed in Aggr buf, rx it */
 			f_do_rx_aggr = 1;
 		}
@@ -1277,8 +1313,9 @@
 
 	if (f_do_rx_aggr) {
 		/* do aggr RX now */
-		dev_dbg(adapter->dev, "info: do_rx_aggr: num of packets: %d\n",
-			card->mpa_rx.pkt_cnt);
+		mwifiex_dbg(adapter, DATA,
+			    "info: do_rx_aggr: num of packets: %d\n",
+			    card->mpa_rx.pkt_cnt);
 
 		if (card->supports_sdio_new_mode) {
 			int i;
@@ -1318,8 +1355,9 @@
 								 GFP_KERNEL |
 								 GFP_DMA);
 			if (!skb_deaggr) {
-				dev_err(adapter->dev, "skb allocation failure drop pkt len=%d type=%d\n",
-					pkt_len, pkt_type);
+				mwifiex_dbg(adapter, ERROR, "skb allocation failure\t"
+					    "drop pkt len=%d type=%d\n",
+					    pkt_len, pkt_type);
 				curr_ptr += len_arr[pind];
 				continue;
 			}
@@ -1339,12 +1377,12 @@
 				mwifiex_decode_rx_packet(adapter, skb_deaggr,
 							 pkt_type);
 			} else {
-				dev_err(adapter->dev, " drop wrong aggr pkt:\t"
-					"sdio_single_port_rx_aggr=%d\t"
-					"type=%d len=%d max_len=%d\n",
-					adapter->sdio_rx_aggr_enable,
-					pkt_type, pkt_len,
-					len_arr[pind]);
+				mwifiex_dbg(adapter, ERROR,
+					    "drop wrong aggr pkt:\t"
+					    "sdio_single_port_rx_aggr=%d\t"
+					    "type=%d len=%d max_len=%d\n",
+					    adapter->sdio_rx_aggr_enable,
+					    pkt_type, pkt_len, len_arr[pind]);
 				dev_kfree_skb_any(skb_deaggr);
 			}
 			curr_ptr += len_arr[pind];
@@ -1354,13 +1392,14 @@
 
 rx_curr_single:
 	if (f_do_rx_cur) {
-		dev_dbg(adapter->dev, "info: RX: port: %d, rx_len: %d\n",
-			port, rx_len);
+		mwifiex_dbg(adapter, INFO, "info: RX: port: %d, rx_len: %d\n",
+			    port, rx_len);
 
 		skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
 		if (!skb) {
-			dev_err(adapter->dev, "single skb allocated fail,\t"
-				"drop pkt port=%d len=%d\n", port, rx_len);
+			mwifiex_dbg(adapter, ERROR,
+				    "single skb allocated fail,\t"
+				    "drop pkt port=%d len=%d\n", port, rx_len);
 			if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
 						      card->mpa_rx.buf, rx_len,
 						      adapter->ioport + port))
@@ -1376,9 +1415,9 @@
 			goto error;
 		if (!adapter->sdio_rx_aggr_enable &&
 		    pkt_type == MWIFIEX_TYPE_AGGR_DATA) {
-			dev_err(adapter->dev, "drop wrong pkt type %d\t"
-				"current SDIO RX Aggr not enabled\n",
-				pkt_type);
+			mwifiex_dbg(adapter, ERROR, "drop wrong pkt type %d\t"
+				    "current SDIO RX Aggr not enabled\n",
+				    pkt_type);
 			dev_kfree_skb_any(skb);
 			return 0;
 		}
@@ -1386,7 +1425,8 @@
 		mwifiex_decode_rx_packet(adapter, skb, pkt_type);
 	}
 	if (f_post_aggr_cur) {
-		dev_dbg(adapter->dev, "info: current packet aggregation\n");
+		mwifiex_dbg(adapter, INFO,
+			    "info: current packet aggregation\n");
 		/* Curr pkt can be aggregated */
 		mp_rx_aggr_setup(card, rx_len, port);
 	}
@@ -1458,7 +1498,7 @@
 		     MWIFIEX_RX_DATA_BUF_SIZE)
 			return -1;
 		rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
-		dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len);
+		mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n", rx_len);
 
 		skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
 		if (!skb)
@@ -1469,17 +1509,17 @@
 		if (mwifiex_sdio_card_to_host(adapter, &pkt_type, skb->data,
 					      skb->len, adapter->ioport |
 							CMD_PORT_SLCT)) {
-			dev_err(adapter->dev,
-				"%s: failed to card_to_host", __func__);
+			mwifiex_dbg(adapter, ERROR,
+				    "%s: failed to card_to_host", __func__);
 			dev_kfree_skb_any(skb);
 			goto term_cmd;
 		}
 
 		if ((pkt_type != MWIFIEX_TYPE_CMD) &&
 		    (pkt_type != MWIFIEX_TYPE_EVENT))
-			dev_err(adapter->dev,
-				"%s:Received wrong packet on cmd port",
-				__func__);
+			mwifiex_dbg(adapter, ERROR,
+				    "%s:Received wrong packet on cmd port",
+				    __func__);
 
 		mwifiex_decode_rx_packet(adapter, skb, pkt_type);
 	}
@@ -1495,12 +1535,13 @@
 		}
 		card->mp_wr_bitmap = bitmap;
 
-		dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%x\n",
-			card->mp_wr_bitmap);
+		mwifiex_dbg(adapter, INTR,
+			    "int: DNLD: wr_bitmap=0x%x\n",
+			    card->mp_wr_bitmap);
 		if (adapter->data_sent &&
 		    (card->mp_wr_bitmap & card->mp_data_port_mask)) {
-			dev_dbg(adapter->dev,
-				"info:  <--- Tx DONE Interrupt --->\n");
+			mwifiex_dbg(adapter, INTR,
+				    "info:  <--- Tx DONE Interrupt --->\n");
 			adapter->data_sent = false;
 		}
 	}
@@ -1517,8 +1558,8 @@
 			adapter->cmd_sent = false;
 	}
 
-	dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
-		adapter->cmd_sent, adapter->data_sent);
+	mwifiex_dbg(adapter, INTR, "info: cmd_sent=%d data_sent=%d\n",
+		    adapter->cmd_sent, adapter->data_sent);
 	if (sdio_ireg & UP_LD_HOST_INT_STATUS) {
 		bitmap = (u32) card->mp_regs[reg->rd_bitmap_l];
 		bitmap |= ((u32) card->mp_regs[reg->rd_bitmap_u]) << 8;
@@ -1529,40 +1570,45 @@
 				((u32) card->mp_regs[reg->rd_bitmap_1u]) << 24;
 		}
 		card->mp_rd_bitmap = bitmap;
-		dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%x\n",
-			card->mp_rd_bitmap);
+		mwifiex_dbg(adapter, INTR,
+			    "int: UPLD: rd_bitmap=0x%x\n",
+			    card->mp_rd_bitmap);
 
 		while (true) {
 			ret = mwifiex_get_rd_port(adapter, &port);
 			if (ret) {
-				dev_dbg(adapter->dev,
-					"info: no more rd_port available\n");
+				mwifiex_dbg(adapter, INFO,
+					    "info: no more rd_port available\n");
 				break;
 			}
 			len_reg_l = reg->rd_len_p0_l + (port << 1);
 			len_reg_u = reg->rd_len_p0_u + (port << 1);
 			rx_len = ((u16) card->mp_regs[len_reg_u]) << 8;
 			rx_len |= (u16) card->mp_regs[len_reg_l];
-			dev_dbg(adapter->dev, "info: RX: port=%d rx_len=%u\n",
-				port, rx_len);
+			mwifiex_dbg(adapter, INFO,
+				    "info: RX: port=%d rx_len=%u\n",
+				    port, rx_len);
 			rx_blocks =
 				(rx_len + MWIFIEX_SDIO_BLOCK_SIZE -
 				 1) / MWIFIEX_SDIO_BLOCK_SIZE;
 			if (rx_len <= INTF_HEADER_LEN ||
 			    (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
 			     card->mpa_rx.buf_size) {
-				dev_err(adapter->dev, "invalid rx_len=%d\n",
-					rx_len);
+				mwifiex_dbg(adapter, ERROR,
+					    "invalid rx_len=%d\n",
+					    rx_len);
 				return -1;
 			}
 
 			rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
-			dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len);
+			mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n",
+				    rx_len);
 
 			if (mwifiex_sdio_card_to_host_mp_aggr(adapter, rx_len,
 							      port)) {
-				dev_err(adapter->dev, "card_to_host_mpa failed:"
-					" int status=%#x\n", sdio_ireg);
+				mwifiex_dbg(adapter, ERROR,
+					    "card_to_host_mpa failed: int status=%#x\n",
+					    sdio_ireg);
 				goto term_cmd;
 			}
 		}
@@ -1573,19 +1619,23 @@
 term_cmd:
 	/* terminate cmd */
 	if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr))
-		dev_err(adapter->dev, "read CFG reg failed\n");
+		mwifiex_dbg(adapter, ERROR, "read CFG reg failed\n");
 	else
-		dev_dbg(adapter->dev, "info: CFG reg val = %d\n", cr);
+		mwifiex_dbg(adapter, INFO,
+			    "info: CFG reg val = %d\n", cr);
 
 	if (mwifiex_write_reg(adapter, CONFIGURATION_REG, (cr | 0x04)))
-		dev_err(adapter->dev, "write CFG reg failed\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "write CFG reg failed\n");
 	else
-		dev_dbg(adapter->dev, "info: write success\n");
+		mwifiex_dbg(adapter, INFO, "info: write success\n");
 
 	if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr))
-		dev_err(adapter->dev, "read CFG reg failed\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "read CFG reg failed\n");
 	else
-		dev_dbg(adapter->dev, "info: CFG reg val =%x\n", cr);
+		mwifiex_dbg(adapter, INFO,
+			    "info: CFG reg val =%x\n", cr);
 
 	return -1;
 }
@@ -1619,8 +1669,9 @@
 	if (!card->mpa_tx.enabled ||
 	    (card->has_control_mask && (port == CTRL_PORT)) ||
 	    (card->supports_sdio_new_mode && (port == CMD_PORT_SLCT))) {
-		dev_dbg(adapter->dev, "info: %s: tx aggregation disabled\n",
-			__func__);
+		mwifiex_dbg(adapter, WARN,
+			    "info: %s: tx aggregation disabled\n",
+			    __func__);
 
 		f_send_cur_buf = 1;
 		goto tx_curr_single;
@@ -1628,8 +1679,9 @@
 
 	if (next_pkt_len) {
 		/* More pkt in TX queue */
-		dev_dbg(adapter->dev, "info: %s: more packets in queue.\n",
-			__func__);
+		mwifiex_dbg(adapter, INFO,
+			    "info: %s: more packets in queue.\n",
+			    __func__);
 
 		if (MP_TX_AGGR_IN_PROGRESS(card)) {
 			if (MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)) {
@@ -1659,8 +1711,9 @@
 		}
 	} else {
 		/* Last pkt in TX queue */
-		dev_dbg(adapter->dev, "info: %s: Last packet in Tx Queue.\n",
-			__func__);
+		mwifiex_dbg(adapter, INFO,
+			    "info: %s: Last packet in Tx Queue.\n",
+			    __func__);
 
 		if (MP_TX_AGGR_IN_PROGRESS(card)) {
 			/* some packs in Aggr buf already */
@@ -1677,8 +1730,9 @@
 	}
 
 	if (f_precopy_cur_buf) {
-		dev_dbg(adapter->dev, "data: %s: precopy current buffer\n",
-			__func__);
+		mwifiex_dbg(adapter, DATA,
+			    "data: %s: precopy current buffer\n",
+			    __func__);
 		MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
 
 		if (MP_TX_AGGR_PKT_LIMIT_REACHED(card) ||
@@ -1688,9 +1742,10 @@
 	}
 
 	if (f_send_aggr_buf) {
-		dev_dbg(adapter->dev, "data: %s: send aggr buffer: %d %d\n",
-			__func__,
-				card->mpa_tx.start_port, card->mpa_tx.ports);
+		mwifiex_dbg(adapter, DATA,
+			    "data: %s: send aggr buffer: %d %d\n",
+			    __func__, card->mpa_tx.start_port,
+			    card->mpa_tx.ports);
 		if (card->supports_sdio_new_mode) {
 			u32 port_count;
 			int i;
@@ -1719,15 +1774,17 @@
 
 tx_curr_single:
 	if (f_send_cur_buf) {
-		dev_dbg(adapter->dev, "data: %s: send current buffer %d\n",
-			__func__, port);
+		mwifiex_dbg(adapter, DATA,
+			    "data: %s: send current buffer %d\n",
+			    __func__, port);
 		ret = mwifiex_write_data_to_card(adapter, payload, pkt_len,
 						 adapter->ioport + port);
 	}
 
 	if (f_postcopy_cur_buf) {
-		dev_dbg(adapter->dev, "data: %s: postcopy current buffer\n",
-			__func__);
+		mwifiex_dbg(adapter, DATA,
+			    "data: %s: postcopy current buffer\n",
+			    __func__);
 		MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
 	}
 
@@ -1771,8 +1828,9 @@
 	if (type == MWIFIEX_TYPE_DATA) {
 		ret = mwifiex_get_wr_port_data(adapter, &port);
 		if (ret) {
-			dev_err(adapter->dev, "%s: no wr_port available\n",
-				__func__);
+			mwifiex_dbg(adapter, ERROR,
+				    "%s: no wr_port available\n",
+				    __func__);
 			return ret;
 		}
 	} else {
@@ -1781,8 +1839,9 @@
 
 		if (pkt_len <= INTF_HEADER_LEN ||
 		    pkt_len > MWIFIEX_UPLD_SIZE)
-			dev_err(adapter->dev, "%s: payload=%p, nb=%d\n",
-				__func__, payload, pkt_len);
+			mwifiex_dbg(adapter, ERROR,
+				    "%s: payload=%p, nb=%d\n",
+				    __func__, payload, pkt_len);
 
 		if (card->supports_sdio_new_mode)
 			port = CMD_PORT_SLCT;
@@ -1896,7 +1955,8 @@
 	ret = sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE);
 	sdio_release_host(func);
 	if (ret) {
-		pr_err("cannot set SDIO block size\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "cannot set SDIO block size\n");
 		return ret;
 	}
 
@@ -1977,7 +2037,8 @@
 					     card->mp_tx_agg_buf_size,
 					     card->mp_rx_agg_buf_size);
 	if (ret) {
-		dev_err(adapter->dev, "failed to alloc sdio mp-a buffers\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "failed to alloc sdio mp-a buffers\n");
 		kfree(card->mp_regs);
 		return -1;
 	}
@@ -2041,8 +2102,9 @@
 
 	card->curr_wr_port = reg->start_wr_port;
 
-	dev_dbg(adapter->dev, "cmd: mp_end_port %d, data port mask 0x%x\n",
-		port, card->mp_data_port_mask);
+	mwifiex_dbg(adapter, CMD,
+		    "cmd: mp_end_port %d, data port mask 0x%x\n",
+		    port, card->mp_data_port_mask);
 }
 
 static struct mwifiex_adapter *save_adapter;
@@ -2059,7 +2121,7 @@
 	 * We run it in a totally independent workqueue.
 	 */
 
-	pr_err("Resetting card...\n");
+	mwifiex_dbg(adapter, WARN, "Resetting card...\n");
 	mmc_remove_host(target);
 	/* 200ms delay is based on experiment with sdhci controller */
 	mdelay(200);
@@ -2079,14 +2141,14 @@
 	sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl,
 		    &ret);
 	if (ret) {
-		dev_err(adapter->dev, "SDIO Write ERR\n");
+		mwifiex_dbg(adapter, ERROR, "SDIO Write ERR\n");
 		return RDWR_STATUS_FAILURE;
 	}
 	for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
 		ctrl_data = sdio_readb(card->func, card->reg->fw_dump_ctrl,
 				       &ret);
 		if (ret) {
-			dev_err(adapter->dev, "SDIO read err\n");
+			mwifiex_dbg(adapter, ERROR, "SDIO read err\n");
 			return RDWR_STATUS_FAILURE;
 		}
 		if (ctrl_data == FW_DUMP_DONE)
@@ -2094,19 +2156,20 @@
 		if (doneflag && ctrl_data == doneflag)
 			return RDWR_STATUS_DONE;
 		if (ctrl_data != FW_DUMP_HOST_READY) {
-			dev_info(adapter->dev,
-				 "The ctrl reg was changed, re-try again!\n");
+			mwifiex_dbg(adapter, WARN,
+				    "The ctrl reg was changed, retrying!\n");
 			sdio_writeb(card->func, FW_DUMP_HOST_READY,
 				    card->reg->fw_dump_ctrl, &ret);
 			if (ret) {
-				dev_err(adapter->dev, "SDIO write err\n");
+				mwifiex_dbg(adapter, ERROR, "SDIO write err\n");
 				return RDWR_STATUS_FAILURE;
 			}
 		}
 		usleep_range(100, 200);
 	}
 	if (ctrl_data == FW_DUMP_HOST_READY) {
-		dev_err(adapter->dev, "Fail to pull ctrl_data\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "Failed to pull ctrl_data\n");
 		return RDWR_STATUS_FAILURE;
 	}
 
@@ -2114,7 +2177,7 @@
 }
 
 /* This function dumps firmware memory to file */
-static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
+static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter)
 {
 	struct sdio_mmc_card *card = adapter->card;
 	int ret = 0;
@@ -2122,9 +2185,6 @@
 	u8 *dbg_ptr, *end_ptr, dump_num, idx, i, read_reg, doneflag = 0;
 	enum rdwr_status stat;
 	u32 memory_size;
-	static char *env[] = { "DRIVER=mwifiex_sdio", "EVENT=fw_dump", NULL };
-
-	mwifiex_dump_drv_info(adapter);
 
 	if (!card->can_dump_fw)
 		return;
@@ -2142,7 +2202,7 @@
 	mwifiex_pm_wakeup_card(adapter);
 	sdio_claim_host(card->func);
 
-	dev_info(adapter->dev, "== mwifiex firmware dump start ==\n");
+	mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n");
 
 	stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag);
 	if (stat == RDWR_STATUS_FAILURE)
@@ -2152,7 +2212,7 @@
 	/* Read the number of the memories which will dump */
 	dump_num = sdio_readb(card->func, reg, &ret);
 	if (ret) {
-		dev_err(adapter->dev, "SDIO read memory length err\n");
+		mwifiex_dbg(adapter, ERROR, "SDIO read memory length err\n");
 		goto done;
 	}
 
@@ -2169,7 +2229,7 @@
 		for (i = 0; i < 4; i++) {
 			read_reg = sdio_readb(card->func, reg, &ret);
 			if (ret) {
-				dev_err(adapter->dev, "SDIO read err\n");
+				mwifiex_dbg(adapter, ERROR, "SDIO read err\n");
 				goto done;
 			}
 			memory_size |= (read_reg << i*8);
@@ -2177,25 +2237,33 @@
 		}
 
 		if (memory_size == 0) {
-			dev_info(adapter->dev, "Firmware dump Finished!\n");
+			mwifiex_dbg(adapter, DUMP, "Firmware dump finished!\n");
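+			/* tell firmware the host has read the dump */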
+			ret = mwifiex_write_reg(adapter,
+						card->reg->fw_dump_ctrl,
+						FW_DUMP_READ_DONE);
+			if (ret) {
+				mwifiex_dbg(adapter, ERROR, "SDIO write err\n");
+				return;
+			}
 			break;
 		}
 
-		dev_info(adapter->dev,
-			 "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
+		mwifiex_dbg(adapter, DUMP,
+			    "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
 		entry->mem_ptr = vmalloc(memory_size + 1);
 		entry->mem_size = memory_size;
 		if (!entry->mem_ptr) {
-			dev_err(adapter->dev, "Vmalloc %s failed\n",
-				entry->mem_name);
+			mwifiex_dbg(adapter, ERROR, "Vmalloc %s failed\n",
+				    entry->mem_name);
 			goto done;
 		}
 		dbg_ptr = entry->mem_ptr;
 		end_ptr = dbg_ptr + memory_size;
 
 		doneflag = entry->done_flag;
-		dev_info(adapter->dev, "Start %s output, please wait...\n",
-			 entry->mem_name);
+		mwifiex_dbg(adapter, DUMP,
+			    "Start %s output, please wait...\n",
+			    entry->mem_name);
 
 		do {
 			stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag);
@@ -2207,39 +2275,43 @@
 			for (reg = reg_start; reg <= reg_end; reg++) {
 				*dbg_ptr = sdio_readb(card->func, reg, &ret);
 				if (ret) {
-					dev_err(adapter->dev,
-						"SDIO read err\n");
+					mwifiex_dbg(adapter, ERROR,
+						    "SDIO read err\n");
 					goto done;
 				}
 				if (dbg_ptr < end_ptr)
 					dbg_ptr++;
 				else
-					dev_err(adapter->dev,
-						"Allocated buf not enough\n");
+					mwifiex_dbg(adapter, ERROR,
+						    "Allocated buffer too small\n");
 			}
 
 			if (stat != RDWR_STATUS_DONE)
 				continue;
 
-			dev_info(adapter->dev, "%s done: size=0x%tx\n",
-				 entry->mem_name, dbg_ptr - entry->mem_ptr);
+			mwifiex_dbg(adapter, DUMP, "%s done: size=0x%tx\n",
+				    entry->mem_name, dbg_ptr - entry->mem_ptr);
 			break;
 		} while (1);
 	}
-	dev_info(adapter->dev, "== mwifiex firmware dump end ==\n");
-
-	kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env);
+	mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n");
 
 done:
 	sdio_release_host(card->func);
-	adapter->curr_mem_idx = 0;
+}
+
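+/* This function collects driver info and a firmware memory dump,
+ * then uploads the combined device dump.
+ */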
+static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter)
+{
+	mwifiex_drv_info_dump(adapter);
+	mwifiex_sdio_fw_dump(adapter);
+	mwifiex_upload_device_dump(adapter);
 }
 
 static void mwifiex_sdio_work(struct work_struct *work)
 {
-	if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
+	if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
 			       &iface_work_flags))
-		mwifiex_sdio_fw_dump_work(save_adapter);
+		mwifiex_sdio_device_dump_work(save_adapter);
 	if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET,
 			       &iface_work_flags))
 		mwifiex_sdio_card_reset_work(save_adapter);
@@ -2259,13 +2331,13 @@
 }
 
 /* This function schedules the device dump work */
-static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter)
+static void mwifiex_sdio_device_dump(struct mwifiex_adapter *adapter)
 {
 	save_adapter = adapter;
-	if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags))
+	if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags))
 		return;
 
-	set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags);
+	set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags);
 	schedule_work(&sdio_work);
 }
 
@@ -2285,7 +2357,7 @@
 	if (!p)
 		return 0;
 
-	dev_info(adapter->dev, "SDIO register DUMP START\n");
+	mwifiex_dbg(adapter, MSG, "SDIO register dump start\n");
 
 	mwifiex_pm_wakeup_card(adapter);
 
@@ -2351,13 +2423,13 @@
 				reg++;
 		}
 
-		dev_info(adapter->dev, "%s\n", buf);
+		mwifiex_dbg(adapter, MSG, "%s\n", buf);
 		p += sprintf(p, "%s\n", buf);
 	}
 
 	sdio_release_host(cardp->func);
 
-	dev_info(adapter->dev, "SDIO register DUMP END\n");
+	mwifiex_dbg(adapter, MSG, "SDIO register dump end\n");
 
 	return p - drv_buf;
 }
@@ -2382,8 +2454,8 @@
 	.cmdrsp_complete = mwifiex_sdio_cmdrsp_complete,
 	.event_complete = mwifiex_sdio_event_complete,
 	.card_reset = mwifiex_sdio_card_reset,
-	.fw_dump = mwifiex_sdio_fw_dump,
 	.reg_dump = mwifiex_sdio_reg_dump,
+	.device_dump = mwifiex_sdio_device_dump,
 	.deaggr_pkt = mwifiex_deaggr_sdio_pkt,
 };
 
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 49422f2..037adcd 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -77,8 +77,8 @@
 	struct host_cmd_ds_mac_control *mac_ctrl = &cmd->params.mac_ctrl;
 
 	if (cmd_action != HostCmd_ACT_GEN_SET) {
-		dev_err(priv->adapter->dev,
-			"mac_control: only support set cmd\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "mac_control: only supports set cmd\n");
 		return -1;
 	}
 
@@ -112,7 +112,8 @@
 {
 	struct host_cmd_ds_802_11_snmp_mib *snmp_mib = &cmd->params.smib;
 
-	dev_dbg(priv->adapter->dev, "cmd: SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid);
+	mwifiex_dbg(priv->adapter, CMD,
+		    "cmd: SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid);
 	cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SNMP_MIB);
 	cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_snmp_mib)
 				- 1 + S_DS_GEN);
@@ -129,11 +130,11 @@
 		le16_add_cpu(&cmd->size, sizeof(u16));
 	}
 
-	dev_dbg(priv->adapter->dev,
-		"cmd: SNMP_CMD: Action=0x%x, OID=0x%x, OIDSize=0x%x,"
-		" Value=0x%x\n",
-		cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size),
-		le16_to_cpu(*(__le16 *) snmp_mib->value));
+	mwifiex_dbg(priv->adapter, CMD,
+		    "cmd: SNMP_CMD: Action=0x%x, OID=0x%x,\t"
+		    "OIDSize=0x%x, Value=0x%x\n",
+		    cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size),
+		    le16_to_cpu(*(__le16 *)snmp_mib->value));
 	return 0;
 }
 
@@ -356,9 +357,9 @@
 	    (hscfg_param->conditions != cpu_to_le32(HS_CFG_CANCEL)) &&
 	    ((adapter->arp_filter_size > 0) &&
 	     (adapter->arp_filter_size <= ARP_FILTER_MAX_BUF_SIZE))) {
-		dev_dbg(adapter->dev,
-			"cmd: Attach %d bytes ArpFilter to HSCfg cmd\n",
-			adapter->arp_filter_size);
+		mwifiex_dbg(adapter, CMD,
+			    "cmd: Attach %d bytes ArpFilter to HSCfg cmd\n",
+			    adapter->arp_filter_size);
 		memcpy(((u8 *) hs_cfg) +
 		       sizeof(struct host_cmd_ds_802_11_hs_cfg_enh),
 		       adapter->arp_filter, adapter->arp_filter_size);
@@ -378,11 +379,11 @@
 		hs_cfg->params.hs_config.conditions = hscfg_param->conditions;
 		hs_cfg->params.hs_config.gpio = hscfg_param->gpio;
 		hs_cfg->params.hs_config.gap = hscfg_param->gap;
-		dev_dbg(adapter->dev,
-			"cmd: HS_CFG_CMD: condition:0x%x gpio:0x%x gap:0x%x\n",
-		       hs_cfg->params.hs_config.conditions,
-		       hs_cfg->params.hs_config.gpio,
-		       hs_cfg->params.hs_config.gap);
+		mwifiex_dbg(adapter, CMD,
+			    "cmd: HS_CFG_CMD: condition:0x%x gpio:0x%x gap:0x%x\n",
+			    hs_cfg->params.hs_config.conditions,
+			    hs_cfg->params.hs_config.gpio,
+			    hs_cfg->params.hs_config.gap);
 	}
 
 	return 0;
@@ -462,7 +463,7 @@
 	/* Set AP MAC address */
 	memcpy(deauth->mac_addr, mac, ETH_ALEN);
 
-	dev_dbg(priv->adapter->dev, "cmd: Deauth: %pM\n", deauth->mac_addr);
+	mwifiex_dbg(priv->adapter, CMD, "cmd: Deauth: %pM\n", deauth->mac_addr);
 
 	deauth->reason_code = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING);
 
@@ -540,9 +541,9 @@
 		} else if (!priv->wep_key[i].key_length) {
 			continue;
 		} else {
-			dev_err(priv->adapter->dev,
-				"key%d Length = %d is incorrect\n",
-			       (i + 1), priv->wep_key[i].key_length);
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "key%d Length = %d is incorrect\n",
+				    (i + 1), priv->wep_key[i].key_length);
 			return -1;
 		}
 	}
@@ -562,7 +563,8 @@
 	u16 size, len = KEY_PARAMS_FIXED_LEN;
 
 	if (enc_key->is_igtk_key) {
-		dev_dbg(adapter->dev, "%s: Set CMAC AES Key\n", __func__);
+		mwifiex_dbg(adapter, INFO,
+			    "%s: Set CMAC AES Key\n", __func__);
 		if (enc_key->is_rx_seq_valid)
 			memcpy(km->key_param_set.key_params.cmac_aes.ipn,
 			       enc_key->pn, enc_key->pn_len);
@@ -575,7 +577,8 @@
 		       enc_key->key_material, enc_key->key_len);
 		len += sizeof(struct mwifiex_cmac_aes_param);
 	} else {
-		dev_dbg(adapter->dev, "%s: Set AES Key\n", __func__);
+		mwifiex_dbg(adapter, INFO,
+			    "%s: Set AES Key\n", __func__);
 		if (enc_key->is_rx_seq_valid)
 			memcpy(km->key_param_set.key_params.aes.pn,
 			       enc_key->pn, enc_key->pn_len);
@@ -619,7 +622,7 @@
 	km->action = cpu_to_le16(cmd_action);
 
 	if (cmd_action == HostCmd_ACT_GEN_GET) {
-		dev_dbg(adapter->dev, "%s: Get key\n", __func__);
+		mwifiex_dbg(adapter, INFO, "%s: Get key\n", __func__);
 		km->key_param_set.key_idx =
 					enc_key->key_index & KEY_INDEX_MASK;
 		km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
@@ -646,7 +649,7 @@
 	       sizeof(struct mwifiex_ie_type_key_param_set_v2));
 
 	if (enc_key->key_disable) {
-		dev_dbg(adapter->dev, "%s: Remove key\n", __func__);
+		mwifiex_dbg(adapter, INFO, "%s: Remove key\n", __func__);
 		km->action = cpu_to_le16(HostCmd_ACT_GEN_REMOVE);
 		km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
 		km->key_param_set.len = cpu_to_le16(KEY_PARAMS_FIXED_LEN);
@@ -667,7 +670,7 @@
 	memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
 
 	if (enc_key->key_len <= WLAN_KEY_LEN_WEP104) {
-		dev_dbg(adapter->dev, "%s: Set WEP Key\n", __func__);
+		mwifiex_dbg(adapter, INFO, "%s: Set WEP Key\n", __func__);
 		len += sizeof(struct mwifiex_wep_param);
 		km->key_param_set.len = cpu_to_le16(len);
 		km->key_param_set.key_type = KEY_TYPE_ID_WEP;
@@ -710,7 +713,7 @@
 		key_info |= KEY_UNICAST | KEY_TX_KEY | KEY_RX_KEY;
 
 	if (enc_key->is_wapi_key) {
-		dev_dbg(adapter->dev, "%s: Set WAPI Key\n", __func__);
+		mwifiex_dbg(adapter, INFO, "%s: Set WAPI Key\n", __func__);
 		km->key_param_set.key_type = KEY_TYPE_ID_WAPI;
 		memcpy(km->key_param_set.key_params.wapi.pn, enc_key->pn,
 		       PN_LEN);
@@ -750,7 +753,8 @@
 		return mwifiex_set_aes_key_v2(priv, cmd, enc_key, km);
 
 	if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
-		dev_dbg(adapter->dev, "%s: Set TKIP Key\n", __func__);
+		mwifiex_dbg(adapter, INFO,
+			    "%s: Set TKIP Key\n", __func__);
 		if (enc_key->is_rx_seq_valid)
 			memcpy(km->key_param_set.key_params.tkip.pn,
 			       enc_key->pn, enc_key->pn_len);
@@ -814,7 +818,7 @@
 		memset(&key_material->key_param_set, 0,
 		       sizeof(struct mwifiex_ie_type_key_param_set));
 	if (enc_key->is_wapi_key) {
-		dev_dbg(priv->adapter->dev, "info: Set WAPI Key\n");
+		mwifiex_dbg(priv->adapter, INFO, "info: Set WAPI Key\n");
 		key_material->key_param_set.key_type_id =
 						cpu_to_le16(KEY_TYPE_ID_WAPI);
 		if (cmd_oid == KEY_INFO_ENABLED)
@@ -860,7 +864,7 @@
 	}
 	if (enc_key->key_len == WLAN_KEY_LEN_CCMP) {
 		if (enc_key->is_igtk_key) {
-			dev_dbg(priv->adapter->dev, "cmd: CMAC_AES\n");
+			mwifiex_dbg(priv->adapter, CMD, "cmd: CMAC_AES\n");
 			key_material->key_param_set.key_type_id =
 					cpu_to_le16(KEY_TYPE_ID_AES_CMAC);
 			if (cmd_oid == KEY_INFO_ENABLED)
@@ -873,7 +877,7 @@
 			key_material->key_param_set.key_info |=
 							cpu_to_le16(KEY_IGTK);
 		} else {
-			dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
+			mwifiex_dbg(priv->adapter, CMD, "cmd: WPA_AES\n");
 			key_material->key_param_set.key_type_id =
 						cpu_to_le16(KEY_TYPE_ID_AES);
 			if (cmd_oid == KEY_INFO_ENABLED)
@@ -892,7 +896,7 @@
 							cpu_to_le16(KEY_MCAST);
 		}
 	} else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
-		dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n");
+		mwifiex_dbg(priv->adapter, CMD, "cmd: WPA_TKIP\n");
 		key_material->key_param_set.key_type_id =
 						cpu_to_le16(KEY_TYPE_ID_TKIP);
 		key_material->key_param_set.key_info =
@@ -999,7 +1003,8 @@
 		&domain_info->domain;
 	u8 no_of_triplet = adapter->domain_reg.no_of_triplet;
 
-	dev_dbg(adapter->dev, "info: 11D: no_of_triplet=0x%x\n", no_of_triplet);
+	mwifiex_dbg(adapter, INFO,
+		    "info: 11D: no_of_triplet=0x%x\n", no_of_triplet);
 
 	cmd->command = cpu_to_le16(HostCmd_CMD_802_11D_DOMAIN_INFO);
 	domain_info->action = cpu_to_le16(cmd_action);
@@ -1071,6 +1076,26 @@
 	return 0;
 }
 
+/* This function prepares the command buffer to get/set a memory
+ * location value.
+ */
+static int
+mwifiex_cmd_mem_access(struct host_cmd_ds_command *cmd, u16 cmd_action,
+		       void *pdata_buf)
+{
+	struct mwifiex_ds_mem_rw *mem_rw = (void *)pdata_buf;
+	struct host_cmd_ds_mem_access *mem_access = (void *)&cmd->params.mem;
+
+	cmd->command = cpu_to_le16(HostCmd_CMD_MEM_ACCESS);
+	cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_mem_access) +
+				S_DS_GEN);
+
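+	/* firmware expects action, address and value in little endian */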
+	mem_access->action = cpu_to_le16(cmd_action);
+	mem_access->addr = cpu_to_le32(mem_rw->addr);
+	mem_access->value = cpu_to_le32(mem_rw->value);
+
+	return 0;
+}
+
 /*
  * This function prepares command to set/get register value.
  *
@@ -1215,8 +1240,9 @@
 						(u32)(card->sleep_cookie_pbase);
 		host_spec->sleep_cookie_addr_hi =
 				 (u32)(((u64)(card->sleep_cookie_pbase)) >> 32);
-		dev_dbg(priv->adapter->dev, "sleep_cook_lo phy addr: 0x%x\n",
-			host_spec->sleep_cookie_addr_lo);
+		mwifiex_dbg(priv->adapter, INFO,
+			    "sleep_cook_lo phy addr: 0x%x\n",
+			    host_spec->sleep_cookie_addr_lo);
 	}
 
 	return 0;
@@ -1243,7 +1269,8 @@
 				S_DS_GEN);
 
 	subsc_evt->action = cpu_to_le16(subsc_evt_cfg->action);
-	dev_dbg(priv->adapter->dev, "cmd: action: %d\n", subsc_evt_cfg->action);
+	mwifiex_dbg(priv->adapter, CMD,
+		    "cmd: action: %d\n", subsc_evt_cfg->action);
 
 	/*For query requests, no configuration TLV structures are to be added.*/
 	if (subsc_evt_cfg->action == HostCmd_ACT_GEN_GET)
@@ -1252,14 +1279,15 @@
 	subsc_evt->events = cpu_to_le16(subsc_evt_cfg->events);
 
 	event_bitmap = subsc_evt_cfg->events;
-	dev_dbg(priv->adapter->dev, "cmd: event bitmap : %16x\n",
-		event_bitmap);
+	mwifiex_dbg(priv->adapter, CMD, "cmd: event bitmap : %16x\n",
+		    event_bitmap);
 
 	if (((subsc_evt_cfg->action == HostCmd_ACT_BITWISE_CLR) ||
 	     (subsc_evt_cfg->action == HostCmd_ACT_BITWISE_SET)) &&
 	    (event_bitmap == 0)) {
-		dev_dbg(priv->adapter->dev, "Error: No event specified "
-			"for bitwise action type\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Error: No event specified\t"
+			    "for bitwise action type\n");
 		return -EINVAL;
 	}
 
@@ -1284,10 +1312,11 @@
 		rssi_tlv->abs_value = subsc_evt_cfg->bcn_l_rssi_cfg.abs_value;
 		rssi_tlv->evt_freq = subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq;
 
-		dev_dbg(priv->adapter->dev, "Cfg Beacon Low Rssi event, "
-			"RSSI:-%d dBm, Freq:%d\n",
-			subsc_evt_cfg->bcn_l_rssi_cfg.abs_value,
-			subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq);
+		mwifiex_dbg(priv->adapter, EVENT,
+			    "Cfg Beacon Low Rssi event,\t"
+			    "RSSI:-%d dBm, Freq:%d\n",
+			    subsc_evt_cfg->bcn_l_rssi_cfg.abs_value,
+			    subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq);
 
 		pos += sizeof(struct mwifiex_ie_types_rssi_threshold);
 		le16_add_cpu(&cmd->size,
@@ -1304,10 +1333,11 @@
 		rssi_tlv->abs_value = subsc_evt_cfg->bcn_h_rssi_cfg.abs_value;
 		rssi_tlv->evt_freq = subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq;
 
-		dev_dbg(priv->adapter->dev, "Cfg Beacon High Rssi event, "
-			"RSSI:-%d dBm, Freq:%d\n",
-			subsc_evt_cfg->bcn_h_rssi_cfg.abs_value,
-			subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq);
+		mwifiex_dbg(priv->adapter, EVENT,
+			    "Cfg Beacon High Rssi event,\t"
+			    "RSSI:-%d dBm, Freq:%d\n",
+			    subsc_evt_cfg->bcn_h_rssi_cfg.abs_value,
+			    subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq);
 
 		pos += sizeof(struct mwifiex_ie_types_rssi_threshold);
 		le16_add_cpu(&cmd->size,
@@ -1463,12 +1493,14 @@
 						data, len);
 		if (ret)
 			return ret;
-		dev_dbg(adapter->dev,
-			"download cfg_data from device tree: %s\n", prop->name);
+		mwifiex_dbg(adapter, INFO,
+			    "download cfg_data from device tree: %s\n",
+			    prop->name);
 	} else if (adapter->cal_data->data && adapter->cal_data->size > 0) {
 		len = mwifiex_parse_cal_cfg((u8 *)adapter->cal_data->data,
 					    adapter->cal_data->size, data);
-		dev_dbg(adapter->dev, "download cfg_data from config file\n");
+		mwifiex_dbg(adapter, INFO,
+			    "download cfg_data from config file\n");
 	} else {
 		return -1;
 	}
@@ -1583,9 +1615,9 @@
 		tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_CONFIG);
 
 		if (!params) {
-			dev_err(priv->adapter->dev,
-				"TDLS config params not available for %pM\n",
-				oper->peer_mac);
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "TDLS config params not available for %pM\n",
+				    oper->peer_mac);
 			return -ENODATA;
 		}
 
@@ -1663,7 +1695,7 @@
 
 		break;
 	default:
-		dev_err(priv->adapter->dev, "Unknown TDLS operation\n");
+		mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS operation\n");
 		return -ENOTSUPP;
 	}
 
@@ -1870,8 +1902,8 @@
 		ret = mwifiex_cmd_11n_cfg(priv, cmd_ptr, cmd_action, data_buf);
 		break;
 	case HostCmd_CMD_WMM_GET_STATUS:
-		dev_dbg(priv->adapter->dev,
-			"cmd: WMM: WMM_GET_STATUS cmd sent\n");
+		mwifiex_dbg(priv->adapter, CMD,
+			    "cmd: WMM: WMM_GET_STATUS cmd sent\n");
 		cmd_ptr->command = cpu_to_le16(HostCmd_CMD_WMM_GET_STATUS);
 		cmd_ptr->size =
 			cpu_to_le16(sizeof(struct host_cmd_ds_wmm_get_status) +
@@ -1885,6 +1917,9 @@
 	case HostCmd_CMD_802_11_SCAN_EXT:
 		ret = mwifiex_cmd_802_11_scan_ext(priv, cmd_ptr, data_buf);
 		break;
+	case HostCmd_CMD_MEM_ACCESS:
+		ret = mwifiex_cmd_mem_access(cmd_ptr, cmd_action, data_buf);
+		break;
 	case HostCmd_CMD_MAC_REG_ACCESS:
 	case HostCmd_CMD_BBP_REG_ACCESS:
 	case HostCmd_CMD_RF_REG_ACCESS:
@@ -1932,8 +1967,8 @@
 						   data_buf);
 		break;
 	default:
-		dev_err(priv->adapter->dev,
-			"PREP_CMD: unknown cmd- %#x\n", cmd_no);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "PREP_CMD: unknown cmd- %#x\n", cmd_no);
 		ret = -1;
 		break;
 	}
@@ -2024,8 +2059,8 @@
 					       &sdio_sp_rx_aggr_enable,
 					       true);
 			if (ret) {
-				dev_err(priv->adapter->dev,
-					"error while enabling SP aggregation..disable it");
+				mwifiex_dbg(priv->adapter, ERROR,
+					    "error while enabling SP aggregation; disabling it\n");
 				adapter->sdio_rx_aggr_enable = false;
 			}
 		}
@@ -2108,8 +2143,8 @@
 				       HostCmd_ACT_GEN_SET, DOT11D_I,
 				       &state_11d, true);
 		if (ret)
-			dev_err(priv->adapter->dev,
-				"11D: failed to enable 11D\n");
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "11D: failed to enable 11D\n");
 	}
 
 	/* Send cmd to FW to configure 11n specific configuration
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 88dc6b6..b645884 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -49,8 +49,9 @@
 	struct host_cmd_ds_802_11_ps_mode_enh *pm;
 	unsigned long flags;
 
-	dev_err(adapter->dev, "CMD_RESP: cmd %#x error, result=%#x\n",
-		resp->command, resp->result);
+	mwifiex_dbg(adapter, ERROR,
+		    "CMD_RESP: cmd %#x error, result=%#x\n",
+		    resp->command, resp->result);
 
 	if (adapter->curr_cmd->wait_q_enabled)
 		adapter->cmd_wait_q.status = -1;
@@ -58,9 +59,9 @@
 	switch (le16_to_cpu(resp->command)) {
 	case HostCmd_CMD_802_11_PS_MODE_ENH:
 		pm = &resp->params.psmode_enh;
-		dev_err(adapter->dev,
-			"PS_MODE_ENH cmd failed: result=0x%x action=0x%X\n",
-			resp->result, le16_to_cpu(pm->action));
+		mwifiex_dbg(adapter, ERROR,
+			    "PS_MODE_ENH cmd failed: result=0x%x action=0x%X\n",
+			    resp->result, le16_to_cpu(pm->action));
 		/* We do not re-try enter-ps command in ad-hoc mode. */
 		if (le16_to_cpu(pm->action) == EN_AUTO_PS &&
 		    (le16_to_cpu(pm->params.ps_bitmap) & BITMAP_STA_PS) &&
@@ -91,7 +92,8 @@
 		break;
 
 	case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
-		dev_err(priv->adapter->dev, "SDIO RX single-port aggregation Not support\n");
+		mwifiex_dbg(adapter, MSG,
+			    "SDIO RX single-port aggregation not supported\n");
 		break;
 
 	default:
@@ -187,29 +189,34 @@
 	u16 query_type = le16_to_cpu(smib->query_type);
 	u32 ul_temp;
 
-	dev_dbg(priv->adapter->dev, "info: SNMP_RESP: oid value = %#x,"
-		" query_type = %#x, buf size = %#x\n",
-		oid, query_type, le16_to_cpu(smib->buf_size));
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: SNMP_RESP: oid value = %#x,\t"
+		    "query_type = %#x, buf size = %#x\n",
+		    oid, query_type, le16_to_cpu(smib->buf_size));
 	if (query_type == HostCmd_ACT_GEN_GET) {
 		ul_temp = le16_to_cpu(*((__le16 *) (smib->value)));
 		if (data_buf)
 			*data_buf = ul_temp;
 		switch (oid) {
 		case FRAG_THRESH_I:
-			dev_dbg(priv->adapter->dev,
-				"info: SNMP_RESP: FragThsd =%u\n", ul_temp);
+			mwifiex_dbg(priv->adapter, INFO,
+				    "info: SNMP_RESP: FragThsd =%u\n",
+				    ul_temp);
 			break;
 		case RTS_THRESH_I:
-			dev_dbg(priv->adapter->dev,
-				"info: SNMP_RESP: RTSThsd =%u\n", ul_temp);
+			mwifiex_dbg(priv->adapter, INFO,
+				    "info: SNMP_RESP: RTSThsd =%u\n",
+				    ul_temp);
 			break;
 		case SHORT_RETRY_LIM_I:
-			dev_dbg(priv->adapter->dev,
-				"info: SNMP_RESP: TxRetryCount=%u\n", ul_temp);
+			mwifiex_dbg(priv->adapter, INFO,
+				    "info: SNMP_RESP: TxRetryCount=%u\n",
+				    ul_temp);
 			break;
 		case DTIM_PERIOD_I:
-			dev_dbg(priv->adapter->dev,
-				"info: SNMP_RESP: DTIM period=%u\n", ul_temp);
+			mwifiex_dbg(priv->adapter, INFO,
+				    "info: SNMP_RESP: DTIM period=%u\n",
+				    ul_temp);
+			break;
 		default:
 			break;
 		}
@@ -426,14 +433,15 @@
 			priv->tx_power_level = (u16) pg->power_min;
 		break;
 	default:
-		dev_err(adapter->dev, "CMD_RESP: unknown cmd action %d\n",
-			action);
+		mwifiex_dbg(adapter, ERROR,
+			    "CMD_RESP: unknown cmd action %d\n",
+			    action);
 		return 0;
 	}
-	dev_dbg(adapter->dev,
-		"info: Current TxPower Level = %d, Max Power=%d, Min Power=%d\n",
-	       priv->tx_power_level, priv->max_tx_power_level,
-	       priv->min_tx_power_level);
+	mwifiex_dbg(adapter, INFO,
+		    "info: Current TxPower Level = %d, Max Power=%d, Min Power=%d\n",
+		    priv->tx_power_level, priv->max_tx_power_level,
+		    priv->min_tx_power_level);
 
 	return 0;
 }
@@ -454,10 +462,10 @@
 		priv->min_tx_power_level = txp->min_power;
 	}
 
-	dev_dbg(priv->adapter->dev,
-		"Current TxPower Level=%d, Max Power=%d, Min Power=%d\n",
-		priv->tx_power_level, priv->max_tx_power_level,
-		priv->min_tx_power_level);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "Current TxPower Level=%d, Max Power=%d, Min Power=%d\n",
+		    priv->tx_power_level, priv->max_tx_power_level,
+		    priv->min_tx_power_level);
 
 	return 0;
 }
@@ -473,18 +481,18 @@
 	struct mwifiex_adapter *adapter = priv->adapter;
 
 	if (adapter->hw_dev_mcs_support == HT_STREAM_2X2)
-		dev_dbg(adapter->dev,
-			"RF_ANT_RESP: Tx action = 0x%x, Tx Mode = 0x%04x"
-			" Rx action = 0x%x, Rx Mode = 0x%04x\n",
-			le16_to_cpu(ant_mimo->action_tx),
-			le16_to_cpu(ant_mimo->tx_ant_mode),
-			le16_to_cpu(ant_mimo->action_rx),
-			le16_to_cpu(ant_mimo->rx_ant_mode));
+		mwifiex_dbg(adapter, INFO,
+			    "RF_ANT_RESP: Tx action = 0x%x, Tx Mode = 0x%04x\t"
+			    "Rx action = 0x%x, Rx Mode = 0x%04x\n",
+			    le16_to_cpu(ant_mimo->action_tx),
+			    le16_to_cpu(ant_mimo->tx_ant_mode),
+			    le16_to_cpu(ant_mimo->action_rx),
+			    le16_to_cpu(ant_mimo->rx_ant_mode));
 	else
-		dev_dbg(adapter->dev,
-			"RF_ANT_RESP: action = 0x%x, Mode = 0x%04x\n",
-			le16_to_cpu(ant_siso->action),
-			le16_to_cpu(ant_siso->ant_mode));
+		mwifiex_dbg(adapter, INFO,
+			    "RF_ANT_RESP: action = 0x%x, Mode = 0x%04x\n",
+			    le16_to_cpu(ant_siso->action),
+			    le16_to_cpu(ant_siso->ant_mode));
 
 	return 0;
 }
@@ -502,8 +510,8 @@
 
 	memcpy(priv->curr_addr, cmd_mac_addr->mac_addr, ETH_ALEN);
 
-	dev_dbg(priv->adapter->dev,
-		"info: set mac address: %pM\n", priv->curr_addr);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: set mac address: %pM\n", priv->curr_addr);
 
 	return 0;
 }
@@ -587,7 +595,8 @@
 
 	if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) {
 		if ((le16_to_cpu(key->key_param_set.key_info) & KEY_MCAST)) {
-			dev_dbg(priv->adapter->dev, "info: key: GTK is set\n");
+			mwifiex_dbg(priv->adapter, INFO,
+				    "info: key: GTK is set\n");
 			priv->wpa_is_gtk_set = true;
 			priv->scan_block = false;
 		}
@@ -617,7 +626,7 @@
 	key_v2 = &resp->params.key_material_v2;
 	if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
 		if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) {
-			dev_dbg(priv->adapter->dev, "info: key: GTK is set\n");
+			mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n");
 			priv->wpa_is_gtk_set = true;
 			priv->scan_block = false;
 		}
@@ -663,14 +672,14 @@
 				- IEEE80211_COUNTRY_STRING_LEN)
 			      / sizeof(struct ieee80211_country_ie_triplet));
 
-	dev_dbg(priv->adapter->dev,
-		"info: 11D Domain Info Resp: no_of_triplet=%d\n",
-		no_of_triplet);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: 11D Domain Info Resp: no_of_triplet=%d\n",
+		    no_of_triplet);
 
 	if (no_of_triplet > MWIFIEX_MAX_TRIPLET_802_11D) {
-		dev_warn(priv->adapter->dev,
-			 "11D: invalid number of triplets %d returned\n",
-			 no_of_triplet);
+		mwifiex_dbg(priv->adapter, FATAL,
+			    "11D: invalid number of triplets %d returned\n",
+			    no_of_triplet);
 		return -1;
 	}
 
@@ -680,8 +689,8 @@
 	case HostCmd_ACT_GEN_GET:
 		break;
 	default:
-		dev_err(priv->adapter->dev,
-			"11D: invalid action:%d\n", domain_info->action);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "11D: invalid action:%d\n", domain_info->action);
 		return -1;
 	}
 
@@ -741,6 +750,19 @@
 	return 0;
 }
 
+/* This function handles the command response of the mem_access command.
+ */
+static int
+mwifiex_ret_mem_access(struct mwifiex_private *priv,
+		       struct host_cmd_ds_command *resp, void *pioctl_buf)
+{
+	struct host_cmd_ds_mem_access *mem = (void *)&resp->params.mem;
+
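+	/* cache the address/value pair returned by firmware */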
+	priv->mem_rw.addr = le32_to_cpu(mem->addr);
+	priv->mem_rw.value = le32_to_cpu(mem->value);
+
+	return 0;
+}
+
 /*
  * This function handles the command response of register access.
  *
@@ -830,12 +852,12 @@
 	if (le16_to_cpu(ibss_coal_resp->action) == HostCmd_ACT_GEN_SET)
 		return 0;
 
-	dev_dbg(priv->adapter->dev,
-		"info: new BSSID %pM\n", ibss_coal_resp->bssid);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: new BSSID %pM\n", ibss_coal_resp->bssid);
 
 	/* If rsp has a NULL BSSID, just return; no action needed */
 	if (is_zero_ether_addr(ibss_coal_resp->bssid)) {
-		dev_warn(priv->adapter->dev, "new BSSID is NULL\n");
+		mwifiex_dbg(priv->adapter, FATAL, "new BSSID is NULL\n");
 		return 0;
 	}
 
@@ -871,48 +893,48 @@
 	case ACT_TDLS_DELETE:
 		if (reason) {
 			if (!node || reason == TDLS_ERR_LINK_NONEXISTENT)
-				dev_dbg(priv->adapter->dev,
-					"TDLS link delete for %pM failed: reason %d\n",
-					cmd_tdls_oper->peer_mac, reason);
+				mwifiex_dbg(priv->adapter, ERROR,
+					    "TDLS link delete for %pM failed: reason %d\n",
+					    cmd_tdls_oper->peer_mac, reason);
 			else
-				dev_err(priv->adapter->dev,
-					"TDLS link delete for %pM failed: reason %d\n",
-					cmd_tdls_oper->peer_mac, reason);
+				mwifiex_dbg(priv->adapter, ERROR,
+					    "TDLS link delete for %pM failed: reason %d\n",
+					    cmd_tdls_oper->peer_mac, reason);
 		} else {
-			dev_dbg(priv->adapter->dev,
-				"TDLS link delete for %pM successful\n",
-				cmd_tdls_oper->peer_mac);
+			mwifiex_dbg(priv->adapter, MSG,
+				    "TDLS link delete for %pM successful\n",
+				    cmd_tdls_oper->peer_mac);
 		}
 		break;
 	case ACT_TDLS_CREATE:
 		if (reason) {
-			dev_err(priv->adapter->dev,
-				"TDLS link creation for %pM failed: reason %d",
-				cmd_tdls_oper->peer_mac, reason);
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "TDLS link creation for %pM failed: reason %d\n",
+				    cmd_tdls_oper->peer_mac, reason);
 			if (node && reason != TDLS_ERR_LINK_EXISTS)
 				node->tdls_status = TDLS_SETUP_FAILURE;
 		} else {
-			dev_dbg(priv->adapter->dev,
-				"TDLS link creation for %pM successful",
-				cmd_tdls_oper->peer_mac);
+			mwifiex_dbg(priv->adapter, MSG,
+				    "TDLS link creation for %pM successful\n",
+				    cmd_tdls_oper->peer_mac);
 		}
 		break;
 	case ACT_TDLS_CONFIG:
 		if (reason) {
-			dev_err(priv->adapter->dev,
-				"TDLS link config for %pM failed, reason %d\n",
-				cmd_tdls_oper->peer_mac, reason);
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "TDLS link config for %pM failed, reason %d\n",
+				    cmd_tdls_oper->peer_mac, reason);
 			if (node)
 				node->tdls_status = TDLS_SETUP_FAILURE;
 		} else {
-			dev_dbg(priv->adapter->dev,
-				"TDLS link config for %pM successful\n",
-				cmd_tdls_oper->peer_mac);
+			mwifiex_dbg(priv->adapter, MSG,
+				    "TDLS link config for %pM successful\n",
+				    cmd_tdls_oper->peer_mac);
 		}
 		break;
 	default:
-		dev_err(priv->adapter->dev,
-			"Unknown TDLS command action response %d", action);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Unknown TDLS command action response %d\n", action);
 		return -1;
 	}
 
@@ -929,8 +951,30 @@
 
 	/* For every subscribe event command (Get/Set/Clear), FW reports the
 	 * current set of subscribed events */
-	dev_dbg(priv->adapter->dev, "Bitmap of currently subscribed events: %16x\n",
-		le16_to_cpu(cmd_sub_event->events));
+	mwifiex_dbg(priv->adapter, EVENT,
+		    "Bitmap of currently subscribed events: %16x\n",
+		    le16_to_cpu(cmd_sub_event->events));
+
+	return 0;
+}
+
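+/* This function handles the command response of the uAP sta_list
+ * command. It walks the STA info entries returned by firmware and
+ * updates the RSSI stored in each known station node.
+ */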
+static int mwifiex_ret_uap_sta_list(struct mwifiex_private *priv,
+				    struct host_cmd_ds_command *resp)
+{
+	struct host_cmd_ds_sta_list *sta_list =
+		&resp->params.sta_list;
+	struct mwifiex_ie_types_sta_info *sta_info = (void *)&sta_list->tlv;
+	int i;
+	struct mwifiex_sta_node *sta_node;
+
+	for (i = 0; i < sta_list->sta_count; i++) {
+		sta_node = mwifiex_get_sta_entry(priv, sta_info->mac);
+		if (likely(sta_node))
+			sta_node->stats.rssi = sta_info->rssi;
+
+		/* advance even when the MAC is unknown so the walk stays
+		 * aligned with the firmware's STA entries
+		 */
+		sta_info++;
+	}
 
 	return 0;
 }
@@ -940,7 +984,7 @@
 				struct host_cmd_ds_command *resp)
 {
 	if (resp->result != HostCmd_RESULT_OK) {
-		dev_err(priv->adapter->dev, "Cal data cmd resp failed\n");
+		mwifiex_dbg(priv->adapter, ERROR, "Cal data cmd resp failed\n");
 		return -1;
 	}
 
@@ -1008,8 +1052,8 @@
 		break;
 	case HostCmd_CMD_802_11_BG_SCAN_QUERY:
 		ret = mwifiex_ret_802_11_scan(priv, resp);
-		dev_dbg(adapter->dev,
-			"info: CMD_RESP: BG_SCAN result is ready!\n");
+		mwifiex_dbg(adapter, CMD,
+			    "info: CMD_RESP: BG_SCAN result is ready!\n");
 		break;
 	case HostCmd_CMD_TXPWR_CFG:
 		ret = mwifiex_ret_tx_power_cfg(priv, resp);
@@ -1088,8 +1132,8 @@
 					/ MWIFIEX_SDIO_BLOCK_SIZE)
 				       * MWIFIEX_SDIO_BLOCK_SIZE;
 		adapter->curr_tx_buf_size = adapter->tx_buf_size;
-		dev_dbg(adapter->dev, "cmd: curr_tx_buf_size=%d\n",
-			adapter->curr_tx_buf_size);
+		mwifiex_dbg(adapter, CMD, "cmd: curr_tx_buf_size=%d\n",
+			    adapter->curr_tx_buf_size);
 
 		if (adapter->if_ops.update_mp_end_port)
 			adapter->if_ops.update_mp_end_port(adapter,
@@ -1103,6 +1147,9 @@
 	case HostCmd_CMD_802_11_IBSS_COALESCING_STATUS:
 		ret = mwifiex_ret_ibss_coalescing_status(priv, resp);
 		break;
+	case HostCmd_CMD_MEM_ACCESS:
+		ret = mwifiex_ret_mem_access(priv, resp, data_buf);
+		break;
 	case HostCmd_CMD_MAC_REG_ACCESS:
 	case HostCmd_CMD_BBP_REG_ACCESS:
 	case HostCmd_CMD_RF_REG_ACCESS:
@@ -1122,6 +1169,9 @@
 		break;
 	case HostCmd_CMD_UAP_SYS_CONFIG:
 		break;
+	case HOST_CMD_APCMD_STA_LIST:
+		ret = mwifiex_ret_uap_sta_list(priv, resp);
+		break;
 	case HostCmd_CMD_UAP_BSS_START:
 		adapter->tx_lock_flag = false;
 		adapter->pps_uapsd_mode = false;
@@ -1133,6 +1183,8 @@
 		break;
 	case HostCmd_CMD_UAP_STA_DEAUTH:
 		break;
+	case HOST_CMD_APCMD_SYS_RESET:
+		break;
 	case HostCmd_CMD_MEF_CFG:
 		break;
 	case HostCmd_CMD_COALESCE_CFG:
@@ -1146,8 +1198,9 @@
 		ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp);
 		break;
 	default:
-		dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
-			resp->command);
+		mwifiex_dbg(adapter, ERROR,
+			    "CMD_RESP: unknown cmd response %#x\n",
+			    resp->command);
 		break;
 	}
 
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 0dc7a1d..848de26 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -48,7 +48,8 @@
 	if (!priv->media_connected)
 		return;
 
-	dev_dbg(adapter->dev, "info: handles disconnect event\n");
+	mwifiex_dbg(adapter, INFO,
+		    "info: handles disconnect event\n");
 
 	priv->media_connected = false;
 
@@ -104,12 +105,14 @@
 	 * it could be used for re-assoc
 	 */
 
-	dev_dbg(adapter->dev, "info: previous SSID=%s, SSID len=%u\n",
-		priv->prev_ssid.ssid, priv->prev_ssid.ssid_len);
+	mwifiex_dbg(adapter, INFO,
+		    "info: previous SSID=%s, SSID len=%u\n",
+		    priv->prev_ssid.ssid, priv->prev_ssid.ssid_len);
 
-	dev_dbg(adapter->dev, "info: current SSID=%s, SSID len=%u\n",
-		priv->curr_bss_params.bss_descriptor.ssid.ssid,
-		priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
+	mwifiex_dbg(adapter, INFO,
+		    "info: current SSID=%s, SSID len=%u\n",
+		    priv->curr_bss_params.bss_descriptor.ssid.ssid,
+		    priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
 
 	memcpy(&priv->prev_ssid,
 	       &priv->curr_bss_params.bss_descriptor.ssid,
@@ -127,13 +130,13 @@
 	if (adapter->is_cmd_timedout && adapter->curr_cmd)
 		return;
 	priv->media_connected = false;
-	dev_dbg(adapter->dev,
-		"info: successfully disconnected from %pM: reason code %d\n",
-		priv->cfg_bssid, reason_code);
+	mwifiex_dbg(adapter, MSG,
+		    "info: successfully disconnected from %pM: reason code %d\n",
+		    priv->cfg_bssid, reason_code);
 	if (priv->bss_mode == NL80211_IFTYPE_STATION ||
 	    priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
 		cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
-				      GFP_KERNEL);
+				      false, GFP_KERNEL);
 	}
 	eth_zero_addr(priv->cfg_bssid);
 
@@ -154,13 +157,13 @@
 	/* reserved 2 bytes are not mandatory in tdls event */
 	if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) -
 			      sizeof(u16) - sizeof(adapter->event_cause))) {
-		dev_err(adapter->dev, "Invalid event length!\n");
+		mwifiex_dbg(adapter, ERROR, "Invalid event length!\n");
 		return -1;
 	}
 
 	sta_ptr = mwifiex_get_sta_entry(priv, tdls_evt->peer_mac);
 	if (!sta_ptr) {
-		dev_err(adapter->dev, "cannot get sta entry!\n");
+		mwifiex_dbg(adapter, ERROR, "cannot get sta entry!\n");
 		return -1;
 	}
 
@@ -180,6 +183,63 @@
 }
 
 /*
+ * This function handles coex events generated by firmware.
+ */
+void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
+					     struct sk_buff *event_skb)
+{
+	struct mwifiex_adapter *adapter = priv->adapter;
+	struct mwifiex_ie_types_header *tlv;
+	struct mwifiex_ie_types_btcoex_aggr_win_size *winsizetlv;
+	struct mwifiex_ie_types_btcoex_scan_time *scantlv;
+	s32 len = event_skb->len - sizeof(u32);
+	u8 *cur_ptr = event_skb->data + sizeof(u32);
+	u16 tlv_type, tlv_len;
+
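+	/* walk the TLVs appended to the event; each one carries either
+	 * new aggregation window sizes or new scan timings for coex
+	 */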
+	while (len >= sizeof(struct mwifiex_ie_types_header)) {
+		tlv = (struct mwifiex_ie_types_header *)cur_ptr;
+		tlv_len = le16_to_cpu(tlv->len);
+		tlv_type = le16_to_cpu(tlv->type);
+
+		if ((tlv_len + sizeof(struct mwifiex_ie_types_header)) > len)
+			break;
+		switch (tlv_type) {
+		case TLV_BTCOEX_WL_AGGR_WINSIZE:
+			winsizetlv =
+			    (struct mwifiex_ie_types_btcoex_aggr_win_size *)tlv;
+			adapter->coex_win_size = winsizetlv->coex_win_size;
+			adapter->coex_tx_win_size =
+				winsizetlv->tx_win_size;
+			adapter->coex_rx_win_size =
+				winsizetlv->rx_win_size;
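+			/* re-apply the new window sizes to the AMPDU paths */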
+			mwifiex_coex_ampdu_rxwinsize(adapter);
+			mwifiex_update_ampdu_txwinsize(adapter);
+			break;
+
+		case TLV_BTCOEX_WL_SCANTIME:
+			scantlv =
+			    (struct mwifiex_ie_types_btcoex_scan_time *)tlv;
+			adapter->coex_scan = scantlv->coex_scan;
+			adapter->coex_min_scan_time = scantlv->min_scan_time;
+			adapter->coex_max_scan_time = scantlv->max_scan_time;
+			break;
+
+		default:
+			break;
+		}
+
+		len -= tlv_len + sizeof(struct mwifiex_ie_types_header);
+		cur_ptr += tlv_len +
+			sizeof(struct mwifiex_ie_types_header);
+	}
+
+	mwifiex_dbg(adapter, EVENT,
+		    "coex_scan=%d min_scan=%d coex_win=%d, tx_win=%d rx_win=%d\n",
+		    adapter->coex_scan, adapter->coex_min_scan_time,
+		    adapter->coex_win_size, adapter->coex_tx_win_size,
+		    adapter->coex_rx_win_size);
+}
+
+/*
  * This function handles events generated by firmware.
  *
  * This is a generic function and handles all events.
@@ -239,21 +299,21 @@
 
 	switch (eventcause) {
 	case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
-		dev_err(adapter->dev,
-			"invalid EVENT: DUMMY_HOST_WAKEUP_SIGNAL, ignore it\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "invalid EVENT: DUMMY_HOST_WAKEUP_SIGNAL, ignore it\n");
 		break;
 	case EVENT_LINK_SENSED:
-		dev_dbg(adapter->dev, "event: LINK_SENSED\n");
+		mwifiex_dbg(adapter, EVENT, "event: LINK_SENSED\n");
 		if (!netif_carrier_ok(priv->netdev))
 			netif_carrier_on(priv->netdev);
 		mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
 		break;
 
 	case EVENT_DEAUTHENTICATED:
-		dev_dbg(adapter->dev, "event: Deauthenticated\n");
+		mwifiex_dbg(adapter, EVENT, "event: Deauthenticated\n");
 		if (priv->wps.session_enable) {
-			dev_dbg(adapter->dev,
-				"info: receive deauth event in wps session\n");
+			mwifiex_dbg(adapter, INFO,
+				    "info: receive deauth event in wps session\n");
 			break;
 		}
 		adapter->dbg.num_event_deauth++;
@@ -265,10 +325,10 @@
 		break;
 
 	case EVENT_DISASSOCIATED:
-		dev_dbg(adapter->dev, "event: Disassociated\n");
+		mwifiex_dbg(adapter, EVENT, "event: Disassociated\n");
 		if (priv->wps.session_enable) {
-			dev_dbg(adapter->dev,
-				"info: receive disassoc event in wps session\n");
+			mwifiex_dbg(adapter, INFO,
+				    "info: receive disassoc event in wps session\n");
 			break;
 		}
 		adapter->dbg.num_event_disassoc++;
@@ -280,7 +340,7 @@
 		break;
 
 	case EVENT_LINK_LOST:
-		dev_dbg(adapter->dev, "event: Link lost\n");
+		mwifiex_dbg(adapter, EVENT, "event: Link lost\n");
 		adapter->dbg.num_event_link_lost++;
 		if (priv->media_connected) {
 			reason_code =
@@ -290,7 +350,7 @@
 		break;
 
 	case EVENT_PS_SLEEP:
-		dev_dbg(adapter->dev, "info: EVENT: SLEEP\n");
+		mwifiex_dbg(adapter, EVENT, "info: EVENT: SLEEP\n");
 
 		adapter->ps_state = PS_STATE_PRE_SLEEP;
 
@@ -298,12 +358,12 @@
 		break;
 
 	case EVENT_PS_AWAKE:
-		dev_dbg(adapter->dev, "info: EVENT: AWAKE\n");
+		mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
 		if (!adapter->pps_uapsd_mode &&
 		    priv->media_connected && adapter->sleep_period.period) {
 				adapter->pps_uapsd_mode = true;
-				dev_dbg(adapter->dev,
-					"event: PPS/UAPSD mode activated\n");
+				mwifiex_dbg(adapter, EVENT,
+					    "event: PPS/UAPSD mode activated\n");
 		}
 		adapter->tx_lock_flag = false;
 		if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
@@ -333,26 +393,26 @@
 
 	case EVENT_DEEP_SLEEP_AWAKE:
 		adapter->if_ops.wakeup_complete(adapter);
-		dev_dbg(adapter->dev, "event: DS_AWAKE\n");
+		mwifiex_dbg(adapter, EVENT, "event: DS_AWAKE\n");
 		if (adapter->is_deep_sleep)
 			adapter->is_deep_sleep = false;
 		break;
 
 	case EVENT_HS_ACT_REQ:
-		dev_dbg(adapter->dev, "event: HS_ACT_REQ\n");
+		mwifiex_dbg(adapter, EVENT, "event: HS_ACT_REQ\n");
 		ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_HS_CFG_ENH,
 				       0, 0, NULL, false);
 		break;
 
 	case EVENT_MIC_ERR_UNICAST:
-		dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n");
+		mwifiex_dbg(adapter, EVENT, "event: UNICAST MIC ERROR\n");
 		cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
 					     NL80211_KEYTYPE_PAIRWISE,
 					     -1, NULL, GFP_KERNEL);
 		break;
 
 	case EVENT_MIC_ERR_MULTICAST:
-		dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n");
+		mwifiex_dbg(adapter, EVENT, "event: MULTICAST MIC ERROR\n");
 		cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
 					     NL80211_KEYTYPE_GROUP,
 					     -1, NULL, GFP_KERNEL);
@@ -362,7 +422,7 @@
 		break;
 
 	case EVENT_ADHOC_BCN_LOST:
-		dev_dbg(adapter->dev, "event: ADHOC_BCN_LOST\n");
+		mwifiex_dbg(adapter, EVENT, "event: ADHOC_BCN_LOST\n");
 		priv->adhoc_is_link_sensed = false;
 		mwifiex_clean_txrx(priv);
 		mwifiex_stop_net_dev_queue(priv->netdev, adapter);
@@ -371,17 +431,17 @@
 		break;
 
 	case EVENT_BG_SCAN_REPORT:
-		dev_dbg(adapter->dev, "event: BGS_REPORT\n");
+		mwifiex_dbg(adapter, EVENT, "event: BGS_REPORT\n");
 		ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_QUERY,
 				       HostCmd_ACT_GEN_GET, 0, NULL, false);
 		break;
 
 	case EVENT_PORT_RELEASE:
-		dev_dbg(adapter->dev, "event: PORT RELEASE\n");
+		mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n");
 		break;
 
 	case EVENT_EXT_SCAN_REPORT:
-		dev_dbg(adapter->dev, "event: EXT_SCAN Report\n");
+		mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n");
 		if (adapter->ext_scan)
 			ret = mwifiex_handle_event_ext_scan_report(priv,
 						adapter->event_skb->data);
@@ -389,7 +449,7 @@
 		break;
 
 	case EVENT_WMM_STATUS_CHANGE:
-		dev_dbg(adapter->dev, "event: WMM status changed\n");
+		mwifiex_dbg(adapter, EVENT, "event: WMM status changed\n");
 		ret = mwifiex_send_cmd(priv, HostCmd_CMD_WMM_GET_STATUS,
 				       0, 0, NULL, false);
 		break;
@@ -401,13 +461,13 @@
 		mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
 				 HostCmd_ACT_GEN_GET, 0, NULL, false);
 		priv->subsc_evt_rssi_state = RSSI_LOW_RECVD;
-		dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n");
+		mwifiex_dbg(adapter, EVENT, "event: Beacon RSSI_LOW\n");
 		break;
 	case EVENT_SNR_LOW:
-		dev_dbg(adapter->dev, "event: Beacon SNR_LOW\n");
+		mwifiex_dbg(adapter, EVENT, "event: Beacon SNR_LOW\n");
 		break;
 	case EVENT_MAX_FAIL:
-		dev_dbg(adapter->dev, "event: MAX_FAIL\n");
+		mwifiex_dbg(adapter, EVENT, "event: MAX_FAIL\n");
 		break;
 	case EVENT_RSSI_HIGH:
 		cfg80211_cqm_rssi_notify(priv->netdev,
@@ -416,47 +476,47 @@
 		mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
 				 HostCmd_ACT_GEN_GET, 0, NULL, false);
 		priv->subsc_evt_rssi_state = RSSI_HIGH_RECVD;
-		dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n");
+		mwifiex_dbg(adapter, EVENT, "event: Beacon RSSI_HIGH\n");
 		break;
 	case EVENT_SNR_HIGH:
-		dev_dbg(adapter->dev, "event: Beacon SNR_HIGH\n");
+		mwifiex_dbg(adapter, EVENT, "event: Beacon SNR_HIGH\n");
 		break;
 	case EVENT_DATA_RSSI_LOW:
-		dev_dbg(adapter->dev, "event: Data RSSI_LOW\n");
+		mwifiex_dbg(adapter, EVENT, "event: Data RSSI_LOW\n");
 		break;
 	case EVENT_DATA_SNR_LOW:
-		dev_dbg(adapter->dev, "event: Data SNR_LOW\n");
+		mwifiex_dbg(adapter, EVENT, "event: Data SNR_LOW\n");
 		break;
 	case EVENT_DATA_RSSI_HIGH:
-		dev_dbg(adapter->dev, "event: Data RSSI_HIGH\n");
+		mwifiex_dbg(adapter, EVENT, "event: Data RSSI_HIGH\n");
 		break;
 	case EVENT_DATA_SNR_HIGH:
-		dev_dbg(adapter->dev, "event: Data SNR_HIGH\n");
+		mwifiex_dbg(adapter, EVENT, "event: Data SNR_HIGH\n");
 		break;
 	case EVENT_LINK_QUALITY:
-		dev_dbg(adapter->dev, "event: Link Quality\n");
+		mwifiex_dbg(adapter, EVENT, "event: Link Quality\n");
 		break;
 	case EVENT_PRE_BEACON_LOST:
-		dev_dbg(adapter->dev, "event: Pre-Beacon Lost\n");
+		mwifiex_dbg(adapter, EVENT, "event: Pre-Beacon Lost\n");
 		break;
 	case EVENT_IBSS_COALESCED:
-		dev_dbg(adapter->dev, "event: IBSS_COALESCED\n");
+		mwifiex_dbg(adapter, EVENT, "event: IBSS_COALESCED\n");
 		ret = mwifiex_send_cmd(priv,
 				HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
 				HostCmd_ACT_GEN_GET, 0, NULL, false);
 		break;
 	case EVENT_ADDBA:
-		dev_dbg(adapter->dev, "event: ADDBA Request\n");
+		mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n");
 		mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
 				 HostCmd_ACT_GEN_SET, 0,
 				 adapter->event_body, false);
 		break;
 	case EVENT_DELBA:
-		dev_dbg(adapter->dev, "event: DELBA Request\n");
+		mwifiex_dbg(adapter, EVENT, "event: DELBA Request\n");
 		mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
 		break;
 	case EVENT_BA_STREAM_TIEMOUT:
-		dev_dbg(adapter->dev, "event:  BA Stream timeout\n");
+		mwifiex_dbg(adapter, EVENT, "event:  BA Stream timeout\n");
 		mwifiex_11n_ba_stream_timeout(priv,
 					      (struct host_cmd_ds_11n_batimeout
 					       *)
@@ -464,28 +524,31 @@
 		break;
 	case EVENT_AMSDU_AGGR_CTRL:
 		ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
-		dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
+		mwifiex_dbg(adapter, EVENT,
+			    "event: AMSDU_AGGR_CTRL %d\n", ctrl);
 
 		adapter->tx_buf_size =
 				min_t(u16, adapter->curr_tx_buf_size, ctrl);
-		dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
-			adapter->tx_buf_size);
+		mwifiex_dbg(adapter, EVENT, "event: tx_buf_size %d\n",
+			    adapter->tx_buf_size);
 		break;
 
 	case EVENT_WEP_ICV_ERR:
-		dev_dbg(adapter->dev, "event: WEP ICV error\n");
+		mwifiex_dbg(adapter, EVENT, "event: WEP ICV error\n");
 		break;
 
 	case EVENT_BW_CHANGE:
-		dev_dbg(adapter->dev, "event: BW Change\n");
+		mwifiex_dbg(adapter, EVENT, "event: BW Change\n");
 		break;
 
 	case EVENT_HOSTWAKE_STAIE:
-		dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause);
+		mwifiex_dbg(adapter, EVENT,
+			    "event: HOSTWAKE_STAIE %d\n", eventcause);
 		break;
 
 	case EVENT_REMAIN_ON_CHAN_EXPIRED:
-		dev_dbg(adapter->dev, "event: Remain on channel expired\n");
+		mwifiex_dbg(adapter, EVENT,
+			    "event: Remain on channel expired\n");
 		cfg80211_remain_on_channel_expired(&priv->wdev,
 						   priv->roc_cfg.cookie,
 						   &priv->roc_cfg.chan,
@@ -496,7 +559,7 @@
 		break;
 
 	case EVENT_CHANNEL_SWITCH_ANN:
-		dev_dbg(adapter->dev, "event: Channel Switch Announcement\n");
+		mwifiex_dbg(adapter, EVENT, "event: Channel Switch Announcement\n");
 		priv->csa_expire_time =
 				jiffies + msecs_to_jiffies(DFS_CHAN_MOVE_TIME);
 		priv->csa_chan = priv->curr_bss_params.bss_descriptor.channel;
@@ -511,23 +574,28 @@
 		break;
 
 	case EVENT_TX_STATUS_REPORT:
-		dev_dbg(adapter->dev, "event: TX_STATUS Report\n");
+		mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n");
 		mwifiex_parse_tx_status_event(priv, adapter->event_body);
 		break;
 
 	case EVENT_CHANNEL_REPORT_RDY:
-		dev_dbg(adapter->dev, "event: Channel Report\n");
+		mwifiex_dbg(adapter, EVENT, "event: Channel Report\n");
 		ret = mwifiex_11h_handle_chanrpt_ready(priv,
 						       adapter->event_skb);
 		break;
 	case EVENT_RADAR_DETECTED:
-		dev_dbg(adapter->dev, "event: Radar detected\n");
+		mwifiex_dbg(adapter, EVENT, "event: Radar detected\n");
 		ret = mwifiex_11h_handle_radar_detected(priv,
 							adapter->event_skb);
 		break;
+	case EVENT_BT_COEX_WLAN_PARA_CHANGE:
+		mwifiex_dbg(adapter, EVENT,
+			    "event: BT coex wlan param update\n");
+		mwifiex_bt_coex_wlan_param_update_event(priv,
+							adapter->event_skb);
+		break;
 	default:
-		dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
-			eventcause);
+		mwifiex_dbg(adapter, ERROR, "event: unknown event id: %#x\n",
+			    eventcause);
 		break;
 	}
 
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 0599e41..d8b7d9c 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -64,7 +64,10 @@
 						  *(cmd_queued->condition),
 						  (12 * HZ));
 	if (status <= 0) {
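+		/* a return value of 0 means the wait timed out; map it
+		 * to -ETIMEDOUT so callers get a real error code
+		 */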
-		dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
+		if (status == 0)
+			status = -ETIMEDOUT;
+		mwifiex_dbg(adapter, ERROR,
+			    "cmd_wait_q terminated: %d\n", status);
 		mwifiex_cancel_all_pending_cmd(adapter);
 		return status;
 	}
@@ -91,7 +94,8 @@
 	old_pkt_filter = priv->curr_pkt_filter;
 
 	if (mcast_list->mode == MWIFIEX_PROMISC_MODE) {
-		dev_dbg(priv->adapter->dev, "info: Enable Promiscuous mode\n");
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: Enable Promiscuous mode\n");
 		priv->curr_pkt_filter |= HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
 		priv->curr_pkt_filter &=
 			~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
@@ -99,16 +103,16 @@
 		/* Multicast */
 		priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
 		if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) {
-			dev_dbg(priv->adapter->dev,
-				"info: Enabling All Multicast!\n");
+			mwifiex_dbg(priv->adapter, INFO,
+				    "info: Enabling All Multicast!\n");
 			priv->curr_pkt_filter |=
 				HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
 		} else {
 			priv->curr_pkt_filter &=
 				~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
-			dev_dbg(priv->adapter->dev,
-				"info: Set multicast list=%d\n",
-				mcast_list->num_multicast_addr);
+			mwifiex_dbg(priv->adapter, INFO,
+				    "info: Set multicast list=%d\n",
+				    mcast_list->num_multicast_addr);
 			/* Send multicast addresses to firmware */
 			ret = mwifiex_send_cmd(priv,
 					       HostCmd_CMD_MAC_MULTICAST_ADR,
@@ -116,9 +120,9 @@
 					       mcast_list, false);
 		}
 	}
-	dev_dbg(priv->adapter->dev,
-		"info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n",
-	       old_pkt_filter, priv->curr_pkt_filter);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n",
+		    old_pkt_filter, priv->curr_pkt_filter);
 	if (old_pkt_filter != priv->curr_pkt_filter) {
 		ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
 				       HostCmd_ACT_GEN_SET,
@@ -151,7 +155,8 @@
 	rcu_read_unlock();
 
 	if (!beacon_ie) {
-		dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    " failed to alloc beacon_ie\n");
 		return -ENOMEM;
 	}
 
@@ -165,7 +170,8 @@
 	bss_desc->bss_band = bss_priv->band;
 	bss_desc->fw_tsf = bss_priv->fw_tsf;
 	if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) {
-		dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n");
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: InterpretIE: AP WEP enabled\n");
 		bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
 	} else {
 		bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
@@ -219,8 +225,8 @@
 
 	if (!strncmp(priv->adapter->country_code, &country_ie[2], 2)) {
 		rcu_read_unlock();
-		wiphy_dbg(priv->wdev.wiphy,
-			  "11D: skip setting domain info in FW\n");
+		mwifiex_dbg(priv->adapter, INFO,
+			    "11D: skip setting domain info in FW\n");
 		return 0;
 	}
 	memcpy(priv->adapter->country_code, &country_ie[2], 2);
@@ -241,8 +247,8 @@
 
 	if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
 			     HostCmd_ACT_GEN_SET, 0, NULL, false)) {
-		wiphy_err(priv->adapter->wiphy,
-			  "11D: setting domain info in FW\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "11D: setting domain info in FW fail\n");
 		return -1;
 	}
 
@@ -304,14 +310,15 @@
 
 		if (mwifiex_11h_get_csa_closed_channel(priv) ==
 							(u8)bss_desc->channel) {
-			dev_err(adapter->dev,
-				"Attempt to reconnect on csa closed chan(%d)\n",
-				bss_desc->channel);
+			mwifiex_dbg(adapter, ERROR,
+				    "Attempt to reconnect on csa closed chan(%d)\n",
+				    bss_desc->channel);
 			goto done;
 		}
 
-		dev_dbg(adapter->dev, "info: SSID found in scan list ... "
-				      "associating...\n");
+		mwifiex_dbg(adapter, INFO,
+			    "info: SSID found in scan list ...\t"
+			    "associating...\n");
 
 		mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 		if (netif_carrier_ok(priv->netdev))
@@ -353,15 +360,17 @@
 			netif_carrier_off(priv->netdev);
 
 		if (!ret) {
-			dev_dbg(adapter->dev, "info: network found in scan"
-							" list. Joining...\n");
+			mwifiex_dbg(adapter, INFO,
+				    "info: network found in scan\t"
+				    " list. Joining...\n");
 			ret = mwifiex_adhoc_join(priv, bss_desc);
 			if (bss)
 				cfg80211_put_bss(priv->adapter->wiphy, bss);
 		} else {
-			dev_dbg(adapter->dev, "info: Network not found in "
-				"the list, creating adhoc with ssid = %s\n",
-				req_ssid->ssid);
+			mwifiex_dbg(adapter, INFO,
+				    "info: Network not found in\t"
+				    "the list, creating adhoc with ssid = %s\n",
+				    req_ssid->ssid);
 			ret = mwifiex_adhoc_start(priv, req_ssid);
 		}
 	}
@@ -396,8 +405,9 @@
 	switch (action) {
 	case HostCmd_ACT_GEN_SET:
 		if (adapter->pps_uapsd_mode) {
-			dev_dbg(adapter->dev, "info: Host Sleep IOCTL"
-				" is blocked in UAPSD/PPS mode\n");
+			mwifiex_dbg(adapter, INFO,
+				    "info: Host Sleep IOCTL\t"
+				    "is blocked in UAPSD/PPS mode\n");
 			status = -1;
 			break;
 		}
@@ -494,7 +504,8 @@
 	}
 
 	if (adapter->hs_activated) {
-		dev_dbg(adapter->dev, "cmd: HS Already activated\n");
+		mwifiex_dbg(adapter, CMD,
+			    "cmd: HS Already activated\n");
 		return true;
 	}
 
@@ -510,14 +521,16 @@
 						   MWIFIEX_BSS_ROLE_STA),
 				  HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD,
 				  &hscfg)) {
-		dev_err(adapter->dev, "IOCTL request HS enable failed\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "IOCTL request HS enable failed\n");
 		return false;
 	}
 
 	if (wait_event_interruptible_timeout(adapter->hs_activate_wait_q,
 					     adapter->hs_activate_wait_q_woken,
 					     (10 * HZ)) <= 0) {
-		dev_err(adapter->dev, "hs_activate_wait_q terminated\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "hs_activate_wait_q terminated\n");
 		return false;
 	}
 
@@ -637,10 +650,11 @@
 		dbm = (u16) power_cfg->power_level;
 		if ((dbm < priv->min_tx_power_level) ||
 		    (dbm > priv->max_tx_power_level)) {
-			dev_err(priv->adapter->dev, "txpower value %d dBm"
-				" is out of range (%d dBm-%d dBm)\n",
-				dbm, priv->min_tx_power_level,
-				priv->max_tx_power_level);
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "txpower value %d dBm\t"
+				    "is out of range (%d dBm-%d dBm)\n",
+				    dbm, priv->min_tx_power_level,
+				    priv->max_tx_power_level);
 			return -1;
 		}
 	}
@@ -739,14 +753,15 @@
 {
 	if (ie_len) {
 		if (ie_len > sizeof(priv->wpa_ie)) {
-			dev_err(priv->adapter->dev,
-				"failed to copy WPA IE, too big\n");
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "failed to copy WPA IE, too big\n");
 			return -1;
 		}
 		memcpy(priv->wpa_ie, ie_data_ptr, ie_len);
 		priv->wpa_ie_len = (u8) ie_len;
-		dev_dbg(priv->adapter->dev, "cmd: Set Wpa_ie_len=%d IE=%#x\n",
-			priv->wpa_ie_len, priv->wpa_ie[0]);
+		mwifiex_dbg(priv->adapter, CMD,
+			    "cmd: Set Wpa_ie_len=%d IE=%#x\n",
+			    priv->wpa_ie_len, priv->wpa_ie[0]);
 
 		if (priv->wpa_ie[0] == WLAN_EID_VENDOR_SPECIFIC) {
 			priv->sec_info.wpa_enabled = true;
@@ -759,8 +774,9 @@
 	} else {
 		memset(priv->wpa_ie, 0, sizeof(priv->wpa_ie));
 		priv->wpa_ie_len = 0;
-		dev_dbg(priv->adapter->dev, "info: reset wpa_ie_len=%d IE=%#x\n",
-			priv->wpa_ie_len, priv->wpa_ie[0]);
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: reset wpa_ie_len=%d IE=%#x\n",
+			    priv->wpa_ie_len, priv->wpa_ie[0]);
 		priv->sec_info.wpa_enabled = false;
 		priv->sec_info.wpa2_enabled = false;
 	}
@@ -780,23 +796,24 @@
 {
 	if (ie_len) {
 		if (ie_len > sizeof(priv->wapi_ie)) {
-			dev_dbg(priv->adapter->dev,
-				"info: failed to copy WAPI IE, too big\n");
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "info: failed to copy WAPI IE, too big\n");
 			return -1;
 		}
 		memcpy(priv->wapi_ie, ie_data_ptr, ie_len);
 		priv->wapi_ie_len = ie_len;
-		dev_dbg(priv->adapter->dev, "cmd: Set wapi_ie_len=%d IE=%#x\n",
-			priv->wapi_ie_len, priv->wapi_ie[0]);
+		mwifiex_dbg(priv->adapter, CMD,
+			    "cmd: Set wapi_ie_len=%d IE=%#x\n",
+			    priv->wapi_ie_len, priv->wapi_ie[0]);
 
 		if (priv->wapi_ie[0] == WLAN_EID_BSS_AC_ACCESS_DELAY)
 			priv->sec_info.wapi_enabled = true;
 	} else {
 		memset(priv->wapi_ie, 0, sizeof(priv->wapi_ie));
 		priv->wapi_ie_len = ie_len;
-		dev_dbg(priv->adapter->dev,
-			"info: Reset wapi_ie_len=%d IE=%#x\n",
-		       priv->wapi_ie_len, priv->wapi_ie[0]);
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: Reset wapi_ie_len=%d IE=%#x\n",
+			    priv->wapi_ie_len, priv->wapi_ie[0]);
 		priv->sec_info.wapi_enabled = false;
 	}
 	return 0;
@@ -814,8 +831,8 @@
 {
 	if (ie_len) {
 		if (ie_len > MWIFIEX_MAX_VSIE_LEN) {
-			dev_dbg(priv->adapter->dev,
-				"info: failed to copy WPS IE, too big\n");
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "info: failed to copy WPS IE, too big\n");
 			return -1;
 		}
 
@@ -825,13 +842,14 @@
 
 		memcpy(priv->wps_ie, ie_data_ptr, ie_len);
 		priv->wps_ie_len = ie_len;
-		dev_dbg(priv->adapter->dev, "cmd: Set wps_ie_len=%d IE=%#x\n",
-			priv->wps_ie_len, priv->wps_ie[0]);
+		mwifiex_dbg(priv->adapter, CMD,
+			    "cmd: Set wps_ie_len=%d IE=%#x\n",
+			    priv->wps_ie_len, priv->wps_ie[0]);
 	} else {
 		kfree(priv->wps_ie);
 		priv->wps_ie_len = ie_len;
-		dev_dbg(priv->adapter->dev,
-			"info: Reset wps_ie_len=%d\n", priv->wps_ie_len);
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: Reset wps_ie_len=%d\n", priv->wps_ie_len);
 	}
 	return 0;
 }
@@ -875,8 +893,8 @@
 		/* Copy the required key as the current key */
 		wep_key = &priv->wep_key[index];
 		if (!wep_key->key_length) {
-			dev_err(adapter->dev,
-				"key not set, so cannot enable it\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "key not set, so cannot enable it\n");
 			return -1;
 		}
 
@@ -953,7 +971,8 @@
 
 	/* Current driver only supports key length of up to 32 bytes */
 	if (encrypt_key->key_len > WLAN_MAX_KEY_LEN) {
-		dev_err(priv->adapter->dev, "key length too long\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "key length too long\n");
 		return -1;
 	}
 
@@ -1040,7 +1059,7 @@
 
 	snprintf(version, max_len, driver_version, fw_ver);
 
-	dev_dbg(adapter->dev, "info: MWIFIEX VERSION: %s\n", version);
+	mwifiex_dbg(adapter, MSG, "info: MWIFIEX VERSION: %s\n", version);
 
 	return 0;
 }
@@ -1128,7 +1147,8 @@
 	}
 	if (mwifiex_send_cmd(priv, HostCmd_CMD_REMAIN_ON_CHAN,
 			     action, 0, &roc_cfg, true)) {
-		dev_err(priv->adapter->dev, "failed to remain on channel\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "failed to remain on channel\n");
 		return -1;
 	}
 
@@ -1313,8 +1333,8 @@
 		if ((pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) &&
 		    (!memcmp(pvendor_ie->oui, wps_oui, sizeof(wps_oui)))) {
 			priv->wps.session_enable = true;
-			dev_dbg(priv->adapter->dev,
-				"info: WPS Session Enabled.\n");
+			mwifiex_dbg(priv->adapter, INFO,
+				    "info: WPS Session Enabled.\n");
 			ret = mwifiex_set_wps_ie(priv, ie_data_ptr, ie_len);
 		}
 
@@ -1361,7 +1381,8 @@
 		memset(adapter->arp_filter, 0, sizeof(adapter->arp_filter));
 		if (gen_ie->len > ARP_FILTER_MAX_BUF_SIZE) {
 			adapter->arp_filter_size = 0;
-			dev_err(adapter->dev, "invalid ARP filter size\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "invalid ARP filter size\n");
 			return -1;
 		} else {
 			memcpy(adapter->arp_filter, gen_ie->ie_data,
@@ -1370,7 +1391,7 @@
 		}
 		break;
 	default:
-		dev_err(adapter->dev, "invalid IE type\n");
+		mwifiex_dbg(adapter, ERROR, "invalid IE type\n");
 		return -1;
 	}
 	return 0;
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index b8729c9..d4d4cb1 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -141,7 +141,7 @@
 
 	if (priv->hs2_enabled &&
 	    mwifiex_discard_gratuitous_arp(priv, skb)) {
-		dev_dbg(priv->adapter->dev, "Bypassed Gratuitous ARP\n");
+		mwifiex_dbg(priv->adapter, INFO, "Bypassed Gratuitous ARP\n");
 		dev_kfree_skb_any(skb);
 		return 0;
 	}
@@ -166,7 +166,8 @@
 
 	ret = mwifiex_recv_packet(priv, skb);
 	if (ret == -1)
-		dev_err(priv->adapter->dev, "recv packet failed\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "recv packet failed\n");
 
 	return ret;
 }
@@ -203,9 +204,9 @@
 	rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;
 
 	if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
-		dev_err(adapter->dev,
-			"wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
-			skb->len, rx_pkt_offset, rx_pkt_length);
+		mwifiex_dbg(adapter, ERROR,
+			    "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
+			    skb->len, rx_pkt_offset, rx_pkt_length);
 		priv->stats.rx_dropped++;
 		dev_kfree_skb_any(skb);
 		return ret;
@@ -214,7 +215,7 @@
 	if (rx_pkt_type == PKT_TYPE_MGMT) {
 		ret = mwifiex_process_mgmt_packet(priv, skb);
 		if (ret)
-			dev_err(adapter->dev, "Rx of mgmt packet failed");
+			mwifiex_dbg(adapter, ERROR, "Rx of mgmt packet failed");
 		dev_kfree_skb_any(skb);
 		return ret;
 	}
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 5ce2d9a..355ac59 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -53,7 +53,8 @@
 		       INTF_HEADER_LEN;
 
 	if (!skb->len) {
-		dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
+		mwifiex_dbg(adapter, ERROR,
+			    "Tx: bad packet length: %d\n", skb->len);
 		tx_info->status_code = -1;
 		return skb->data;
 	}
@@ -184,21 +185,24 @@
 	switch (ret) {
 	case -EBUSY:
 		dev_kfree_skb_any(skb);
-		dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n",
-			__func__, ret);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: host_to_card failed: ret=%d\n",
+			    __func__, ret);
 		adapter->dbg.num_tx_host_to_card_failure++;
 		break;
 	case -1:
 		adapter->data_sent = false;
 		dev_kfree_skb_any(skb);
-		dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n",
-			__func__, ret);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: host_to_card failed: ret=%d\n",
+			    __func__, ret);
 		adapter->dbg.num_tx_host_to_card_failure++;
 		break;
 	case 0:
 		dev_kfree_skb_any(skb);
-		dev_dbg(adapter->dev, "data: %s: host_to_card succeeded\n",
-			__func__);
+		mwifiex_dbg(adapter, DATA,
+			    "data: %s: host_to_card succeeded\n",
+			    __func__);
 		adapter->tx_lock_flag = true;
 		break;
 	case -EINPROGRESS:
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index 087d847..2faa1bc 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -37,7 +37,7 @@
 	u32 tid;
 	u8 tid_down;
 
-	dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
+	mwifiex_dbg(priv->adapter, DATA, "%s: %pM\n", __func__, mac);
 	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
 
 	skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
@@ -94,7 +94,7 @@
 	unsigned long flags;
 	int i;
 
-	dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
+	mwifiex_dbg(priv->adapter, DATA, "%s: %pM\n", __func__, mac);
 	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
 
 	for (i = 0; i < MAX_NUM_TID; i++) {
@@ -132,8 +132,8 @@
 	supp_rates_size = min_t(u16, rates_size, MWIFIEX_TDLS_SUPPORTED_RATES);
 
 	if (skb_tailroom(skb) < rates_size + 4) {
-		dev_err(priv->adapter->dev,
-			"Insuffient space while adding rates\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Insuffient space while adding rates\n");
 		return -ENOMEM;
 	}
 
@@ -199,8 +199,8 @@
 
 	sta_ptr = mwifiex_get_sta_entry(priv, mac);
 	if (unlikely(!sta_ptr)) {
-		dev_warn(priv->adapter->dev,
-			 "TDLS peer station not found in list\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "TDLS peer station not found in list\n");
 		return -1;
 	}
 
@@ -247,15 +247,16 @@
 
 	sta_ptr = mwifiex_get_sta_entry(priv, mac);
 	if (unlikely(!sta_ptr)) {
-		dev_warn(adapter->dev, "TDLS peer station not found in list\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "TDLS peer station not found in list\n");
 		return -1;
 	}
 
 	if (!mwifiex_is_bss_in_11ac_mode(priv)) {
 		if (sta_ptr->tdls_cap.extcap.ext_capab[7] &
 		   WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
-			dev_dbg(adapter->dev,
-				"TDLS peer doesn't support wider bandwitdh\n");
+			mwifiex_dbg(adapter, WARN,
+				    "TDLS peer doesn't support wider bandwidth\n");
 			return 0;
 		}
 	} else {
@@ -554,7 +555,7 @@
 		tf->u.discover_req.dialog_token = dialog_token;
 		break;
 	default:
-		dev_err(priv->adapter->dev, "Unknown TDLS frame type.\n");
+		mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n");
 		return -EINVAL;
 	}
 
@@ -608,8 +609,8 @@
 
 	skb = dev_alloc_skb(skb_len);
 	if (!skb) {
-		dev_err(priv->adapter->dev,
-			"allocate skb failed for management frame\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "allocate skb failed for management frame\n");
 		return -ENOMEM;
 	}
 	skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
@@ -742,7 +743,7 @@
 		mwifiex_tdls_add_qos_capab(skb);
 		break;
 	default:
-		dev_err(priv->adapter->dev, "Unknown TDLS action frame type\n");
+		mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS action frame type\n");
 		return -EINVAL;
 	}
 
@@ -781,8 +782,8 @@
 
 	skb = dev_alloc_skb(skb_len);
 	if (!skb) {
-		dev_err(priv->adapter->dev,
-			"allocate skb failed for management frame\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "allocate skb failed for management frame\n");
 		return -ENOMEM;
 	}
 
@@ -848,8 +849,8 @@
 
 	peer = buf + ETH_ALEN;
 	action = *(buf + sizeof(struct ethhdr) + 2);
-	dev_dbg(priv->adapter->dev,
-		"rx:tdls action: peer=%pM, action=%d\n", peer, action);
+	mwifiex_dbg(priv->adapter, DATA,
+		    "rx:tdls action: peer=%pM, action=%d\n", peer, action);
 
 	switch (action) {
 	case WLAN_TDLS_SETUP_REQUEST:
@@ -880,7 +881,7 @@
 		ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
 		break;
 	default:
-		dev_dbg(priv->adapter->dev, "Unknown TDLS frame type.\n");
+		mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n");
 		return;
 	}
 
@@ -967,8 +968,8 @@
 	sta_ptr = mwifiex_get_sta_entry(priv, peer);
 
 	if (!sta_ptr || sta_ptr->tdls_status == TDLS_SETUP_FAILURE) {
-		dev_err(priv->adapter->dev,
-			"link absent for peer %pM; cannot config\n", peer);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "link absent for peer %pM; cannot config\n", peer);
 		return -EINVAL;
 	}
 
@@ -988,8 +989,8 @@
 	sta_ptr = mwifiex_get_sta_entry(priv, peer);
 
 	if (sta_ptr && sta_ptr->tdls_status == TDLS_SETUP_INPROGRESS) {
-		dev_dbg(priv->adapter->dev,
-			"Setup already in progress for peer %pM\n", peer);
+		mwifiex_dbg(priv->adapter, WARN,
+			    "Setup already in progress for peer %pM\n", peer);
 		return 0;
 	}
 
@@ -1046,8 +1047,8 @@
 	sta_ptr = mwifiex_get_sta_entry(priv, peer);
 
 	if (sta_ptr && (sta_ptr->tdls_status != TDLS_SETUP_FAILURE)) {
-		dev_dbg(priv->adapter->dev,
-			"tdls: enable link %pM success\n", peer);
+		mwifiex_dbg(priv->adapter, MSG,
+			    "tdls: enable link %pM success\n", peer);
 
 		sta_ptr->tdls_status = TDLS_SETUP_COMPLETE;
 
@@ -1076,8 +1077,8 @@
 		mwifiex_auto_tdls_update_peer_status(priv, peer,
 						     TDLS_SETUP_COMPLETE);
 	} else {
-		dev_dbg(priv->adapter->dev,
-			"tdls: enable link %pM failed\n", peer);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "tdls: enable link %pM failed\n", peer);
 		if (sta_ptr) {
 			mwifiex_11n_cleanup_reorder_tbl(priv);
 			spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
@@ -1180,9 +1181,9 @@
 		tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
 		if (mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
 				     HostCmd_ACT_GEN_SET, 0, &tdls_oper, false))
-			dev_warn(priv->adapter->dev,
-				 "Disable link failed for TDLS peer %pM",
-				 sta_ptr->mac_addr);
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "Disable link failed for TDLS peer %pM",
+				    sta_ptr->mac_addr);
 	}
 
 	mwifiex_del_all_sta_list(priv);
@@ -1204,9 +1205,9 @@
 			    (peer->failure_count <
 			     MWIFIEX_TDLS_MAX_FAIL_COUNT)) {
 				peer->tdls_status = TDLS_SETUP_INPROGRESS;
-				dev_dbg(priv->adapter->dev,
-					"setup TDLS link, peer=%pM rssi=%d\n",
-					peer->mac_addr, peer->rssi);
+				mwifiex_dbg(priv->adapter, INFO,
+					    "setup TDLS link, peer=%pM rssi=%d\n",
+					    peer->mac_addr, peer->rssi);
 
 				cfg80211_tdls_oper_request(priv->netdev,
 							   peer->mac_addr,
@@ -1272,8 +1273,8 @@
 		tdls_peer->rssi_jiffies = jiffies;
 		INIT_LIST_HEAD(&tdls_peer->list);
 		list_add_tail(&tdls_peer->list, &priv->auto_tdls_list);
-		dev_dbg(priv->adapter->dev, "Add auto TDLS peer= %pM to list\n",
-			mac);
+		mwifiex_dbg(priv->adapter, INFO,
+			    "Add auto TDLS peer= %pM to list\n", mac);
 	}
 
 	spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
@@ -1341,8 +1342,8 @@
 		return;
 
 	if (!priv->auto_tdls_timer_active) {
-		dev_dbg(priv->adapter->dev,
-			"auto TDLS timer inactive; return");
+		mwifiex_dbg(priv->adapter, INFO,
+			    "auto TDLS timer inactive; return");
 		return;
 	}
 
@@ -1368,9 +1369,9 @@
 		     !tdls_peer->rssi) &&
 		    tdls_peer->tdls_status == TDLS_SETUP_COMPLETE) {
 			tdls_peer->tdls_status = TDLS_LINK_TEARDOWN;
-			dev_dbg(priv->adapter->dev,
-				"teardown TDLS link,peer=%pM rssi=%d\n",
-				tdls_peer->mac_addr, -tdls_peer->rssi);
+			mwifiex_dbg(priv->adapter, MSG,
+				    "teardown TDLS link,peer=%pM rssi=%d\n",
+				    tdls_peer->mac_addr, -tdls_peer->rssi);
 			tdls_peer->do_discover = true;
 			priv->check_tdls_tx = true;
 			cfg80211_tdls_oper_request(priv->netdev,
@@ -1384,9 +1385,10 @@
 			   MWIFIEX_TDLS_MAX_FAIL_COUNT) {
 				priv->check_tdls_tx = true;
 				tdls_peer->do_setup = true;
-				dev_dbg(priv->adapter->dev,
-					"check TDLS with peer=%pM rssi=%d\n",
-					tdls_peer->mac_addr, -tdls_peer->rssi);
+				mwifiex_dbg(priv->adapter, INFO,
+					    "check TDLS with peer=%pM\t"
+					    "rssi=%d\n", tdls_peer->mac_addr,
+					    tdls_peer->rssi);
 		}
 	}
 	spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index a245f44..5ed9b79 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -50,11 +50,15 @@
 		priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
 
 	if (!priv) {
-		dev_err(adapter->dev, "data: priv not found. Drop RX packet\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "data: priv not found. Drop RX packet\n");
 		dev_kfree_skb_any(skb);
 		return -1;
 	}
 
+	mwifiex_dbg_dump(adapter, DAT_D, "rx pkt:", skb->data,
+			 min_t(size_t, skb->len, DEBUG_DUMP_DATA_MAX_LEN));
+
 	memset(rx_info, 0, sizeof(*rx_info));
 	rx_info->bss_num = priv->bss_num;
 	rx_info->bss_type = priv->bss_type;
@@ -84,13 +88,22 @@
 	struct mwifiex_adapter *adapter = priv->adapter;
 	u8 *head_ptr;
 	struct txpd *local_tx_pd = NULL;
+	struct mwifiex_sta_node *dest_node;
+	struct ethhdr *hdr = (void *)skb->data;
 
 	hroom = (adapter->iface_type == MWIFIEX_USB) ? 0 : INTF_HEADER_LEN;
 
-	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
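+		/* account this frame against the destination STA's TX counters */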
+		dest_node = mwifiex_get_sta_entry(priv, hdr->h_dest);
+		if (dest_node) {
+			dest_node->stats.tx_bytes += skb->len;
+			dest_node->stats.tx_packets++;
+		}
+
 		head_ptr = mwifiex_process_uap_txpd(priv, skb);
-	else
+	} else {
 		head_ptr = mwifiex_process_sta_txpd(priv, skb);
+	}
 
 	if ((adapter->data_sent || adapter->tx_lock_flag) && head_ptr) {
 		skb_queue_tail(&adapter->tx_data_q, skb);
@@ -112,10 +125,12 @@
 							   skb, tx_param);
 		}
 	}
+	mwifiex_dbg_dump(adapter, DAT_D, "tx pkt:", skb->data,
+			 min_t(size_t, skb->len, DEBUG_DUMP_DATA_MAX_LEN));
 
 	switch (ret) {
 	case -ENOSR:
-		dev_dbg(adapter->dev, "data: -ENOSR is returned\n");
+		mwifiex_dbg(adapter, ERROR, "data: -ENOSR is returned\n");
 		break;
 	case -EBUSY:
 		if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
@@ -124,13 +139,14 @@
 				if (local_tx_pd)
 					local_tx_pd->flags = 0;
 		}
-		dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+		mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
 		break;
 	case -1:
 		if (adapter->iface_type != MWIFIEX_PCIE)
 			adapter->data_sent = false;
-		dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
-			ret);
+		mwifiex_dbg(adapter, ERROR,
+			    "mwifiex_write_data_async failed: 0x%X\n",
+			    ret);
 		adapter->dbg.num_tx_host_to_card_failure++;
 		mwifiex_write_data_complete(adapter, skb, 0, ret);
 		break;
@@ -162,7 +178,8 @@
 	priv = mwifiex_get_priv_by_id(adapter, tx_info->bss_num,
 				      tx_info->bss_type);
 	if (!priv) {
-		dev_err(adapter->dev, "data: priv not found. Drop TX packet\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "data: priv not found. Drop TX packet\n");
 		adapter->dbg.num_tx_host_to_card_failure++;
 		mwifiex_write_data_complete(adapter, skb, 0, 0);
 		return ret;
@@ -187,7 +204,7 @@
 	}
 	switch (ret) {
 	case -ENOSR:
-		dev_err(adapter->dev, "data: -ENOSR is returned\n");
+		mwifiex_dbg(adapter, ERROR, "data: -ENOSR is returned\n");
 		break;
 	case -EBUSY:
 		if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
@@ -202,13 +219,13 @@
 			atomic_add(tx_info->aggr_num, &adapter->tx_queued);
 		else
 			atomic_inc(&adapter->tx_queued);
-		dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+		mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
 		break;
 	case -1:
 		if (adapter->iface_type != MWIFIEX_PCIE)
 			adapter->data_sent = false;
-		dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
-			ret);
+		mwifiex_dbg(adapter, ERROR,
+			    "mwifiex_write_data_async failed: 0x%X\n", ret);
 		adapter->dbg.num_tx_host_to_card_failure++;
 		mwifiex_write_data_complete(adapter, skb, 0, ret);
 		break;
@@ -302,11 +319,11 @@
 		priv->stats.tx_errors++;
 	}
 
-	if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) {
+	if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
 		atomic_dec_return(&adapter->pending_bridged_pkts);
-		if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
-			goto done;
-	}
+
+	if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
+		goto done;
 
 	if (aggr)
 		/* For skb_aggr, do not wake up tx queue */
@@ -319,7 +336,7 @@
 		txq = netdev_get_tx_queue(priv->netdev, index);
 		if (netif_tx_queue_stopped(txq)) {
 			netif_tx_wake_queue(txq);
-			dev_dbg(adapter->dev, "wake queue: %d\n", index);
+			mwifiex_dbg(adapter, DATA, "wake queue: %d\n", index);
 		}
 	}
 done:
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index f5c2af0..b749300 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -167,7 +167,7 @@
 	ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, params->beacon.tail,
 				 params->beacon.tail_len);
 	if (ht_ie) {
-		memcpy(&bss_cfg->ht_cap, ht_ie,
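+		/* ht_ie points at the IE header; +2 skips the id/len bytes */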
+		memcpy(&bss_cfg->ht_cap, ht_ie + 2,
 		       sizeof(struct ieee80211_ht_cap));
 		cap_info = le16_to_cpu(bss_cfg->ht_cap.cap_info);
 		memset(&bss_cfg->ht_cap.mcs, 0,
@@ -184,8 +184,8 @@
 			bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
 			break;
 		default:
-			dev_warn(priv->adapter->dev,
-				 "Unsupported RX-STBC, default to 2x2\n");
+			mwifiex_dbg(priv->adapter, WARN,
+				    "Unsupported RX-STBC, default to 2x2\n");
 			bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
 			bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
 			break;
@@ -222,6 +222,23 @@
 	return;
 }
 
+/* This function parses the TPC element from the beacon tail and
+ * sets the power constraint into the bss_config structure.
+ */
+void mwifiex_set_tpc_params(struct mwifiex_private *priv,
+			    struct mwifiex_uap_bss_param *bss_cfg,
+			    struct cfg80211_ap_settings *params)
+{
+	const u8 *tpc_ie;
+
+	tpc_ie = cfg80211_find_ie(WLAN_EID_TPC_REQUEST, params->beacon.tail,
+				  params->beacon.tail_len);
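+	/* IE layout: [id][len][payload]; the first payload byte is the constraint */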
+	if (tpc_ie)
+		bss_cfg->power_constraint = *(tpc_ie + 2);
+	else
+		bss_cfg->power_constraint = 0;
+}
+
 /* Enable VHT only when cfg80211_ap_settings has VHT IE.
  * Otherwise disable VHT.
  */
@@ -466,6 +483,7 @@
 	struct host_cmd_tlv_auth_type *auth_type;
 	struct host_cmd_tlv_rates *tlv_rates;
 	struct host_cmd_tlv_ageout_timer *ao_timer, *ps_ao_timer;
+	struct host_cmd_tlv_power_constraint *pwr_ct;
 	struct mwifiex_ie_types_htcap *htcap;
 	struct mwifiex_ie_types_wmmcap *wmm_cap;
 	struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
@@ -644,6 +662,15 @@
 		tlv += sizeof(*ao_timer);
 	}
 
+	if (bss_cfg->power_constraint) {
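+		/* append the power-constraint TLV parsed from the beacon tail */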
+		pwr_ct = (void *)tlv;
+		pwr_ct->header.type = cpu_to_le16(TLV_TYPE_PWR_CONSTRAINT);
+		pwr_ct->header.len = cpu_to_le16(sizeof(u8));
+		pwr_ct->constraint = bss_cfg->power_constraint;
+		cmd_size += sizeof(*pwr_ct);
+		tlv += sizeof(*pwr_ct);
+	}
+
 	if (bss_cfg->ps_sta_ao_timer) {
 		ps_ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
 		ps_ao_timer->header.type =
@@ -754,6 +781,8 @@
 		break;
 	case HostCmd_CMD_UAP_BSS_START:
 	case HostCmd_CMD_UAP_BSS_STOP:
+	case HOST_CMD_APCMD_SYS_RESET:
+	case HOST_CMD_APCMD_STA_LIST:
 		cmd->command = cpu_to_le16(cmd_no);
 		cmd->size = cpu_to_le16(S_DS_GEN);
 		break;
@@ -767,19 +796,22 @@
 			return -1;
 		break;
 	default:
-		dev_err(priv->adapter->dev,
-			"PREP_CMD: unknown cmd %#x\n", cmd_no);
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "PREP_CMD: unknown cmd %#x\n", cmd_no);
 		return -1;
 	}
 
 	return 0;
 }
 
-void mwifiex_uap_set_channel(struct mwifiex_uap_bss_param *bss_cfg,
+void mwifiex_uap_set_channel(struct mwifiex_private *priv,
+			     struct mwifiex_uap_bss_param *bss_cfg,
 			     struct cfg80211_chan_def chandef)
 {
 	u8 config_bands = 0;
 
+	priv->bss_chandef = chandef;
+
 	bss_cfg->channel = ieee80211_frequency_to_channel(
 						     chandef.chan->center_freq);
 
@@ -800,30 +832,53 @@
 		if (chandef.width > NL80211_CHAN_WIDTH_40)
 			config_bands |= BAND_AAC;
 	}
+
+	priv->adapter->config_bands = config_bands;
 }
 
 int mwifiex_config_start_uap(struct mwifiex_private *priv,
 			     struct mwifiex_uap_bss_param *bss_cfg)
 {
+	enum state_11d_t state_11d;
+
 	if (mwifiex_del_mgmt_ies(priv))
-		dev_err(priv->adapter->dev, "Failed to delete mgmt IEs!\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Failed to delete mgmt IEs!\n");
 
 	if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
 			     HostCmd_ACT_GEN_SET, 0, NULL, true)) {
-		dev_err(priv->adapter->dev, "Failed to stop the BSS\n");
+		mwifiex_dbg(priv->adapter, ERROR, "Failed to stop the BSS\n");
+		return -1;
+	}
+
+	if (mwifiex_send_cmd(priv, HOST_CMD_APCMD_SYS_RESET,
+			     HostCmd_ACT_GEN_SET, 0, NULL, true)) {
+		mwifiex_dbg(priv->adapter, ERROR, "Failed to reset BSS\n");
 		return -1;
 	}
 
 	if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
 			     HostCmd_ACT_GEN_SET,
 			     UAP_BSS_PARAMS_I, bss_cfg, false)) {
-		dev_err(priv->adapter->dev, "Failed to set the SSID\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Failed to set the SSID\n");
+		return -1;
+	}
+
+	/* Send cmd to FW to enable 11D function */
+	state_11d = ENABLE_11D;
+	if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+			     HostCmd_ACT_GEN_SET, DOT11D_I,
+			     &state_11d, true)) {
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "11D: failed to enable 11D\n");
 		return -1;
 	}
 
 	if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
 			     HostCmd_ACT_GEN_SET, 0, NULL, false)) {
-		dev_err(priv->adapter->dev, "Failed to start the BSS\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Failed to start the BSS\n");
 		return -1;
 	}
 
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index f4794cd..7bc1f85 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -21,8 +21,70 @@
 #include "main.h"
 #include "11n.h"
 
+#define MWIFIEX_BSS_START_EVT_FIX_SIZE    12
 
+static int mwifiex_check_uap_capabilties(struct mwifiex_private *priv,
+					 struct sk_buff *event)
+{
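+	/* the BSS start event carries a fixed 12-byte header followed by
+	 * TLVs; walk them to latch HT/VHT/WMM capabilities of our BSS
+	 */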
+	int evt_len;
+	u8 *curr;
+	u16 tlv_len;
+	struct mwifiex_ie_types_data *tlv_hdr;
+	struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
+	int mask = IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK;
 
+	priv->wmm_enabled = false;
+	skb_pull(event, MWIFIEX_BSS_START_EVT_FIX_SIZE);
+	evt_len = event->len;
+	curr = event->data;
+
+	mwifiex_dbg_dump(priv->adapter, EVT_D, "uap capabilties:",
+			 event->data, event->len);
+
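+	/* each TLV: __le16 type, __le16 len, then len payload bytes */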
+	while (evt_len >= sizeof(tlv_hdr->header)) {
+		tlv_hdr = (struct mwifiex_ie_types_data *)curr;
+		tlv_len = le16_to_cpu(tlv_hdr->header.len);
+
+		if (evt_len < tlv_len + sizeof(tlv_hdr->header))
+			break;
+
+		switch (le16_to_cpu(tlv_hdr->header.type)) {
+		case WLAN_EID_HT_CAPABILITY:
+			priv->ap_11n_enabled = true;
+			break;
+
+		case WLAN_EID_VHT_CAPABILITY:
+			priv->ap_11ac_enabled = true;
+			break;
+
+		case WLAN_EID_VENDOR_SPECIFIC:
+			/* Point the regular IEEE IE 2 bytes into the Marvell IE
+			 * and setup the IEEE IE type and length byte fields
+			 */
+			wmm_param_ie = (void *)(curr + 2);
+			wmm_param_ie->vend_hdr.len = (u8)tlv_len;
+			wmm_param_ie->vend_hdr.element_id =
+						WLAN_EID_VENDOR_SPECIFIC;
+			mwifiex_dbg(priv->adapter, EVENT,
+				    "info: check uap capabilities:\t"
+				    "wmm parameter set count: %d\n",
+				    wmm_param_ie->qos_info_bitmap & mask);
+
+			mwifiex_wmm_setup_ac_downgrade(priv);
+			priv->wmm_enabled = true;
+			mwifiex_wmm_setup_queue_priorities(priv, wmm_param_ie);
+			break;
+
+		default:
+			break;
+		}
+
+		curr += (tlv_len + sizeof(tlv_hdr->header));
+		evt_len -= (tlv_len + sizeof(tlv_hdr->header));
+	}
+
+	return 0;
+}
 
 /*
  * This function handles AP interface specific events generated by firmware.
@@ -80,8 +142,8 @@
 
 		node = mwifiex_add_sta_entry(priv, event->sta_addr);
 		if (!node) {
-			dev_warn(adapter->dev,
-				 "could not create station entry!\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "could not create station entry!\n");
 			return -1;
 		}
 
@@ -128,58 +190,63 @@
 		mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
 		break;
 	case EVENT_UAP_BSS_START:
-		dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+		mwifiex_dbg(adapter, EVENT,
+			    "AP EVENT: event id: %#x\n", eventcause);
 		memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
 		       ETH_ALEN);
 		if (priv->hist_data)
 			mwifiex_hist_data_reset(priv);
+		mwifiex_check_uap_capabilties(priv, adapter->event_skb);
 		break;
 	case EVENT_UAP_MIC_COUNTERMEASURES:
 		/* For future development */
-		dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+		mwifiex_dbg(adapter, EVENT,
+			    "AP EVENT: event id: %#x\n", eventcause);
 		break;
 	case EVENT_AMSDU_AGGR_CTRL:
 		ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
-		dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
+		mwifiex_dbg(adapter, EVENT,
+			    "event: AMSDU_AGGR_CTRL %d\n", ctrl);
 
 		if (priv->media_connected) {
 			adapter->tx_buf_size =
 				min_t(u16, adapter->curr_tx_buf_size, ctrl);
-			dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
-				adapter->tx_buf_size);
+			mwifiex_dbg(adapter, EVENT,
+				    "event: tx_buf_size %d\n",
+				    adapter->tx_buf_size);
 		}
 		break;
 	case EVENT_ADDBA:
-		dev_dbg(adapter->dev, "event: ADDBA Request\n");
+		mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n");
 		if (priv->media_connected)
 			mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
 					 HostCmd_ACT_GEN_SET, 0,
 					 adapter->event_body, false);
 		break;
 	case EVENT_DELBA:
-		dev_dbg(adapter->dev, "event: DELBA Request\n");
+		mwifiex_dbg(adapter, EVENT, "event: DELBA Request\n");
 		if (priv->media_connected)
 			mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
 		break;
 	case EVENT_BA_STREAM_TIEMOUT:
-		dev_dbg(adapter->dev, "event:  BA Stream timeout\n");
+		mwifiex_dbg(adapter, EVENT, "event:  BA Stream timeout\n");
 		if (priv->media_connected) {
 			ba_timeout = (void *)adapter->event_body;
 			mwifiex_11n_ba_stream_timeout(priv, ba_timeout);
 		}
 		break;
 	case EVENT_EXT_SCAN_REPORT:
-		dev_dbg(adapter->dev, "event: EXT_SCAN Report\n");
+		mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n");
 		if (adapter->ext_scan)
 			return mwifiex_handle_event_ext_scan_report(priv,
 						adapter->event_skb->data);
 		break;
 	case EVENT_TX_STATUS_REPORT:
-		dev_dbg(adapter->dev, "event: TX_STATUS Report\n");
+		mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n");
 		mwifiex_parse_tx_status_event(priv, adapter->event_body);
 		break;
 	case EVENT_PS_SLEEP:
-		dev_dbg(adapter->dev, "info: EVENT: SLEEP\n");
+		mwifiex_dbg(adapter, EVENT, "info: EVENT: SLEEP\n");
 
 		adapter->ps_state = PS_STATE_PRE_SLEEP;
 
@@ -187,12 +254,12 @@
 		break;
 
 	case EVENT_PS_AWAKE:
-		dev_dbg(adapter->dev, "info: EVENT: AWAKE\n");
+		mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
 		if (!adapter->pps_uapsd_mode &&
 		    priv->media_connected && adapter->sleep_period.period) {
 				adapter->pps_uapsd_mode = true;
-				dev_dbg(adapter->dev,
-					"event: PPS/UAPSD mode activated\n");
+				mwifiex_dbg(adapter, EVENT,
+					    "event: PPS/UAPSD mode activated\n");
 		}
 		adapter->tx_lock_flag = false;
 		if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
@@ -218,16 +285,21 @@
 		break;
 
 	case EVENT_CHANNEL_REPORT_RDY:
-		dev_dbg(adapter->dev, "event: Channel Report\n");
+		mwifiex_dbg(adapter, EVENT, "event: Channel Report\n");
 		mwifiex_11h_handle_chanrpt_ready(priv, adapter->event_skb);
 		break;
 	case EVENT_RADAR_DETECTED:
-		dev_dbg(adapter->dev, "event: Radar detected\n");
+		mwifiex_dbg(adapter, EVENT, "event: Radar detected\n");
 		mwifiex_11h_handle_radar_detected(priv, adapter->event_skb);
 		break;
+	case EVENT_BT_COEX_WLAN_PARA_CHANGE:
+		dev_err(adapter->dev, "EVENT: BT coex wlan param update\n");
+		mwifiex_bt_coex_wlan_param_update_event(priv,
+							adapter->event_skb);
+		break;
 	default:
-		dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
-			eventcause);
+		mwifiex_dbg(adapter, EVENT,
+			    "event: unknown event id: %#x\n", eventcause);
 		break;
 	}
 
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 38ac4d7..8766741 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -97,14 +97,15 @@
 	struct mwifiex_txinfo *tx_info;
 	int hdr_chop;
 	struct ethhdr *p_ethhdr;
+	struct mwifiex_sta_node *src_node;
 
 	uap_rx_pd = (struct uap_rxpd *)(skb->data);
 	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
 
 	if ((atomic_read(&adapter->pending_bridged_pkts) >=
 					     MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
-		dev_err(priv->adapter->dev,
-			"Tx: Bridge packet limit reached. Drop packet!\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Tx: Bridge packet limit reached. Drop packet!\n");
 		kfree_skb(skb);
 		mwifiex_uap_cleanup_tx_queues(priv);
 		return;
@@ -153,15 +154,15 @@
 	skb_pull(skb, hdr_chop);
 
 	if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
-		dev_dbg(priv->adapter->dev,
-			"data: Tx: insufficient skb headroom %d\n",
-			skb_headroom(skb));
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "data: Tx: insufficient skb headroom %d\n",
+			    skb_headroom(skb));
 		/* Insufficient skb headroom - allocate a new skb */
 		new_skb =
 			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
 		if (unlikely(!new_skb)) {
-			dev_err(priv->adapter->dev,
-				"Tx: cannot allocate new_skb\n");
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "Tx: cannot allocate new_skb\n");
 			kfree_skb(skb);
 			priv->stats.tx_dropped++;
 			return;
@@ -169,8 +170,9 @@
 
 		kfree_skb(skb);
 		skb = new_skb;
-		dev_dbg(priv->adapter->dev, "info: new skb headroom %d\n",
-			skb_headroom(skb));
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: new skb headroom %d\n",
+			    skb_headroom(skb));
 	}
 
 	tx_info = MWIFIEX_SKB_TXCB(skb);
@@ -179,6 +181,15 @@
 	tx_info->bss_type = priv->bss_type;
 	tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
 
+	src_node = mwifiex_get_sta_entry(priv, rx_pkt_hdr->eth803_hdr.h_source);
+	if (src_node) {
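+		/* refresh the STA's last_rx time, RX counters and rate info */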
+		src_node->stats.last_rx = jiffies;
+		src_node->stats.rx_bytes += skb->len;
+		src_node->stats.rx_packets++;
+		src_node->stats.last_tx_rate = uap_rx_pd->rx_rate;
+		src_node->stats.last_tx_htinfo = uap_rx_pd->ht_info;
+	}
+
 	if (is_unicast_ether_addr(rx_pkt_hdr->eth803_hdr.h_dest)) {
 		/* Update bridge packet statistics as the
 		 * packet is not going to kernel/upper layer.
@@ -225,7 +236,8 @@
 
 	/* don't do packet forwarding in disconnected state */
 	if (!priv->media_connected) {
-		dev_err(adapter->dev, "drop packet in disconnected state.\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "drop packet in disconnected state.\n");
 		dev_kfree_skb_any(skb);
 		return 0;
 	}
@@ -273,13 +285,20 @@
 	rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
 	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
 
+	ether_addr_copy(ta, rx_pkt_hdr->eth803_hdr.h_source);
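+	/* save TA now; the error path below charges tx_failed to this STA */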
+
 	if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
 	     le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16) skb->len) {
-		dev_err(adapter->dev,
-			"wrong rx packet: len=%d, offset=%d, length=%d\n",
-			skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
-			le16_to_cpu(uap_rx_pd->rx_pkt_length));
+		mwifiex_dbg(adapter, ERROR,
+			    "wrong rx packet: len=%d, offset=%d, length=%d\n",
+			    skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
+			    le16_to_cpu(uap_rx_pd->rx_pkt_length));
 		priv->stats.rx_dropped++;
+
+		node = mwifiex_get_sta_entry(priv, ta);
+		if (node)
+			node->stats.tx_failed++;
+
 		dev_kfree_skb_any(skb);
 		return 0;
 	}
@@ -287,12 +306,12 @@
 	if (rx_pkt_type == PKT_TYPE_MGMT) {
 		ret = mwifiex_process_mgmt_packet(priv, skb);
 		if (ret)
-			dev_err(adapter->dev, "Rx of mgmt packet failed");
+			mwifiex_dbg(adapter, ERROR,
+				    "Rx of mgmt packet failed");
 		dev_kfree_skb_any(skb);
 		return ret;
 	}
 
-	memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
 
 	if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) {
 		spin_lock_irqsave(&priv->sta_list_spinlock, flags);
@@ -354,7 +373,8 @@
 		       INTF_HEADER_LEN;
 
 	if (!skb->len) {
-		dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
+		mwifiex_dbg(adapter, ERROR,
+			    "Tx: bad packet length: %d\n", skb->len);
 		tx_info->status_code = -1;
 		return skb->data;
 	}
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index fd8027f..aada934 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -60,7 +60,6 @@
 static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
 			    struct sk_buff *skb, u8 ep)
 {
-	struct device *dev = adapter->dev;
 	u32 recv_type;
 	__le32 tmp;
 	int ret;
@@ -69,13 +68,15 @@
 		mwifiex_process_hs_config(adapter);
 
 	if (skb->len < INTF_HEADER_LEN) {
-		dev_err(dev, "%s: invalid skb->len\n", __func__);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: invalid skb->len\n", __func__);
 		return -1;
 	}
 
 	switch (ep) {
 	case MWIFIEX_USB_EP_CMD_EVENT:
-		dev_dbg(dev, "%s: EP_CMD_EVENT\n", __func__);
+		mwifiex_dbg(adapter, EVENT,
+			    "%s: EP_CMD_EVENT\n", __func__);
 		skb_copy_from_linear_data(skb, &tmp, INTF_HEADER_LEN);
 		recv_type = le32_to_cpu(tmp);
 		skb_pull(skb, INTF_HEADER_LEN);
@@ -83,11 +84,12 @@
 		switch (recv_type) {
 		case MWIFIEX_USB_TYPE_CMD:
 			if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
-				dev_err(dev, "CMD: skb->len too large\n");
+				mwifiex_dbg(adapter, ERROR,
+					    "CMD: skb->len too large\n");
 				ret = -1;
 				goto exit_restore_skb;
 			} else if (!adapter->curr_cmd) {
-				dev_dbg(dev, "CMD: no curr_cmd\n");
+				mwifiex_dbg(adapter, WARN, "CMD: no curr_cmd\n");
 				if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
 					mwifiex_process_sleep_confirm_resp(
 							adapter, skb->data,
@@ -104,16 +106,19 @@
 			break;
 		case MWIFIEX_USB_TYPE_EVENT:
 			if (skb->len < sizeof(u32)) {
-				dev_err(dev, "EVENT: skb->len too small\n");
+				mwifiex_dbg(adapter, ERROR,
+					    "EVENT: skb->len too small\n");
 				ret = -1;
 				goto exit_restore_skb;
 			}
 			skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
 			adapter->event_cause = le32_to_cpu(tmp);
-			dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);
+			mwifiex_dbg(adapter, EVENT,
+				    "event_cause %#x\n", adapter->event_cause);
 
 			if (skb->len > MAX_EVENT_SIZE) {
-				dev_err(dev, "EVENT: event body too large\n");
+				mwifiex_dbg(adapter, ERROR,
+					    "EVENT: event body too large\n");
 				ret = -1;
 				goto exit_restore_skb;
 			}
@@ -125,14 +130,16 @@
 			adapter->event_skb = skb;
 			break;
 		default:
-			dev_err(dev, "unknown recv_type %#x\n", recv_type);
+			mwifiex_dbg(adapter, ERROR,
+				    "unknown recv_type %#x\n", recv_type);
 			return -1;
 		}
 		break;
 	case MWIFIEX_USB_EP_DATA:
-		dev_dbg(dev, "%s: EP_DATA\n", __func__);
+		mwifiex_dbg(adapter, DATA, "%s: EP_DATA\n", __func__);
 		if (skb->len > MWIFIEX_RX_DATA_BUF_SIZE) {
-			dev_err(dev, "DATA: skb->len too large\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "DATA: skb->len too large\n");
 			return -1;
 		}
 
@@ -141,7 +148,8 @@
 		atomic_inc(&adapter->rx_pending);
 		break;
 	default:
-		dev_err(dev, "%s: unknown endport %#x\n", __func__, ep);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: unknown endport %#x\n", __func__, ep);
 		return -1;
 	}
 
@@ -176,8 +184,8 @@
 
 	if (recv_length) {
 		if (urb->status || (adapter->surprise_removed)) {
-			dev_err(adapter->dev,
-				"URB status is failed: %d\n", urb->status);
+			mwifiex_dbg(adapter, ERROR,
+				    "URB status is failed: %d\n", urb->status);
 			/* Do not free skb in case of command ep */
 			if (card->rx_cmd_ep != context->ep)
 				dev_kfree_skb_any(skb);
@@ -190,8 +198,9 @@
 
 		status = mwifiex_usb_recv(adapter, skb, context->ep);
 
-		dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n",
-			recv_length, status);
+		mwifiex_dbg(adapter, INFO,
+			    "info: recv_length=%d, status=%d\n",
+			    recv_length, status);
 		if (status == -EINPROGRESS) {
 			mwifiex_queue_main_work(adapter);
 
@@ -203,8 +212,8 @@
 				return;
 		} else {
 			if (status == -1)
-				dev_err(adapter->dev,
-					"received data processing failed!\n");
+				mwifiex_dbg(adapter, ERROR,
+					    "received data processing failed!\n");
 
 			/* Do not free skb in case of command ep */
 			if (card->rx_cmd_ep != context->ep)
@@ -212,8 +221,8 @@
 		}
 	} else if (urb->status) {
 		if (!adapter->is_suspended) {
-			dev_warn(adapter->dev,
-				 "Card is removed: %d\n", urb->status);
+			mwifiex_dbg(adapter, FATAL,
+				    "Card is removed: %d\n", urb->status);
 			adapter->surprise_removed = true;
 		}
 		dev_kfree_skb_any(skb);
@@ -249,14 +258,17 @@
 	struct mwifiex_adapter *adapter = context->adapter;
 	struct usb_card_rec *card = adapter->card;
 
-	dev_dbg(adapter->dev, "%s: status: %d\n", __func__, urb->status);
+	mwifiex_dbg(adapter, INFO,
+		    "%s: status: %d\n", __func__, urb->status);
 
 	if (context->ep == card->tx_cmd_ep) {
-		dev_dbg(adapter->dev, "%s: CMD\n", __func__);
+		mwifiex_dbg(adapter, CMD,
+			    "%s: CMD\n", __func__);
 		atomic_dec(&card->tx_cmd_urb_pending);
 		adapter->cmd_sent = false;
 	} else {
-		dev_dbg(adapter->dev, "%s: DATA\n", __func__);
+		mwifiex_dbg(adapter, DATA,
+			    "%s: DATA\n", __func__);
 		atomic_dec(&card->tx_data_urb_pending);
 		mwifiex_write_data_complete(adapter, context->skb, 0,
 					    urb->status ? -1 : 0);
@@ -275,8 +287,8 @@
 	if (card->rx_cmd_ep != ctx->ep) {
 		ctx->skb = dev_alloc_skb(size);
 		if (!ctx->skb) {
-			dev_err(adapter->dev,
-				"%s: dev_alloc_skb failed\n", __func__);
+			mwifiex_dbg(adapter, ERROR,
+				    "%s: dev_alloc_skb failed\n", __func__);
 			return -ENOMEM;
 		}
 	}
@@ -291,7 +303,7 @@
 		atomic_inc(&card->rx_data_urb_pending);
 
 	if (usb_submit_urb(ctx->urb, GFP_ATOMIC)) {
-		dev_err(adapter->dev, "usb_submit_urb failed\n");
+		mwifiex_dbg(adapter, ERROR, "usb_submit_urb failed\n");
 		dev_kfree_skb_any(ctx->skb);
 		ctx->skb = NULL;
 
@@ -468,7 +480,8 @@
 	adapter = card->adapter;
 
 	if (unlikely(adapter->is_suspended))
-		dev_warn(adapter->dev, "Device already suspended\n");
+		mwifiex_dbg(adapter, WARN,
+			    "Device already suspended\n");
 
 	mwifiex_enable_hs(adapter);
 
@@ -519,7 +532,8 @@
 	adapter = card->adapter;
 
 	if (unlikely(!adapter->is_suspended)) {
-		dev_warn(adapter->dev, "Device already resumed\n");
+		mwifiex_dbg(adapter, WARN,
+			    "Device already resumed\n");
 		return 0;
 	}
 
@@ -578,7 +592,8 @@
 
 	mwifiex_usb_free(card);
 
-	dev_dbg(adapter->dev, "%s: removing card\n", __func__);
+	mwifiex_dbg(adapter, FATAL,
+		    "%s: removing card\n", __func__);
 	mwifiex_remove_card(adapter, &add_remove_card_sem);
 
 	usb_set_intfdata(intf, NULL);
@@ -608,7 +623,8 @@
 
 	card->tx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!card->tx_cmd.urb) {
-		dev_err(adapter->dev, "tx_cmd.urb allocation failed\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "tx_cmd.urb allocation failed\n");
 		return -ENOMEM;
 	}
 
@@ -620,8 +636,8 @@
 
 		card->tx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
 		if (!card->tx_data_list[i].urb) {
-			dev_err(adapter->dev,
-				"tx_data_list[] urb allocation failed\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "tx_data_list[] urb allocation failed\n");
 			return -ENOMEM;
 		}
 	}
@@ -639,15 +655,13 @@
 
 	card->rx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!card->rx_cmd.urb) {
-		dev_err(adapter->dev, "rx_cmd.urb allocation failed\n");
+		mwifiex_dbg(adapter, ERROR, "rx_cmd.urb allocation failed\n");
 		return -ENOMEM;
 	}
 
 	card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE);
-	if (!card->rx_cmd.skb) {
-		dev_err(adapter->dev, "rx_cmd.skb allocation failed\n");
+	if (!card->rx_cmd.skb)
 		return -ENOMEM;
-	}
 
 	if (mwifiex_usb_submit_rx_urb(&card->rx_cmd, MWIFIEX_RX_CMD_BUF_SIZE))
 		return -1;
@@ -658,8 +672,8 @@
 
 		card->rx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
 		if (!card->rx_data_list[i].urb) {
-			dev_err(adapter->dev,
-				"rx_data_list[] urb allocation failed\n");
+			mwifiex_dbg(adapter, ERROR,
+				    "rx_data_list[] urb allocation failed\n");
 			return -1;
 		}
 		if (mwifiex_usb_submit_rx_urb(&card->rx_data_list[i],
@@ -683,7 +697,8 @@
 	ret = usb_bulk_msg(card->udev, usb_sndbulkpipe(card->udev, ep), pbuf,
 			   *len, &actual_length, timeout);
 	if (ret) {
-		dev_err(adapter->dev, "usb_bulk_msg for tx failed: %d\n", ret);
+		mwifiex_dbg(adapter, ERROR,
+			    "usb_bulk_msg for tx failed: %d\n", ret);
 		return ret;
 	}
 
@@ -702,7 +717,8 @@
 	ret = usb_bulk_msg(card->udev, usb_rcvbulkpipe(card->udev, ep), pbuf,
 			   *len, &actual_length, timeout);
 	if (ret) {
-		dev_err(adapter->dev, "usb_bulk_msg for rx failed: %d\n", ret);
+		mwifiex_dbg(adapter, ERROR,
+			    "usb_bulk_msg for rx failed: %d\n", ret);
 		return ret;
 	}
 
@@ -722,13 +738,13 @@
 	struct urb *tx_urb;
 
 	if (adapter->is_suspended) {
-		dev_err(adapter->dev,
-			"%s: not allowed while suspended\n", __func__);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: not allowed while suspended\n", __func__);
 		return -1;
 	}
 
 	if (adapter->surprise_removed) {
-		dev_err(adapter->dev, "%s: device removed\n", __func__);
+		mwifiex_dbg(adapter, ERROR, "%s: device removed\n", __func__);
 		return -1;
 	}
 
@@ -737,7 +753,7 @@
 		return -EBUSY;
 	}
 
-	dev_dbg(adapter->dev, "%s: ep=%d\n", __func__, ep);
+	mwifiex_dbg(adapter, INFO, "%s: ep=%d\n", __func__, ep);
 
 	if (ep == card->tx_cmd_ep) {
 		context = &card->tx_cmd;
@@ -764,7 +780,8 @@
 		atomic_inc(&card->tx_data_urb_pending);
 
 	if (usb_submit_urb(tx_urb, GFP_ATOMIC)) {
-		dev_err(adapter->dev, "%s: usb_submit_urb failed\n", __func__);
+		mwifiex_dbg(adapter, ERROR,
+			    "%s: usb_submit_urb failed\n", __func__);
 		if (ep == card->tx_cmd_ep) {
 			atomic_dec(&card->tx_cmd_urb_pending);
 		} else {
@@ -843,8 +860,8 @@
 	u8 check_winner = 1;
 
 	if (!firmware) {
-		dev_err(adapter->dev,
-			"No firmware image found! Terminating download\n");
+		mwifiex_dbg(adapter, ERROR,
+			    "No firmware image found! Terminating download\n");
 		ret = -1;
 		goto fw_exit;
 	}
@@ -889,8 +906,9 @@
 						MWIFIEX_USB_EP_CMD_EVENT,
 						MWIFIEX_USB_TIMEOUT);
 			if (ret) {
-				dev_err(adapter->dev,
-					"write_data_sync: failed: %d\n", ret);
+				mwifiex_dbg(adapter, ERROR,
+					    "write_data_sync: failed: %d\n",
+					    ret);
 				continue;
 			}
 
@@ -902,8 +920,9 @@
 						MWIFIEX_USB_EP_CMD_EVENT,
 						MWIFIEX_USB_TIMEOUT);
 			if (ret) {
-				dev_err(adapter->dev,
-					"read_data_sync: failed: %d\n", ret);
+				mwifiex_dbg(adapter, ERROR,
+					    "read_data_sync: failed: %d\n",
+					    ret);
 				continue;
 			}
 
@@ -913,17 +932,17 @@
 			/* check 1st firmware block resp for highest bit set */
 			if (check_winner) {
 				if (le32_to_cpu(sync_fw.cmd) & 0x80000000) {
-					dev_warn(adapter->dev,
-						 "USB is not the winner %#x\n",
-						 sync_fw.cmd);
+					mwifiex_dbg(adapter, WARN,
+						    "USB is not the winner %#x\n",
+						    le32_to_cpu(sync_fw.cmd));
 
 					/* returning success */
 					ret = 0;
 					goto cleanup;
 				}
 
-				dev_dbg(adapter->dev,
-					"USB is the winner, start to download FW\n");
+				mwifiex_dbg(adapter, MSG,
+					    "start to download FW...\n");
 
 				check_winner = 0;
 				break;
@@ -931,9 +950,9 @@
 
 			/* check the firmware block response for CRC errors */
 			if (sync_fw.cmd) {
-				dev_err(adapter->dev,
-					"FW received block with CRC %#x\n",
-					sync_fw.cmd);
+				mwifiex_dbg(adapter, ERROR,
+					    "FW received block with CRC %#x\n",
+					    le32_to_cpu(sync_fw.cmd));
 				ret = -1;
 				continue;
 			}
@@ -945,8 +964,8 @@
 	} while ((dnld_cmd != FW_HAS_LAST_BLOCK) && retries);
 
 cleanup:
-	dev_notice(adapter->dev,
-		   "info: FW download over, size %d bytes\n", tlen);
+	mwifiex_dbg(adapter, MSG,
+		    "info: FW download over, size %d bytes\n", tlen);
 
 	kfree(recv_buff);
 	kfree(fwdata);
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index b8a4587..790e619 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -26,6 +26,8 @@
 #include "11n.h"
 
 static struct mwifiex_debug_data items[] = {
+	{"debug_mask", item_size(debug_mask),
+	 item_addr(debug_mask), 1},
 	{"int_counter", item_size(int_counter),
 	 item_addr(int_counter), 1},
 	{"wmm_ac_vo", item_size(packets_out[WMM_AC_VO]),
@@ -158,7 +160,8 @@
 	} else if (func_init_shutdown == MWIFIEX_FUNC_SHUTDOWN) {
 		cmd = HostCmd_CMD_FUNC_SHUTDOWN;
 	} else {
-		dev_err(priv->adapter->dev, "unsupported parameter\n");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "unsupported parameter\n");
 		return -1;
 	}
 
@@ -178,6 +181,7 @@
 	struct mwifiex_adapter *adapter = priv->adapter;
 
 	if (info) {
+		info->debug_mask = adapter->debug_mask;
 		memcpy(info->packets_out,
 		       priv->wmm.packets_out,
 		       sizeof(priv->wmm.packets_out));
@@ -325,7 +329,7 @@
 			  struct rxpd *rx_pd)
 {
 	u16 stype;
-	u8 category, action_code;
+	u8 category, action_code, *addr2;
 	struct ieee80211_hdr *ieee_hdr = (void *)payload;
 
 	stype = (le16_to_cpu(ieee_hdr->frame_control) & IEEE80211_FCTL_STYPE);
@@ -333,21 +337,35 @@
 	switch (stype) {
 	case IEEE80211_STYPE_ACTION:
 		category = *(payload + sizeof(struct ieee80211_hdr));
-		action_code = *(payload + sizeof(struct ieee80211_hdr) + 1);
-		if (category == WLAN_CATEGORY_PUBLIC &&
-		    action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) {
-			dev_dbg(priv->adapter->dev,
-				"TDLS discovery response %pM nf=%d, snr=%d\n",
-				ieee_hdr->addr2, rx_pd->nf, rx_pd->snr);
-			mwifiex_auto_tdls_update_peer_signal(priv,
-							     ieee_hdr->addr2,
-							     rx_pd->snr,
-							     rx_pd->nf);
+		switch (category) {
+		case WLAN_CATEGORY_PUBLIC:
+			action_code = *(payload + sizeof(struct ieee80211_hdr)
+					+ 1);
+			if (action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) {
+				addr2 = ieee_hdr->addr2;
+				mwifiex_dbg(priv->adapter, INFO,
+					    "TDLS discovery response %pM nf=%d, snr=%d\n",
+					    addr2, rx_pd->nf, rx_pd->snr);
+				mwifiex_auto_tdls_update_peer_signal(priv,
+								     addr2,
+								     rx_pd->snr,
+								     rx_pd->nf);
+			}
+			break;
+		case WLAN_CATEGORY_BACK:
+			/* do not indicate BACK action frames to cfg80211 */
+			mwifiex_dbg(priv->adapter, INFO,
+				    "drop BACK action frames\n");
+			return -1;
+		default:
+			mwifiex_dbg(priv->adapter, INFO,
+				    "unknown public action frame category %d\n",
+				    category);
 		}
 		break;
 	default:
-		dev_dbg(priv->adapter->dev,
-			"unknown mgmt frame subytpe %#x\n", stype);
+		mwifiex_dbg(priv->adapter, INFO,
+		    "unknown mgmt frame subtype %#x\n", stype);
+		return 0;
 	}
 
 	return 0;
@@ -369,8 +387,8 @@
 
 	if (!priv->mgmt_frame_mask ||
 	    priv->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) {
-		dev_dbg(priv->adapter->dev,
-			"do not receive mgmt frames on uninitialized intf");
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "do not receive mgmt frames on uninitialized intf");
 		return -1;
 	}
 
@@ -383,8 +401,9 @@
 
 	ieee_hdr = (void *)skb->data;
 	if (ieee80211_is_mgmt(ieee_hdr->frame_control)) {
-		mwifiex_parse_mgmt_packet(priv, (u8 *)ieee_hdr,
-					  pkt_len, rx_pd);
+		if (mwifiex_parse_mgmt_packet(priv, (u8 *)ieee_hdr,
+					      pkt_len, rx_pd))
+			return -1;
 	}
 	/* Remove address4 */
 	memmove(skb->data + sizeof(struct ieee80211_hdr_3addr),
@@ -412,12 +431,25 @@
  */
 int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
 {
+	struct mwifiex_sta_node *src_node;
+	struct ethhdr *p_ethhdr;
+
 	if (!skb)
 		return -1;
 
 	priv->stats.rx_bytes += skb->len;
 	priv->stats.rx_packets++;
 
+	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
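+		/* in uAP mode, also track RX stats per source station */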
+		p_ethhdr = (void *)skb->data;
+		src_node = mwifiex_get_sta_entry(priv, p_ethhdr->h_source);
+		if (src_node) {
+			src_node->stats.last_rx = jiffies;
+			src_node->stats.rx_bytes += skb->len;
+			src_node->stats.rx_packets++;
+		}
+	}
+
 	skb->dev = priv->netdev;
 	skb->protocol = eth_type_trans(skb, priv->netdev);
 	skb->ip_summed = CHECKSUM_NONE;
@@ -464,13 +496,14 @@
 int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
 			 struct cmd_ctrl_node *cmd_node)
 {
-	dev_dbg(adapter->dev, "cmd completed: status=%d\n",
-		adapter->cmd_wait_q.status);
+	mwifiex_dbg(adapter, CMD,
+		    "cmd completed: status=%d\n",
+		    adapter->cmd_wait_q.status);
 
 	*(cmd_node->condition) = true;
 
 	if (adapter->cmd_wait_q.status == -ETIMEDOUT)
-		dev_err(adapter->dev, "cmd timeout\n");
+		mwifiex_dbg(adapter, ERROR, "cmd timeout\n");
 	else
 		wake_up_interruptible(&adapter->cmd_wait_q.wait);
 
@@ -536,13 +569,16 @@
 mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
 		       int ies_len, struct mwifiex_sta_node *node)
 {
+	struct ieee_types_header *ht_cap_ie;
 	const struct ieee80211_ht_cap *ht_cap;
 
 	if (!ies)
 		return;
 
-	ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
-	if (ht_cap) {
+	ht_cap_ie = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies,
+					     ies_len);
+	if (ht_cap_ie) {
+		ht_cap = (void *)(ht_cap_ie + 1);
 		node->is_11n_enabled = 1;
 		node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
 				  IEEE80211_HT_CAP_MAX_AMSDU ?
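
A note on the ht_cap fix above: cfg80211_find_ie() returns a pointer to the start of the information element, i.e. to its two-byte header (element ID, length), not to the payload. The old code cast that pointer straight to struct ieee80211_ht_cap, so cap_info was read two bytes too early; the new code skips the header first. A minimal sketch of the layout this relies on, using mwifiex's own two-byte struct ieee_types_header:

	const u8 *ie;
	const struct ieee80211_ht_cap *ht;

	ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
	if (ie) {
		/* skip element ID + length; same as (ht_cap_ie + 1) above */
		ht = (const void *)(ie + sizeof(struct ieee_types_header));
		/* ht->cap_info now lines up with the HT Capabilities field */
	}
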
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index b2e9956..a8ea21c 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -107,7 +107,7 @@
 
 	ra_list->total_pkt_count = 0;
 
-	dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);
+	mwifiex_dbg(adapter, INFO, "info: allocated ra_list %p\n", ra_list);
 
 	return ra_list;
 }
@@ -150,7 +150,8 @@
 
 	for (i = 0; i < MAX_NUM_TID; ++i) {
 		ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
-		dev_dbg(adapter->dev, "info: created ra_list %p\n", ra_list);
+		mwifiex_dbg(adapter, INFO,
+			    "info: created ra_list %p\n", ra_list);
 
 		if (!ra_list)
 			break;
@@ -178,8 +179,8 @@
 			spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
 		}
 
-		dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
-			ra_list, ra_list->is_11n_enabled);
+		mwifiex_dbg(adapter, DATA, "data: ralist %p: is_11n_enabled=%d\n",
+			    ra_list, ra_list->is_11n_enabled);
 
 		if (ra_list->is_11n_enabled) {
 			ra_list->ba_pkt_count = 0;
@@ -241,11 +242,12 @@
 		return;
 	}
 
-	dev_dbg(priv->adapter->dev, "info: WMM Parameter IE: version=%d, "
-		"qos_info Parameter Set Count=%d, Reserved=%#x\n",
-		wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
-		IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
-		wmm_ie->reserved);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: WMM Parameter IE: version=%d,\t"
+		    "qos_info Parameter Set Count=%d, Reserved=%#x\n",
+		    wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
+		    IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
+		    wmm_ie->reserved);
 
 	for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
 		u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
@@ -257,10 +259,10 @@
 		priv->wmm.queue_priority[ac_idx] = ac_idx;
 		tmp[ac_idx] = avg_back_off;
 
-		dev_dbg(priv->adapter->dev,
-			"info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
-			(1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
-			cw_min, avg_back_off);
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
+			    (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
+			    cw_min, avg_back_off);
 		mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
 	}
 
@@ -333,8 +335,8 @@
 {
 	int ac_val;
 
-	dev_dbg(priv->adapter->dev, "info: WMM: AC Priorities:"
-			"BK(0), BE(1), VI(2), VO(3)\n");
+	mwifiex_dbg(priv->adapter, INFO, "info: WMM: AC Priorities:\t"
+		    "BK(0), BE(1), VI(2), VO(3)\n");
 
 	if (!priv->wmm_enabled) {
 		/* WMM is not enabled, default priorities */
@@ -346,9 +348,10 @@
 			priv->wmm.ac_down_graded_vals[ac_val]
 				= mwifiex_wmm_eval_downgrade_ac(priv,
 						(enum mwifiex_wmm_ac_e) ac_val);
-			dev_dbg(priv->adapter->dev,
-				"info: WMM: AC PRIO %d maps to %d\n",
-				ac_val, priv->wmm.ac_down_graded_vals[ac_val]);
+			mwifiex_dbg(priv->adapter, INFO,
+				    "info: WMM: AC PRIO %d maps to %d\n",
+				    ac_val,
+				    priv->wmm.ac_down_graded_vals[ac_val]);
 		}
 	}
 }
@@ -428,6 +431,15 @@
 							priv->tos_to_tid_inv[i];
 		}
 
+		priv->aggr_prio_tbl[6].amsdu
+					= priv->aggr_prio_tbl[6].ampdu_ap
+					= priv->aggr_prio_tbl[6].ampdu_user
+					= BA_STREAM_NOT_ALLOWED;
+
+		priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
+					= priv->aggr_prio_tbl[7].ampdu_user
+					= BA_STREAM_NOT_ALLOWED;
+
 		mwifiex_set_ba_params(priv);
 		mwifiex_reset_11n_rx_seq_num(priv);
 
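
The chained assignments above mark all three aggregation modes (A-MSDU, AP-initiated A-MPDU, user-initiated A-MPDU) as BA_STREAM_NOT_ALLOWED for TIDs 6 and 7. Those TIDs map to the voice access category, where aggregation-induced latency is presumably unwanted. An equivalent, more explicit spelling of the same initialisation:

	int tid;

	for (tid = 6; tid <= 7; tid++) {
		priv->aggr_prio_tbl[tid].amsdu = BA_STREAM_NOT_ALLOWED;
		priv->aggr_prio_tbl[tid].ampdu_ap = BA_STREAM_NOT_ALLOWED;
		priv->aggr_prio_tbl[tid].ampdu_user = BA_STREAM_NOT_ALLOWED;
	}
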
@@ -512,8 +524,8 @@
 	int i;
 
 	for (i = 0; i < MAX_NUM_TID; ++i) {
-		dev_dbg(priv->adapter->dev,
-			"info: ra_list: freeing buf for tid %d\n", i);
+		mwifiex_dbg(priv->adapter, INFO,
+			    "info: ra_list: freeing buf for tid %d\n", i);
 		list_for_each_entry_safe(ra_list, tmp_node,
 					 &priv->wmm.tid_tbl_ptr[i].ra_list,
 					 list) {
@@ -685,14 +697,15 @@
 	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
 	    ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
 		if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
-			dev_dbg(adapter->dev,
-				"TDLS setup packet for %pM. Don't block\n", ra);
+			mwifiex_dbg(adapter, DATA,
+				    "TDLS setup packet for %pM.\t"
+				    "Don't block\n", ra);
 		else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN))
 			tdls_status = mwifiex_get_tdls_link_status(priv, ra);
 	}
 
 	if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
-		dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
+		mwifiex_dbg(adapter, DATA, "data: drop packet in disconnect\n");
 		mwifiex_write_data_complete(adapter, skb, 0, -1);
 		return;
 	}
@@ -773,6 +786,7 @@
 {
 	u8 *curr = (u8 *) &resp->params.get_wmm_status;
 	uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
+	int mask = IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK;
 	bool valid = true;
 
 	struct mwifiex_ie_types_data *tlv_hdr;
@@ -780,8 +794,9 @@
 	struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
 	struct mwifiex_wmm_ac_status *ac_status;
 
-	dev_dbg(priv->adapter->dev, "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
-		resp_len);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
+		    resp_len);
 
 	while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
 		tlv_hdr = (struct mwifiex_ie_types_data *) curr;
@@ -795,12 +810,12 @@
 			tlv_wmm_qstatus =
 				(struct mwifiex_ie_types_wmm_queue_status *)
 				tlv_hdr;
-			dev_dbg(priv->adapter->dev,
-				"info: CMD_RESP: WMM_GET_STATUS:"
-				" QSTATUS TLV: %d, %d, %d\n",
-				tlv_wmm_qstatus->queue_index,
-				tlv_wmm_qstatus->flow_required,
-				tlv_wmm_qstatus->disabled);
+			mwifiex_dbg(priv->adapter, CMD,
+				    "info: CMD_RESP: WMM_GET_STATUS:\t"
+				    "QSTATUS TLV: %d, %d, %d\n",
+				    tlv_wmm_qstatus->queue_index,
+				    tlv_wmm_qstatus->flow_required,
+				    tlv_wmm_qstatus->disabled);
 
 			ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
 							 queue_index];
@@ -823,11 +838,10 @@
 			wmm_param_ie->vend_hdr.element_id =
 						WLAN_EID_VENDOR_SPECIFIC;
 
-			dev_dbg(priv->adapter->dev,
-				"info: CMD_RESP: WMM_GET_STATUS:"
-				" WMM Parameter Set Count: %d\n",
-				wmm_param_ie->qos_info_bitmap &
-				IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK);
+			mwifiex_dbg(priv->adapter, CMD,
+				    "info: CMD_RESP: WMM_GET_STATUS:\t"
+				    "WMM Parameter Set Count: %d\n",
+				    wmm_param_ie->qos_info_bitmap & mask);
 
 			memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
 			       wmm_ie, wmm_param_ie,
@@ -875,9 +889,9 @@
 	if (!wmm_ie)
 		return 0;
 
-	dev_dbg(priv->adapter->dev,
-		"info: WMM: process assoc req: bss->wmm_ie=%#x\n",
-		wmm_ie->vend_hdr.element_id);
+	mwifiex_dbg(priv->adapter, INFO,
+		    "info: WMM: process assoc req: bss->wmm_ie=%#x\n",
+		    wmm_ie->vend_hdr.element_id);
 
 	if ((priv->wmm_required ||
 	     (ht_cap && (priv->adapter->config_bands & BAND_GN ||
@@ -927,8 +941,8 @@
 	 */
 	ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);
 
-	dev_dbg(priv->adapter->dev, "data: WMM: Pkt Delay: %d ms,"
-				" %d ms sent to FW\n", queue_delay, ret_val);
+	mwifiex_dbg(priv->adapter, DATA, "data: WMM: Pkt Delay: %d ms,\t"
+		    "%d ms sent to FW\n", queue_delay, ret_val);
 
 	return ret_val;
 }
@@ -1082,14 +1096,15 @@
 	if (skb_queue_empty(&ptr->skb_head)) {
 		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
 				       ra_list_flags);
-		dev_dbg(adapter->dev, "data: nothing to send\n");
+		mwifiex_dbg(adapter, DATA, "data: nothing to send\n");
 		return;
 	}
 
 	skb = skb_dequeue(&ptr->skb_head);
 
 	tx_info = MWIFIEX_SKB_TXCB(skb);
-	dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);
+	mwifiex_dbg(adapter, DATA,
+		    "data: dequeuing the packet %p %p\n", ptr, skb);
 
 	ptr->total_pkt_count--;
 
@@ -1205,7 +1220,7 @@
 
 	switch (ret) {
 	case -EBUSY:
-		dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+		mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
 		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
 
 		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
@@ -1224,7 +1239,7 @@
 	case -1:
 		if (adapter->iface_type != MWIFIEX_PCIE)
 			adapter->data_sent = false;
-		dev_err(adapter->dev, "host_to_card failed: %#x\n", ret);
+		mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret);
 		adapter->dbg.num_tx_host_to_card_failure++;
 		mwifiex_write_data_complete(adapter, skb, 0, ret);
 		break;
@@ -1263,7 +1278,7 @@
 
 	tid = mwifiex_get_tid(ptr);
 
-	dev_dbg(adapter->dev, "data: tid=%d\n", tid);
+	mwifiex_dbg(adapter, DATA, "data: tid=%d\n", tid);
 
 	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
 	if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 9592116..77361af 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2380,7 +2380,7 @@
 	if (cap & MWL8K_CAP_GREENFIELD)
 		band->ht_cap.cap |= IEEE80211_HT_CAP_GRN_FLD;
 	if (cap & MWL8K_CAP_AMPDU) {
-		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+		ieee80211_hw_set(hw, AMPDU_AGGREGATION);
 		band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
 		band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
 	}
@@ -5192,7 +5192,7 @@
 		priv->sniffer_enabled = true;
 	}
 
-	*total_flags &=	FIF_PROMISC_IN_BSS | FIF_ALLMULTI |
+	*total_flags &=	FIF_ALLMULTI |
 			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL |
 			FIF_OTHER_BSS;
 
@@ -5431,7 +5431,7 @@
 	u8 *addr = sta->addr, idx;
 	struct mwl8k_sta *sta_info = MWL8K_STA(sta);
 
-	if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
+	if (!ieee80211_hw_check(hw, AMPDU_AGGREGATION))
 		return -ENOTSUPP;
 
 	spin_lock(&priv->stream_lock);
@@ -6076,14 +6076,15 @@
 	hw->queues = MWL8K_TX_WMM_QUEUES;
 
 	/* Set rssi values to dBm */
-	hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_HAS_RATE_CONTROL;
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, HAS_RATE_CONTROL);
 
 	/*
 	 * Ask mac80211 to not to trigger PS mode
 	 * based on PM bit of incoming frames.
 	 */
 	if (priv->ap_fw)
-		hw->flags |= IEEE80211_HW_AP_LINK_PS;
+		ieee80211_hw_set(hw, AP_LINK_PS);
 
 	hw->vif_data_size = sizeof(struct mwl8k_vif);
 	hw->sta_data_size = sizeof(struct mwl8k_sta);
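
The mwl8k hunks above show a conversion repeated throughout this series: mac80211 moved its hardware capability flags from a u32 bitmask in hw->flags to a bitmap accessed through helpers. A simplified sketch of what the helpers do (the real definitions, which wrap small inline functions, live in include/net/mac80211.h):

	#define ieee80211_hw_set(hw, flg) \
		__set_bit(IEEE80211_HW_##flg, (hw)->flags)
	#define ieee80211_hw_check(hw, flg) \
		test_bit(IEEE80211_HW_##flg, (hw)->flags)

	/* so these two spellings express the same intent: */
	hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;	/* old, mask */
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);	/* new, bit  */
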
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 275408e..257a9ea 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -351,8 +351,7 @@
 		 * "TRANSPARENT and PROMISCUOUS are mutually exclusive"
 		 * STSW45X0C LMAC API - page 12
 		 */
-		if (((priv->filter_flags & FIF_PROMISC_IN_BSS) ||
-		     (priv->filter_flags & FIF_OTHER_BSS)) &&
+		if (priv->filter_flags & FIF_OTHER_BSS &&
 		    (mode != P54_FILTER_TYPE_PROMISCUOUS))
 			mode |= P54_FILTER_TYPE_TRANSPARENT;
 	} else {
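
Related to the filter changes here and in mwl8k above: mac80211 dropped the FIF_PROMISC_IN_BSS receive filter flag along with support for in-BSS promiscuous mode, so drivers remove it from their supported-filter masks and hard-code the "drop frames not addressed to me" behaviour, as the rt2x00 hunks below do:

	rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1);
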
diff --git a/drivers/net/wireless/p54/led.c b/drivers/net/wireless/p54/led.c
index 1f6fd5f..9a8fedd 100644
--- a/drivers/net/wireless/p54/led.c
+++ b/drivers/net/wireless/p54/led.c
@@ -83,7 +83,7 @@
 
 static int p54_register_led(struct p54_common *priv,
 			    unsigned int led_index,
-			    char *name, char *trigger)
+			    char *name, const char *trigger)
 {
 	struct p54_led_dev *led = &priv->leds[led_index];
 	int err;
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index e79674f..7805864 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -395,13 +395,11 @@
 {
 	struct p54_common *priv = dev->priv;
 
-	*total_flags &= FIF_PROMISC_IN_BSS |
-			FIF_ALLMULTI |
-			FIF_OTHER_BSS;
+	*total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS;
 
 	priv->filter_flags = *total_flags;
 
-	if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
+	if (changed_flags & FIF_OTHER_BSS)
 		p54_setup_mac(priv);
 
 	if (changed_flags & FIF_ALLMULTI || multicast)
@@ -748,12 +746,12 @@
 	spin_lock_init(&priv->tx_stats_lock);
 	skb_queue_head_init(&priv->tx_queue);
 	skb_queue_head_init(&priv->tx_pending);
-	dev->flags = IEEE80211_HW_RX_INCLUDES_FCS |
-		     IEEE80211_HW_SIGNAL_DBM |
-		     IEEE80211_HW_SUPPORTS_PS |
-		     IEEE80211_HW_PS_NULLFUNC_STACK |
-		     IEEE80211_HW_MFP_CAPABLE |
-		     IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+	ieee80211_hw_set(dev, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(dev, MFP_CAPABLE);
+	ieee80211_hw_set(dev, PS_NULLFUNC_STACK);
+	ieee80211_hw_set(dev, SUPPORTS_PS);
+	ieee80211_hw_set(dev, RX_INCLUDES_FCS);
+	ieee80211_hw_set(dev, SIGNAL_DBM);
 
 	dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 				      BIT(NL80211_IFTYPE_ADHOC) |
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 477f863..0881ba8 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -143,7 +143,7 @@
 static char *essid;
 
 /* Default to encapsulation unless translation requested */
-static bool translate = 1;
+static bool translate = true;
 
 static int country = USA;
 
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index d72ff8e..71a825c 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -356,9 +356,9 @@
 #define CAP_MODE_80211G		4
 #define CAP_MODE_MASK		7
 
-#define WORK_LINK_UP		(1<<0)
-#define WORK_LINK_DOWN		(1<<1)
-#define WORK_SET_MULTICAST_LIST	(1<<2)
+#define WORK_LINK_UP		0
+#define WORK_LINK_DOWN		1
+#define WORK_SET_MULTICAST_LIST	2
 
 #define RNDIS_WLAN_ALG_NONE	0
 #define RNDIS_WLAN_ALG_WEP	(1<<0)
@@ -2861,7 +2861,7 @@
 
 		deauthenticate(usbdev);
 
-		cfg80211_disconnected(usbdev->net, 0, NULL, 0, GFP_KERNEL);
+		cfg80211_disconnected(usbdev->net, 0, NULL, 0, true, GFP_KERNEL);
 	}
 
 	netif_carrier_off(usbdev->net);
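
Two small fixes meet in rndis_wlan above. The WORK_* values change from masks to bit numbers because set_bit()/test_bit() take a bit index, not a mask, so passing (1 << n) silently addressed the wrong bit. Usage is unchanged (sketch, assuming the driver's unsigned long work_pending field):

	set_bit(WORK_LINK_UP, &priv->work_pending);
	if (test_and_clear_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending))
		set_multicast_list(usbdev);	/* deferred handler, sketch */

Separately, cfg80211_disconnected() grew a locally_generated argument; the true passed here tells cfg80211 the disconnect originated locally rather than from the AP.
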
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index aeaf87b..7e804324 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -1062,10 +1062,9 @@
 	hw->priv = adapter;
 	adapter->hw = hw;
 
-	hw->flags = IEEE80211_HW_SIGNAL_DBM |
-		    IEEE80211_HW_HAS_RATE_CONTROL |
-		    IEEE80211_HW_AMPDU_AGGREGATION |
-		    0;
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
 
 	hw->queues = MAX_HW_QUEUES;
 	hw->extra_tx_headroom = RSI_NEEDED_HEADROOM;
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index bdf5590..9a3966c 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -273,10 +273,8 @@
 			   !(filter_flags & FIF_PLCPFAIL));
 	rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
 			   !(filter_flags & FIF_CONTROL));
-	rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
-			   !(filter_flags & FIF_PROMISC_IN_BSS));
+	rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1);
 	rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
-			   !(filter_flags & FIF_PROMISC_IN_BSS) &&
 			   !rt2x00dev->intf_ap_count);
 	rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
 	rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);
@@ -1576,10 +1574,10 @@
 	/*
 	 * Initialize all hw fields.
 	 */
-	rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
-			       IEEE80211_HW_SIGNAL_DBM |
-			       IEEE80211_HW_SUPPORTS_PS |
-			       IEEE80211_HW_PS_NULLFUNC_STACK;
+	ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
+	ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
+	ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING);
+	ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
 
 	SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
 	SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 79f4fe6..1a6740b 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -274,10 +274,8 @@
 			   !(filter_flags & FIF_PLCPFAIL));
 	rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
 			   !(filter_flags & FIF_CONTROL));
-	rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
-			   !(filter_flags & FIF_PROMISC_IN_BSS));
+	rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1);
 	rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
-			   !(filter_flags & FIF_PROMISC_IN_BSS) &&
 			   !rt2x00dev->intf_ap_count);
 	rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
 	rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST,
@@ -1871,10 +1869,10 @@
 	/*
 	 * Initialize all hw fields.
 	 */
-	rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
-			       IEEE80211_HW_SIGNAL_DBM |
-			       IEEE80211_HW_SUPPORTS_PS |
-			       IEEE80211_HW_PS_NULLFUNC_STACK;
+	ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
+	ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
+	ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING);
+	ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
 
 	SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
 	SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 05c6459..b50d873 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -434,10 +434,8 @@
 			   !(filter_flags & FIF_PLCPFAIL));
 	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL,
 			   !(filter_flags & FIF_CONTROL));
-	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME,
-			   !(filter_flags & FIF_PROMISC_IN_BSS));
+	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME, 1);
 	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS,
-			   !(filter_flags & FIF_PROMISC_IN_BSS) &&
 			   !rt2x00dev->intf_ap_count);
 	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1);
 	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST,
@@ -1698,11 +1696,10 @@
 	 * multicast and broadcast traffic immediately instead of buffering it
 	 * infinitly and thus dropping it after some time.
 	 */
-	rt2x00dev->hw->flags =
-	    IEEE80211_HW_RX_INCLUDES_FCS |
-	    IEEE80211_HW_SIGNAL_DBM |
-	    IEEE80211_HW_SUPPORTS_PS |
-	    IEEE80211_HW_PS_NULLFUNC_STACK;
+	ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
+	ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
+	ieee80211_hw_set(rt2x00dev->hw, RX_INCLUDES_FCS);
+	ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
 
 	/*
 	 * Disable powersaving as default.
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index ebd5625..95c1d7c 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -2961,6 +2961,15 @@
 #define BCN_TBTT_OFFSET 64
 
 /*
+ * Hardware has 255 WCID table entries. First 32 entries are reserved for
+ * shared keys. Since parts of the pairwise key table might be shared with
+ * the beacon frame buffers 6 & 7 we could only use the first 222 entries.
+ */
+#define WCID_START	33
+#define WCID_END	222
+#define STA_IDS_SIZE	(WCID_END - WCID_START + 2)
+
+/*
  * RT2800 driver data structure
  */
 struct rt2800_drv_data {
@@ -2971,6 +2980,7 @@
 	u8 txmixer_gain_24g;
 	u8 txmixer_gain_5g;
 	unsigned int tbtt_tick;
+	DECLARE_BITMAP(sta_ids, STA_IDS_SIZE);
 };
 
 #endif /* RT2800_H */
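
On the sizing above: WCID_END - WCID_START + 1 gives 190 usable entries, so STA_IDS_SIZE reserves one spare bit beyond the usable range. That looks deliberate: find_first_zero_bit() returns the bitmap size when every bit is set, and with the slack any "bitmap full" result maps to a WCID above WCID_END, which the range checks in rt2800lib.c then reject. The bit/WCID mapping, as a sketch:

	/* bit 0 of sta_ids corresponds to WCID_START (33) */
	static inline int wcid_to_bit(int wcid) { return wcid - WCID_START; }
	static inline int bit_to_wcid(int bit)  { return bit + WCID_START; }
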
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index be2d54f..9524564 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1381,38 +1381,6 @@
 }
 EXPORT_SYMBOL_GPL(rt2800_config_shared_key);
 
-static inline int rt2800_find_wcid(struct rt2x00_dev *rt2x00dev)
-{
-	struct mac_wcid_entry wcid_entry;
-	int idx;
-	u32 offset;
-
-	/*
-	 * Search for the first free WCID entry and return the corresponding
-	 * index.
-	 *
-	 * Make sure the WCID starts _after_ the last possible shared key
-	 * entry (>32).
-	 *
-	 * Since parts of the pairwise key table might be shared with
-	 * the beacon frame buffers 6 & 7 we should only write into the
-	 * first 222 entries.
-	 */
-	for (idx = 33; idx <= 222; idx++) {
-		offset = MAC_WCID_ENTRY(idx);
-		rt2800_register_multiread(rt2x00dev, offset, &wcid_entry,
-					  sizeof(wcid_entry));
-		if (is_broadcast_ether_addr(wcid_entry.mac))
-			return idx;
-	}
-
-	/*
-	 * Use -1 to indicate that we don't have any more space in the WCID
-	 * table.
-	 */
-	return -1;
-}
-
 int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
 			       struct rt2x00lib_crypto *crypto,
 			       struct ieee80211_key_conf *key)
@@ -1425,7 +1393,7 @@
 		 * Allow key configuration only for STAs that are
 		 * known by the hw.
 		 */
-		if (crypto->wcid < 0)
+		if (crypto->wcid > WCID_END)
 			return -ENOSPC;
 		key->hw_key_idx = crypto->wcid;
 
@@ -1455,11 +1423,13 @@
 {
 	int wcid;
 	struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta);
+	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
 
 	/*
-	 * Find next free WCID.
+	 * Search for the first free WCID entry and return the corresponding
+	 * index.
 	 */
-	wcid = rt2800_find_wcid(rt2x00dev);
+	wcid = find_first_zero_bit(drv_data->sta_ids, STA_IDS_SIZE) + WCID_START;
 
 	/*
 	 * Store selected wcid even if it is invalid so that we can
@@ -1471,9 +1441,11 @@
 	 * No space left in the device, however, we can still communicate
 	 * with the STA -> No error.
 	 */
-	if (wcid < 0)
+	if (wcid > WCID_END)
 		return 0;
 
+	__set_bit(wcid - WCID_START, drv_data->sta_ids);
+
 	/*
 	 * Clean up WCID attributes and write STA address to the device.
 	 */
@@ -1487,11 +1459,16 @@
 
 int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, int wcid)
 {
+	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+
+	if (wcid > WCID_END)
+		return 0;
 	/*
 	 * Remove WCID entry, no need to clean the attributes as they will
 	 * get renewed when the WCID is reused.
 	 */
 	rt2800_config_wcid(rt2x00dev, NULL, wcid);
+	__clear_bit(wcid - WCID_START, drv_data->sta_ids);
 
 	return 0;
 }
@@ -1513,8 +1490,7 @@
 			   !(filter_flags & FIF_FCSFAIL));
 	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
 			   !(filter_flags & FIF_PLCPFAIL));
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME,
-			   !(filter_flags & FIF_PROMISC_IN_BSS));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME, 1);
 	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
 	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
 	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST,
@@ -7498,13 +7474,12 @@
 	/*
 	 * Initialize all hw fields.
 	 */
-	rt2x00dev->hw->flags =
-	    IEEE80211_HW_SIGNAL_DBM |
-	    IEEE80211_HW_SUPPORTS_PS |
-	    IEEE80211_HW_PS_NULLFUNC_STACK |
-	    IEEE80211_HW_AMPDU_AGGREGATION |
-	    IEEE80211_HW_REPORTS_TX_ACK_STATUS |
-	    IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+	ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_HT_CCK_RATES);
+	ieee80211_hw_set(rt2x00dev->hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(rt2x00dev->hw, AMPDU_AGGREGATION);
+	ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
+	ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
+	ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
 
 	/*
 	 * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices
@@ -7514,8 +7489,7 @@
 	 * infinitly and thus dropping it after some time.
 	 */
 	if (!rt2x00_is_usb(rt2x00dev))
-		rt2x00dev->hw->flags |=
-			IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+		ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING);
 
 	SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
 	SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
@@ -7818,21 +7792,25 @@
 /*
  * IEEE80211 stack callback functions.
  */
-void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32,
-			 u16 *iv16)
+void rt2800_get_key_seq(struct ieee80211_hw *hw,
+			struct ieee80211_key_conf *key,
+			struct ieee80211_key_seq *seq)
 {
 	struct rt2x00_dev *rt2x00dev = hw->priv;
 	struct mac_iveiv_entry iveiv_entry;
 	u32 offset;
 
-	offset = MAC_IVEIV_ENTRY(hw_key_idx);
+	if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
+		return;
+
+	offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
 	rt2800_register_multiread(rt2x00dev, offset,
 				      &iveiv_entry, sizeof(iveiv_entry));
 
-	memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16));
-	memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32));
+	memcpy(&seq->tkip.iv16, &iveiv_entry.iv[0], 2);
+	memcpy(&seq->tkip.iv32, &iveiv_entry.iv[4], 4);
 }
-EXPORT_SYMBOL_GPL(rt2800_get_tkip_seq);
+EXPORT_SYMBOL_GPL(rt2800_get_key_seq);
 
 int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 {
@@ -7967,11 +7945,11 @@
 	/*
 	 * Don't allow aggregation for stations the hardware isn't aware
 	 * of because tx status reports for frames to an unknown station
-	 * always contain wcid=255 and thus we can't distinguish between
-	 * multiple stations which leads to unwanted situations when the
-	 * hw reorders frames due to aggregation.
+	 * always contain wcid=WCID_END+1 and thus we can't distinguish
+	 * between multiple stations which leads to unwanted situations
+	 * when the hw reorders frames due to aggregation.
 	 */
-	if (sta_priv->wcid < 0)
+	if (sta_priv->wcid > WCID_END)
 		return 1;
 
 	switch (action) {
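
Two independent changes meet in rt2800lib.c above. First, WCID bookkeeping moves from scanning the hardware's MAC_WCID_ENTRY registers to the sta_ids bitmap, which also replaces the old negative-wcid sentinel with "wcid > WCID_END". The allocate/release lifecycle, condensed:

	/* claim: first free bit, offset back into WCID space */
	wcid = find_first_zero_bit(drv_data->sta_ids, STA_IDS_SIZE) + WCID_START;
	if (wcid <= WCID_END)
		__set_bit(wcid - WCID_START, drv_data->sta_ids);

	/* release, in rt2800_sta_remove() */
	__clear_bit(wcid - WCID_START, drv_data->sta_ids);

Second, the TKIP-specific get_tkip_seq callback becomes the cipher-agnostic get_key_seq, which receives the key configuration and fills a struct ieee80211_key_seq; rt2800 implements only the TKIP case and returns early for any other cipher.
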
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 3019db6..1609b8a 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -209,8 +209,9 @@
 
 int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev);
 
-void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32,
-			 u16 *iv16);
+void rt2800_get_key_seq(struct ieee80211_hw *hw,
+			struct ieee80211_key_conf *key,
+			struct ieee80211_key_seq *seq);
 int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
 int rt2800_conf_tx(struct ieee80211_hw *hw,
 		   struct ieee80211_vif *vif, u16 queue_idx,
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index cc1b3cc..0af2257 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -309,7 +309,7 @@
 	.sw_scan_start		= rt2x00mac_sw_scan_start,
 	.sw_scan_complete	= rt2x00mac_sw_scan_complete,
 	.get_stats		= rt2x00mac_get_stats,
-	.get_tkip_seq		= rt2800_get_tkip_seq,
+	.get_key_seq		= rt2800_get_key_seq,
 	.set_rts_threshold	= rt2800_set_rts_threshold,
 	.sta_add		= rt2x00mac_sta_add,
 	.sta_remove		= rt2x00mac_sta_remove,
diff --git a/drivers/net/wireless/rt2x00/rt2800soc.c b/drivers/net/wireless/rt2x00/rt2800soc.c
index aaa7aa4..a985a5a 100644
--- a/drivers/net/wireless/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/rt2x00/rt2800soc.c
@@ -148,7 +148,7 @@
 	.sw_scan_start		= rt2x00mac_sw_scan_start,
 	.sw_scan_complete	= rt2x00mac_sw_scan_complete,
 	.get_stats		= rt2x00mac_get_stats,
-	.get_tkip_seq		= rt2800_get_tkip_seq,
+	.get_key_seq		= rt2800_get_key_seq,
 	.set_rts_threshold	= rt2800_set_rts_threshold,
 	.sta_add		= rt2x00mac_sta_add,
 	.sta_remove		= rt2x00mac_sta_remove,
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 6ec2466..5932306 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -835,7 +835,7 @@
 	.sw_scan_start		= rt2x00mac_sw_scan_start,
 	.sw_scan_complete	= rt2x00mac_sw_scan_complete,
 	.get_stats		= rt2x00mac_get_stats,
-	.get_tkip_seq		= rt2800_get_tkip_seq,
+	.get_key_seq		= rt2800_get_key_seq,
 	.set_rts_threshold	= rt2800_set_rts_threshold,
 	.sta_add		= rt2x00mac_sta_add,
 	.sta_remove		= rt2x00mac_sta_remove,
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 300876d..3c26ee6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -359,8 +359,7 @@
 	    FIF_PLCPFAIL |
 	    FIF_CONTROL |
 	    FIF_PSPOLL |
-	    FIF_OTHER_BSS |
-	    FIF_PROMISC_IN_BSS;
+	    FIF_OTHER_BSS;
 
 	/*
 	 * Apply some rules to the filters:
@@ -369,9 +368,6 @@
 	 * - Multicast filter seems to kill broadcast traffic so never use it.
 	 */
 	*total_flags |= FIF_ALLMULTI;
-	if (*total_flags & FIF_OTHER_BSS ||
-	    *total_flags & FIF_PROMISC_IN_BSS)
-		*total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS;
 
 	/*
 	 * If the device has a single filter for all control frames,
@@ -539,16 +535,8 @@
 		      struct ieee80211_sta *sta)
 {
 	struct rt2x00_dev *rt2x00dev = hw->priv;
-	struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta);
 
-	/*
-	 * If there's no space left in the device table store
-	 * -1 as wcid but tell mac80211 everything went ok.
-	 */
-	if (rt2x00dev->ops->lib->sta_add(rt2x00dev, vif, sta))
-		sta_priv->wcid = -1;
-
-	return 0;
+	return rt2x00dev->ops->lib->sta_add(rt2x00dev, vif, sta);
 }
 EXPORT_SYMBOL_GPL(rt2x00mac_sta_add);
 
@@ -558,12 +546,6 @@
 	struct rt2x00_dev *rt2x00dev = hw->priv;
 	struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta);
 
-	/*
-	 * If we never sent the STA to the device no need to clean it up.
-	 */
-	if (sta_priv->wcid < 0)
-		return 0;
-
 	return rt2x00dev->ops->lib->sta_remove(rt2x00dev, sta_priv->wcid);
 }
 EXPORT_SYMBOL_GPL(rt2x00mac_sta_remove);
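
With the bitmap allocator in place, rt2x00mac no longer special-cases stations the hardware rejected: an out-of-range WCID already encodes "unknown to the hardware", so sta_add returns the lib result directly and sta_remove calls down unconditionally, with rt2800_sta_remove bailing out itself. The changed guard, side by side:

	if (sta_priv->wcid > WCID_END)	/* was: sta_priv->wcid < 0 */
		return 0;		/* STA was never written to the hw */
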
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 8194550..c0e730e 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -530,10 +530,8 @@
 			   !(filter_flags & FIF_PLCPFAIL));
 	rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
 			   !(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
-	rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
-			   !(filter_flags & FIF_PROMISC_IN_BSS));
+	rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, 1);
 	rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
-			   !(filter_flags & FIF_PROMISC_IN_BSS) &&
 			   !rt2x00dev->intf_ap_count);
 	rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
 	rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
@@ -2760,11 +2758,10 @@
 	/*
 	 * Initialize all hw fields.
 	 */
-	rt2x00dev->hw->flags =
-	    IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
-	    IEEE80211_HW_SIGNAL_DBM |
-	    IEEE80211_HW_SUPPORTS_PS |
-	    IEEE80211_HW_PS_NULLFUNC_STACK;
+	ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
+	ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
+	ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING);
+	ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
 
 	SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
 	SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index a5458cf..7081e13 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -480,10 +480,8 @@
 			   !(filter_flags & FIF_PLCPFAIL));
 	rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
 			   !(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
-	rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
-			   !(filter_flags & FIF_PROMISC_IN_BSS));
+	rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, 1);
 	rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
-			   !(filter_flags & FIF_PROMISC_IN_BSS) &&
 			   !rt2x00dev->intf_ap_count);
 	rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
 	rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
@@ -2107,16 +2105,15 @@
 	/*
 	 * Initialize all hw fields.
 	 *
-	 * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING unless we are
+	 * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING unless we are
 	 * capable of sending the buffered frames out after the DTIM
 	 * transmission using rt2x00lib_beacondone. This will send out
 	 * multicast and broadcast traffic immediately instead of buffering it
 	 * infinitly and thus dropping it after some time.
 	 */
-	rt2x00dev->hw->flags =
-	    IEEE80211_HW_SIGNAL_DBM |
-	    IEEE80211_HW_SUPPORTS_PS |
-	    IEEE80211_HW_PS_NULLFUNC_STACK;
+	ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
+	ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
+	ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
 
 	SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
 	SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 706b844..a43a16f 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -1802,8 +1802,9 @@
 	priv->band.n_bitrates = 4;
 	dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
 
-	dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
-		IEEE80211_HW_RX_INCLUDES_FCS;
+	ieee80211_hw_set(dev, HOST_BROADCAST_PS_BUFFERING);
+	ieee80211_hw_set(dev, RX_INCLUDES_FCS);
+
 	dev->vif_data_size = sizeof(struct rtl8180_vif);
 	dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 					BIT(NL80211_IFTYPE_ADHOC);
@@ -1868,9 +1869,9 @@
 	}
 
 	if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180)
-		dev->flags |= IEEE80211_HW_SIGNAL_DBM;
+		ieee80211_hw_set(dev, SIGNAL_DBM);
 	else
-		dev->flags |= IEEE80211_HW_SIGNAL_UNSPEC;
+		ieee80211_hw_set(dev, SIGNAL_UNSPEC);
 
 	rtl8180_eeprom_read(priv);
 
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 629ad8c..b7f72f9 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -1478,9 +1478,9 @@
 	dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
 
 
-	dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
-		     IEEE80211_HW_SIGNAL_DBM |
-		     IEEE80211_HW_RX_INCLUDES_FCS;
+	ieee80211_hw_set(dev, RX_INCLUDES_FCS);
+	ieee80211_hw_set(dev, HOST_BROADCAST_PS_BUFFERING);
+	ieee80211_hw_set(dev, SIGNAL_DBM);
 	/* Initialize rate-control variables */
 	dev->max_rates = 1;
 	dev->max_rate_tries = RETRY_COUNT;
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 5cf509d..73067ca 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -100,7 +100,7 @@
 	select RTLWIFI_PCI
 	select RTLBTCOEXIST
 	---help---
-	This is the driver for Realtek RTL8i821AE/RTL8812AE 802.11av PCIe
+	This is the driver for Realtek RTL8821AE/RTL8812AE 802.11ac PCIe
 	wireless network adapters.
 
 	If you choose to build it as a module, it will be called rtl8821ae
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 01f56c7..0517a4f 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -394,20 +394,18 @@
 		}
 	}
 	/* <5> set hw caps */
-	hw->flags = IEEE80211_HW_SIGNAL_DBM |
-	    IEEE80211_HW_RX_INCLUDES_FCS |
-	    IEEE80211_HW_AMPDU_AGGREGATION |
-	    IEEE80211_HW_CONNECTION_MONITOR |
-	    /* IEEE80211_HW_SUPPORTS_CQM_RSSI | */
-	    IEEE80211_HW_MFP_CAPABLE |
-	    IEEE80211_HW_REPORTS_TX_ACK_STATUS | 0;
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+	ieee80211_hw_set(hw, CONNECTION_MONITOR);
+	ieee80211_hw_set(hw, MFP_CAPABLE);
+	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
 
 	/* swlps or hwlps has been set in diff chip in init_sw_vars */
-	if (rtlpriv->psc.swctrl_lps)
-		hw->flags |= IEEE80211_HW_SUPPORTS_PS |
-			IEEE80211_HW_PS_NULLFUNC_STACK |
-			/* IEEE80211_HW_SUPPORTS_DYNAMIC_PS | */
-			0;
+	if (rtlpriv->psc.swctrl_lps) {
+		ieee80211_hw_set(hw, SUPPORTS_PS);
+		ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+	}
 	hw->wiphy->interface_modes =
 	    BIT(NL80211_IFTYPE_AP) |
 	    BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
index cefe269..f2b9d11 100644
--- a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -1286,8 +1286,11 @@
 						    0x12, 0xe1, 0x90);
 			break;
 		case 3:
-			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
-						    0x3, 0xf1, 0x90);
+			/* This call breaks BT when wireless is active -
+			 * comment it out for now until a better fix is found:
+			 * btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+			 *			    0x3, 0xf1, 0x90);
+			 */
 			break;
 		case 4:
 			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h
index 82733c6..782ac2f 100644
--- a/drivers/net/wireless/rtlwifi/core.h
+++ b/drivers/net/wireless/rtlwifi/core.h
@@ -27,8 +27,7 @@
 #define __RTL_CORE_H__
 
 #define RTL_SUPPORTED_FILTERS		\
-	(FIF_PROMISC_IN_BSS | \
-	FIF_ALLMULTI | FIF_CONTROL | \
+	(FIF_ALLMULTI | FIF_CONTROL | \
 	FIF_OTHER_BSS | \
 	FIF_FCSFAIL | \
 	FIF_BCN_PRBRESP_PROMISC)
diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c
index 1893d01..a62bf0a 100644
--- a/drivers/net/wireless/rtlwifi/regd.c
+++ b/drivers/net/wireless/rtlwifi/regd.c
@@ -40,6 +40,7 @@
 	{COUNTRY_CODE_GLOBAL_DOMAIN, "JP"},
 	{COUNTRY_CODE_WORLD_WIDE_13, "EC"},
 	{COUNTRY_CODE_TELEC_NETGEAR, "EC"},
+	{COUNTRY_CODE_WORLD_WIDE_13_5G_ALL, "US"},
 };
 
 /*
@@ -124,6 +125,17 @@
 		      }
 };
 
+static const struct ieee80211_regdomain rtl_regdom_12_13_5g_all = {
+	.n_reg_rules = 4,
+	.alpha2 = "99",
+	.reg_rules = {
+			RTL819x_2GHZ_CH01_11,
+			RTL819x_2GHZ_CH12_13,
+			RTL819x_5GHZ_5150_5350,
+			RTL819x_5GHZ_5470_5850,
+		}
+};
+
 static const struct ieee80211_regdomain rtl_regdom_14 = {
 	.n_reg_rules = 3,
 	.alpha2 = "99",
@@ -348,6 +360,8 @@
 		return &rtl_regdom_14_60_64;
 	case COUNTRY_CODE_GLOBAL_DOMAIN:
 		return &rtl_regdom_14;
+	case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL:
+		return &rtl_regdom_12_13_5g_all;
 	default:
 		return &rtl_regdom_no_midband;
 	}
@@ -384,6 +398,25 @@
 	return NULL;
 }
 
+static u8 channel_plan_to_country_code(u8 channelplan)
+{
+	switch (channelplan) {
+	case 0x20:
+	case 0x21:
+		return COUNTRY_CODE_WORLD_WIDE_13;
+	case 0x22:
+		return COUNTRY_CODE_IC;
+	case 0x32:
+		return COUNTRY_CODE_TELEC_NETGEAR;
+	case 0x41:
+		return COUNTRY_CODE_GLOBAL_DOMAIN;
+	case 0x7f:
+		return COUNTRY_CODE_WORLD_WIDE_13_5G_ALL;
+	default:
+		return COUNTRY_CODE_MAX; /* Error */
+	}
+}
+
 int rtl_regd_init(struct ieee80211_hw *hw,
 		  void (*reg_notifier)(struct wiphy *wiphy,
 				       struct regulatory_request *request))
@@ -396,11 +429,12 @@
 		return -EINVAL;
 
 	/* init country_code from efuse channel plan */
-	rtlpriv->regd.country_code = rtlpriv->efuse.channel_plan;
+	rtlpriv->regd.country_code =
+		channel_plan_to_country_code(rtlpriv->efuse.channel_plan);
 
-	RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE,
-		 "rtl: EEPROM regdomain: 0x%0x\n",
-		  rtlpriv->regd.country_code);
+	RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
+		 "rtl: EEPROM regdomain: 0x%0x conuntry code: %d\n",
+		 "rtl: EEPROM regdomain: 0x%0x country code: %d\n",
 
 	if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
 		RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
diff --git a/drivers/net/wireless/rtlwifi/regd.h b/drivers/net/wireless/rtlwifi/regd.h
index 3bbbaaa..f7f15bc 100644
--- a/drivers/net/wireless/rtlwifi/regd.h
+++ b/drivers/net/wireless/rtlwifi/regd.h
@@ -49,6 +49,7 @@
 	COUNTRY_CODE_GLOBAL_DOMAIN = 10,
 	COUNTRY_CODE_WORLD_WIDE_13 = 11,
 	COUNTRY_CODE_TELEC_NETGEAR = 12,
+	COUNTRY_CODE_WORLD_WIDE_13_5G_ALL = 13,
 
 	/*add new channel plan above this line */
 	COUNTRY_CODE_MAX
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
index d930c1f..ce4da9d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
@@ -1123,23 +1123,22 @@
 void rtl88e_dm_check_txpower_tracking(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	static u8 tm_trigger;
 
 	if (!rtlpriv->dm.txpower_tracking)
 		return;
 
-	if (!tm_trigger) {
+	if (!rtlpriv->dm.tm_trigger) {
 		rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17)|BIT(16),
 			      0x03);
 		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
 			 "Trigger 88E Thermal Meter!!\n");
-		tm_trigger = 1;
+		rtlpriv->dm.tm_trigger = 1;
 		return;
 	} else {
 		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
 			 "Schedule TxPowerTracking !!\n");
 				dm_txpower_track_cb_therm(hw);
-		tm_trigger = 0;
+		rtlpriv->dm.tm_trigger = 0;
 	}
 }
 
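
The tm_trigger change recurs in the rtl8192c, rtl8192de and rtl8192se dm code below, and the same idea moves iqk_initialized into rtlphy in rtl8192cu: a function-local static is shared by every adapter the driver serves, so per-device state belongs in the per-device structure. The shape of the fix, as a sketch (assuming tm_trigger was added to the driver's struct rtl_dm in wifi.h, which the new accesses suggest):

	/* before: one flag shared by all devices */
	static u8 tm_trigger;

	/* after: one flag per rtl_priv instance */
	struct rtl_dm {
		/* ... */
		u8 tm_trigger;
	};
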
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
index 86ce5b1..8ee83b0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -1354,27 +1354,11 @@
 	}
 }
 
-static void rtl88ee_clear_interrupt(struct ieee80211_hw *hw)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	u32 tmp;
-
-	tmp = rtl_read_dword(rtlpriv, REG_HISR);
-	rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
-	tmp = rtl_read_dword(rtlpriv, REG_HISRE);
-	rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
-	tmp = rtl_read_dword(rtlpriv, REG_HSISR);
-	rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
 void rtl88ee_enable_interrupt(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	rtl88ee_clear_interrupt(hw);/*clear it here first*/
 	rtl_write_dword(rtlpriv, REG_HIMR,
 			rtlpci->irq_mask[0] & 0xFFFFFFFF);
 	rtl_write_dword(rtlpriv, REG_HIMRE,
@@ -1919,8 +1903,8 @@
 		 "dev_addr: %pM\n", rtlefuse->dev_addr);
 	/*channel plan */
 	rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
-	/* set channel paln to world wide 13 */
-	rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
+	/* set channel plan from efuse */
+	rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
 	/*tx power*/
 	_rtl88ee_read_txpower_info_from_hwpg(hw,
 					     rtlefuse->autoload_failflag,
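
The removed rtl88ee_clear_interrupt() (and its rtl8192ee/rtl8723ae twins below) used the usual write-1-to-clear idiom:

	tmp = rtl_read_dword(rtlpriv, REG_HISR);
	rtl_write_dword(rtlpriv, REG_HISR, tmp);	/* ack everything latched */

Running that immediately before re-enabling interrupts acks, and therefore discards, any events latched while interrupts were off, which is presumably why the call is dropped from the enable path rather than moved elsewhere.
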
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c
index ef28c8e..02013df 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c
@@ -23,7 +23,7 @@
  *
  *****************************************************************************/
 
-#include "pwrseqcmd.h"
+#include "../pwrseqcmd.h"
 #include "pwrseq.h"
 
 /* drivers should parse below arrays and do the corresponding actions */
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
index 7910334..f2d9c61 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
@@ -26,7 +26,7 @@
 #ifndef __RTL8723E_PWRSEQ_H__
 #define __RTL8723E_PWRSEQ_H__
 
-#include "pwrseqcmd.h"
+#include "../pwrseqcmd.h"
 /* Check document WM-20110607-Paul-RTL8188EE_Power_Architecture-R02.vsd
  *	There are 6 HW Power States:
  *	0: POFF--Power Off
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index f5ee67c..0aca6f4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -1169,23 +1169,22 @@
 						struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	static u8 tm_trigger;
 
 	if (!rtlpriv->dm.txpower_tracking)
 		return;
 
-	if (!tm_trigger) {
+	if (!rtlpriv->dm.tm_trigger) {
 		rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
 			      0x60);
 		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
 			 "Trigger 92S Thermal Meter!!\n");
-		tm_trigger = 1;
+		rtlpriv->dm.tm_trigger = 1;
 		return;
 	} else {
 		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
 			 "Schedule TxPowerTracking direct call!!\n");
 		rtl92c_dm_txpower_tracking_directcall(hw);
-		tm_trigger = 0;
+		rtlpriv->dm.tm_trigger = 0;
 	}
 }
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 29983bc..14b819e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -233,13 +233,14 @@
 	pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
 	pfwdata = (u8 *)rtlhal->pfirmware;
 	fwsize = rtlhal->fwsize;
-
 	if (IS_FW_HEADER_EXIST(pfwheader)) {
 		RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
 			 "Firmware Version(%d), Signature(%#x),Size(%d)\n",
 			  pfwheader->version, pfwheader->signature,
 			  (int)sizeof(struct rtl92c_firmware_header));
 
+		rtlhal->fw_version = pfwheader->version;
+		rtlhal->fw_subversion = pfwheader->subversion;
 		pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
 		fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
 	}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index d310d55..767358a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -840,6 +840,26 @@
 	rtl92c_set_data_filter(hw, value16);
 }
 
+static void _rtl92cu_init_beacon_parameters(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+	rtl_write_word(rtlpriv, REG_BCN_CTRL, 0x1010);
+
+	/* TODO: Remove these magic number */
+	/* TODO: Remove these magic numbers */
+	rtl_write_byte(rtlpriv, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);
+	rtl_write_byte(rtlpriv, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME);
+	/* Change beacon AIFS to the largest number
+	 * because the test chip does not contend before sending beacons.
+	 */
+	if (IS_NORMAL_CHIP(rtlhal->version))
+		rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660F);
+	else
+		rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
+}
+
 static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -887,9 +907,9 @@
 	_rtl92cu_init_usb_aggregation(hw);
 	rtlpriv->cfg->ops->set_bw_mode(hw, NL80211_CHAN_HT20);
 	rtl92c_set_min_space(hw, IS_92C_SERIAL(rtlhal->version));
-	rtl92c_init_beacon_parameters(hw, rtlhal->version);
+	_rtl92cu_init_beacon_parameters(hw);
 	rtl92c_init_ampdu_aggregation(hw);
-	rtl92c_init_beacon_max_error(hw, true);
+	rtl92c_init_beacon_max_error(hw);
 	return err;
 }
 
@@ -987,7 +1007,6 @@
 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 	int err = 0;
-	static bool iqk_initialized;
 	unsigned long flags;
 
 	/* As this function can take a very long time (up to 350 ms)
@@ -1038,11 +1057,11 @@
 	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
 	if (ppsc->rfpwr_state == ERFON) {
 		rtl92c_phy_set_rfpath_switch(hw, 1);
-		if (iqk_initialized) {
+		if (rtlphy->iqk_initialized) {
 			rtl92c_phy_iq_calibrate(hw, true);
 		} else {
 			rtl92c_phy_iq_calibrate(hw, false);
-			iqk_initialized = true;
+			rtlphy->iqk_initialized = true;
 		}
 		rtl92c_dm_check_txpower_tracking(hw);
 		rtl92c_phy_lc_calibrate(hw);
@@ -1323,7 +1342,6 @@
 	enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
 
 	bt_msr &= 0xfc;
-	rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF);
 	if (type == NL80211_IFTYPE_UNSPECIFIED || type ==
 	    NL80211_IFTYPE_STATION) {
 		_rtl92cu_stop_tx_beacon(hw);
@@ -1392,6 +1410,9 @@
 		_CardDisableHWSM(hw);
 	else
 		_CardDisableWithoutHWSM(hw);
+
+	/* after power off we should do iqk again */
+	rtlpriv->phy.iqk_initialized = false;
 }
 
 void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
@@ -1452,25 +1473,6 @@
 	return 0;
 }
 
-static void _InitBeaconParameters(struct ieee80211_hw *hw)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
-
-	rtl_write_word(rtlpriv, REG_BCN_CTRL, 0x1010);
-
-	/* TODO: Remove these magic number */
-	rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x6404);
-	rtl_write_byte(rtlpriv, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);
-	rtl_write_byte(rtlpriv, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME);
-	/* Change beacon AIFS to the largest number
-	 * beacause test chip does not contension before sending beacon. */
-	if (IS_NORMAL_CHIP(rtlhal->version))
-		rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660F);
-	else
-		rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
-}
-
 static void _beacon_function_enable(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1491,7 +1493,7 @@
 	atim_window = 2;	/*FIX MERGE */
 	rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
 	rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
-	_InitBeaconParameters(hw);
+	_rtl92cu_init_beacon_parameters(hw);
 	rtl_write_byte(rtlpriv, REG_SLOT, 0x09);
 	/*
 	 * Force beacon frame transmission even after receiving beacon frame
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index adb8107..490a7cf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -613,7 +613,7 @@
 	rtl_write_word(rtlpriv, 0x4CA, 0x0708);
 }
 
-void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode)
+void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 
@@ -641,21 +641,6 @@
 	rtl_write_byte(rtlpriv, REG_ACKTO, 0x40);
 }
 
-void rtl92c_init_beacon_parameters(struct ieee80211_hw *hw,
-				   enum version_8192c version)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
-
-	rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x6404);/* ms */
-	rtl_write_byte(rtlpriv, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);/*ms*/
-	rtl_write_byte(rtlpriv, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME);
-	if (IS_NORMAL_CHIP(rtlhal->version))
-		rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660F);
-	else
-		rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
-}
-
 void rtl92c_disable_fast_edca(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
index bf53652..e34f0f1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
@@ -66,13 +66,10 @@
 
 void rtl92c_init_edca(struct ieee80211_hw *hw);
 void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw);
-void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode);
+void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw);
 void rtl92c_init_rdg_setting(struct ieee80211_hw *hw);
 void rtl92c_init_retry_function(struct ieee80211_hw *hw);
 
-void rtl92c_init_beacon_parameters(struct ieee80211_hw *hw,
-				   enum version_8192c version);
-
 void rtl92c_disable_fast_edca(struct ieee80211_hw *hw);
 void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T);
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 12f6d47..c972fa5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -108,13 +108,8 @@
 bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw)
 {
 	bool rtstatus;
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	bool is92c = IS_92C_SERIAL(rtlhal->version);
 
 	rtstatus = _rtl92cu_phy_config_mac_with_headerfile(hw);
-	if (is92c && IS_HARDWARE_TYPE_8192CE(rtlhal))
-		rtl_write_byte(rtlpriv, 0x14, 0x71);
 	return rtstatus;
 }
 
@@ -122,7 +117,6 @@
 {
 	bool rtstatus = true;
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 	u16 regval;
 	u32 regval32;
 	u8 b_reg_hwparafile = 1;
@@ -134,17 +128,11 @@
 	rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x83);
 	rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL + 1, 0xdb);
 	rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
-	if (IS_HARDWARE_TYPE_8192CE(rtlhal)) {
-		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_PPLL | FEN_PCIEA |
-			       FEN_DIO_PCIE |	FEN_BB_GLB_RSTn | FEN_BBRSTB);
-	} else if (IS_HARDWARE_TYPE_8192CU(rtlhal)) {
-		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_USBA | FEN_USBD |
-			       FEN_BB_GLB_RSTn | FEN_BBRSTB);
-	}
+	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_USBA | FEN_USBD |
+		       FEN_BB_GLB_RSTn | FEN_BBRSTB);
 	regval32 = rtl_read_dword(rtlpriv, 0x87c);
 	rtl_write_dword(rtlpriv, 0x87c, regval32 & (~BIT(31)));
-	if (IS_HARDWARE_TYPE_8192CU(rtlhal))
-		rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
+	rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
 	rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
 	if (b_reg_hwparafile == 1)
 		rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
@@ -162,7 +150,7 @@
 	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_Array\n");
 	arraylength =  rtlphy->hwparam_tables[MAC_REG].length ;
 	ptrarray = rtlphy->hwparam_tables[MAC_REG].pdata;
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Img:RTL8192CEMAC_2T_ARRAY\n");
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Img:RTL8192CUMAC_2T_ARRAY\n");
 	for (i = 0; i < arraylength; i = i + 2)
 		rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
 	return true;
@@ -259,18 +247,18 @@
 		radiob_arraylen = rtlphy->hwparam_tables[RADIOB_2T].length;
 		radiob_array_table = rtlphy->hwparam_tables[RADIOB_2T].pdata;
 		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-			 "Radio_A:RTL8192CERADIOA_2TARRAY\n");
+			 "Radio_A:RTL8192CURADIOA_2TARRAY\n");
 		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-			 "Radio_B:RTL8192CE_RADIOB_2TARRAY\n");
+			 "Radio_B:RTL8192CU_RADIOB_2TARRAY\n");
 	} else {
 		radioa_arraylen = rtlphy->hwparam_tables[RADIOA_1T].length;
 		radioa_array_table = rtlphy->hwparam_tables[RADIOA_1T].pdata;
 		radiob_arraylen = rtlphy->hwparam_tables[RADIOB_1T].length;
 		radiob_array_table = rtlphy->hwparam_tables[RADIOB_1T].pdata;
 		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-			 "Radio_A:RTL8192CE_RADIOA_1TARRAY\n");
+			 "Radio_A:RTL8192CU_RADIOA_1TARRAY\n");
 		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-			 "Radio_B:RTL8192CE_RADIOB_1TARRAY\n");
+			 "Radio_B:RTL8192CU_RADIOB_1TARRAY\n");
 	}
 	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
 	switch (rfpath) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
index b878d56..5624ade 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -66,7 +66,6 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
-	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
 	u32 tx_agc[2] = { 0, 0 }, tmpval = 0;
@@ -74,14 +73,8 @@
 	u8 idx1, idx2;
 	u8 *ptr;
 
-	if (rtlhal->interface == INTF_PCI) {
-		if (rtlefuse->eeprom_regulatory != 0)
-			turbo_scanoff = true;
-	} else {
-		if ((rtlefuse->eeprom_regulatory != 0) ||
-		    (rtlefuse->external_pa))
-			turbo_scanoff = true;
-	}
+	if ((rtlefuse->eeprom_regulatory != 0) || (rtlefuse->external_pa))
+		turbo_scanoff = true;
 	if (mac->act_scanning) {
 		tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
 		tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
@@ -90,11 +83,8 @@
 			    (ppowerlevel[idx1] << 8) |
 			    (ppowerlevel[idx1] << 16) |
 			    (ppowerlevel[idx1] << 24);
-			if (rtlhal->interface == INTF_USB) {
-				if (tx_agc[idx1] > 0x20 &&
-				    rtlefuse->external_pa)
-					tx_agc[idx1] = 0x20;
-			}
+			if (tx_agc[idx1] > 0x20 && rtlefuse->external_pa)
+				tx_agc[idx1] = 0x20;
 		}
 	} else {
 		if (rtlpriv->dm.dynamic_txhighpower_lvl ==
@@ -452,9 +442,6 @@
 		udelay(1);
 		switch (rfpath) {
 		case RF90_PATH_A:
-			rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw,
-					(enum radio_path) rfpath);
-			break;
 		case RF90_PATH_B:
 			rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw,
 					(enum radio_path) rfpath);
@@ -483,7 +470,6 @@
 		}
 	}
 	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
-	return rtstatus;
 phy_rf_cfg_fail:
 	return rtstatus;
 }
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index cbead00..95880fe 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -321,7 +321,7 @@
 	stats->rate = (u8) GET_RX_DESC_RX_MCS(pdesc);
 	stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
 	stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
-	stats->isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
+	stats->isfirst_ampdu = (bool)((GET_RX_DESC_PAGGR(pdesc) == 1)
 				   && (GET_RX_DESC_FAGGR(pdesc) == 1));
 	stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
 	stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index a1be5a6..587b8c5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -1240,23 +1240,22 @@
 void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	static u8 tm_trigger;
 
 	if (!rtlpriv->dm.txpower_tracking)
 		return;
 
-	if (!tm_trigger) {
+	if (!rtlpriv->dm.tm_trigger) {
 		rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) |
 			      BIT(16), 0x03);
 		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
 			 "Trigger 92S Thermal Meter!!\n");
-		tm_trigger = 1;
+		rtlpriv->dm.tm_trigger = 1;
 		return;
 	} else {
 		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
 			 "Schedule TxPowerTracking direct call!!\n");
 		rtl92d_dm_txpower_tracking_callback_thermalmeter(hw);
-		tm_trigger = 0;
+		rtlpriv->dm.tm_trigger = 0;
 	}
 }
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
index c5d4b80..232865c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
@@ -875,7 +875,7 @@
 		break;
 	default:
 		RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-			 "[C2H], Unkown packet!! CmdId(%#X)!\n", c2h_cmd_id);
+			 "[C2H], Unknown packet!! CmdId(%#X)!\n", c2h_cmd_id);
 		break;
 	}
 }
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
index da0a612..5f14308 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
@@ -1584,28 +1584,11 @@
 	}
 }
 
-static void rtl92ee_clear_interrupt(struct ieee80211_hw *hw)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	u32 tmp;
-
-	tmp = rtl_read_dword(rtlpriv, REG_HISR);
-	rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
-	tmp = rtl_read_dword(rtlpriv, REG_HISRE);
-	rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
-	tmp = rtl_read_dword(rtlpriv, REG_HSISR);
-	rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
 void rtl92ee_enable_interrupt(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	rtl92ee_clear_interrupt(hw);/*clear it here first*/
-
 	rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
 	rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
 	rtlpci->irq_enabled = true;
@@ -2194,8 +2177,8 @@
 		 "dev_addr: %pM\n", rtlefuse->dev_addr);
 	/*channel plan */
 	rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
-	/* set channel paln to world wide 13 */
-	rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
+	/* set channel plan from efuse */
+	rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
 	/*tx power*/
 	_rtl92ee_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag,
 					     hwinfo);
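
The rtl8192ee change above (repeated below for rtl8723ae, rtl8723be and rtl8821ae) deletes a helper that acknowledged all pending interrupt status before enabling interrupts. The HISR registers are write-1-to-clear: writing back the value just read clears exactly those bits, so clearing them before the handler ever runs can silently discard events. A self-contained simulation of W1C semantics (plain variables, not the driver's MMIO accessors):

    #include <stdio.h>

    static unsigned int hisr = 0x05;    /* bits 0 and 2 pending */

    /* W1C: writing 1 to a bit clears it, writing 0 leaves it alone. */
    static void hisr_write(unsigned int val)
    {
        hisr &= ~val;
    }

    int main(void)
    {
        unsigned int seen = hisr;       /* read the status register */

        printf("pending before: 0x%02x\n", hisr);
        hisr_write(seen);               /* ack every bit we just read */
        printf("pending after:  0x%02x\n", hisr);   /* 0x00 - events gone */
        return 0;
    }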
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
index 575980b..9bae5a9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -200,7 +200,6 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
-	static u8 tm_trigger;
 	u8 tx_power_checkcnt = 5;
 
 	/* 2T2R TP issue */
@@ -215,13 +214,13 @@
 		return;
 	}
 
-	if (!tm_trigger) {
+	if (!rtlpriv->dm.tm_trigger) {
 		rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER,
 			      RFREG_OFFSET_MASK, 0x60);
-		tm_trigger = 1;
+		rtlpriv->dm.tm_trigger = 1;
 	} else {
 		_rtl92s_dm_txpowertracking_callback_thermalmeter(hw);
-		tm_trigger = 0;
+		rtlpriv->dm.tm_trigger = 0;
 	}
 }
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
index 67bb47d..a4b7eac 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
@@ -1258,18 +1258,6 @@
 	}
 }
 
-static void rtl8723e_clear_interrupt(struct ieee80211_hw *hw)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	u32 tmp;
-
-	tmp = rtl_read_dword(rtlpriv, REG_HISR);
-	rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
-	tmp = rtl_read_dword(rtlpriv, REG_HISRE);
-	rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-}
-
 void rtl8723e_enable_interrupt(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1284,7 +1272,6 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-	rtl8723e_clear_interrupt(hw);/*clear it here first*/
 	rtl_write_dword(rtlpriv, 0x3a8, IMR8190_DISABLED);
 	rtl_write_dword(rtlpriv, 0x3ac, IMR8190_DISABLED);
 	rtlpci->irq_enabled = false;
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
index e77c3a4..3a81cdb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
@@ -909,23 +909,22 @@
 void rtl8723be_dm_check_txpower_tracking(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	static u8 tm_trigger;
 
 	if (!rtlpriv->dm.txpower_tracking)
 		return;
 
-	if (!tm_trigger) {
+	if (!rtlpriv->dm.tm_trigger) {
 		rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) | BIT(16),
 			      0x03);
 		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
 			 "Trigger 8723be Thermal Meter!!\n");
-		tm_trigger = 1;
+		rtlpriv->dm.tm_trigger = 1;
 		return;
 	} else {
 		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
 			 "Schedule TxPowerTracking !!\n");
 		rtl8723be_dm_txpower_tracking_callback_thermalmeter(hw);
-		tm_trigger = 0;
+		rtlpriv->dm.tm_trigger = 0;
 	}
 }
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/rtlwifi/rtl8723be/fw.c
index 69d4f0f..d5da0f3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/fw.c
@@ -613,7 +613,7 @@
 		break;
 	default:
 		RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-			 "[C2H], Unkown packet!! CmdId(%#X)!\n", c2h_cmd_id);
+			 "[C2H], Unknown packet!! CmdId(%#X)!\n", c2h_cmd_id);
 		break;
 	}
 }
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
index b681af3..c983d2f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
@@ -1634,28 +1634,11 @@
 	}
 }
 
-static void rtl8723be_clear_interrupt(struct ieee80211_hw *hw)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	u32 tmp;
-
-	tmp = rtl_read_dword(rtlpriv, REG_HISR);
-	rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
-	tmp = rtl_read_dword(rtlpriv, REG_HISRE);
-	rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
-	tmp = rtl_read_dword(rtlpriv, REG_HSISR);
-	rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
 void rtl8723be_enable_interrupt(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	rtl8723be_clear_interrupt(hw);/*clear it here first*/
-
 	rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
 	rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
 	rtlpci->irq_enabled = true;
@@ -2139,8 +2122,8 @@
 	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
 		 "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
 
-	/* set channel plan to world wide 13 */
-	rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
+	/* set channel plan from efuse */
+	rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
 
 	if (rtlhal->oem_id == RT_CID_DEFAULT) {
 		/* Does this one have a Toshiba SMID from group 1? */
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
index 342678d..b57cfd9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
@@ -1068,20 +1068,18 @@
 		struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	static u8 tm_trigger;
 
-	if (!tm_trigger) {
+	if (!rtlpriv->dm.tm_trigger) {
 		rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER_88E,
 			      BIT(17) | BIT(16), 0x03);
 		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
 			 "Trigger 8812 Thermal Meter!!\n");
-		tm_trigger = 1;
+		rtlpriv->dm.tm_trigger = 1;
 		return;
 	}
 	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
 		 "Schedule TxPowerTracking direct call!!\n");
 	rtl8812ae_dm_txpower_tracking_callback_thermalmeter(hw);
-	tm_trigger = 0;
 }
 
 static void rtl8821ae_dm_iq_calibrate(struct ieee80211_hw *hw)
@@ -2519,21 +2517,19 @@
 void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	static u8 tm_trigger;
-
-	if (!tm_trigger) {
+	if (!rtlpriv->dm.tm_trigger) {
 		rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER_88E, BIT(17)|BIT(16),
 			      0x03);
 		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
 			 "Trigger 8821ae Thermal Meter!!\n");
-		tm_trigger = 1;
+		rtlpriv->dm.tm_trigger = 1;
 		return;
 	} else {
 		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
 			 "Schedule TxPowerTracking !!\n");
 
 		rtl8821ae_dm_txpower_tracking_callback_thermalmeter(hw);
-		tm_trigger = 0;
+		rtlpriv->dm.tm_trigger = 0;
 	}
 }
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
index 8704eee..3236d44 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
@@ -2253,31 +2253,11 @@
 	}
 }
 
-static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	u32 tmp;
-	tmp = rtl_read_dword(rtlpriv, REG_HISR);
-	/*printk("clear interrupt first:\n");
-	printk("0x%x = 0x%08x\n",REG_HISR, tmp);*/
-	rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
-	tmp = rtl_read_dword(rtlpriv, REG_HISRE);
-	/*printk("0x%x = 0x%08x\n",REG_HISRE, tmp);*/
-	rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
-	tmp = rtl_read_dword(rtlpriv, REG_HSISR);
-	/*printk("0x%x = 0x%08x\n",REG_HSISR, tmp);*/
-	rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
 void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	rtl8821ae_clear_interrupt(hw);/*clear it here first*/
-
 	rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
 	rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
 	rtlpci->irq_enabled = true;
@@ -3232,8 +3212,8 @@
 	if (rtlefuse->eeprom_channelplan == 0xff)
 		rtlefuse->eeprom_channelplan = 0x7F;
 
-	/* set channel paln to world wide 13 */
-	/* rtlefuse->channel_plan = (u8)rtlefuse->eeprom_channelplan; */
+	/* set channel plan from efuse */
+	rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
 
 	/*parse xtal*/
 	rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8821AE];
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 5157291..2b770b5 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1665,6 +1665,7 @@
 	u8 last_dtp_lvl;
 	u8 thermalvalue_avg[AVG_THERMAL_NUM];
 	u8 thermalvalue_avg_index;
+	u8 tm_trigger;
 	bool done_txpower;
 	u8 dynamic_txhighpower_lvl;	/*Tx high power level */
 	u8 dm_flag;		/*Indicate each dynamic mechanism's status. */
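
The recurring tm_trigger conversion in the rtl8192de/8192se/8723be/8821ae dm code replaces a function-local static u8 with the per-device tm_trigger field added to wifi.h above. A static local is a single variable shared by every adapter the module drives, so two NICs would corrupt each other's thermal-meter state. A minimal userspace sketch of the pitfall (hypothetical types, not the driver's):

    #include <stdio.h>

    struct dev { int id; unsigned char tm_trigger; };

    /* Buggy: one flag shared across all devices. */
    static void track_static(struct dev *d)
    {
        static unsigned char tm_trigger;    /* shared by every caller */

        tm_trigger = !tm_trigger;
        printf("dev %d static flag now %u\n", d->id, tm_trigger);
    }

    /* Fixed: the flag lives in the device instance. */
    static void track_member(struct dev *d)
    {
        d->tm_trigger = !d->tm_trigger;
        printf("dev %d member flag now %u\n", d->id, d->tm_trigger);
    }

    int main(void)
    {
        struct dev a = { 0, 0 }, b = { 1, 0 };

        track_static(&a);    /* flag: 1 */
        track_static(&b);    /* flag flips back to 0 - devices interfere */
        track_member(&a);    /* a's flag: 1 */
        track_member(&b);    /* b's flag: 1 - independent */
        return 0;
    }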
diff --git a/drivers/net/wireless/ti/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c
index 5695628..d6fbdda 100644
--- a/drivers/net/wireless/ti/wl1251/acx.c
+++ b/drivers/net/wireless/ti/wl1251/acx.c
@@ -53,10 +53,7 @@
 		mac->mac[i] = wl->mac_addr[ETH_ALEN - 1 - i];
 
 	ret = wl1251_cmd_configure(wl, DOT11_STATION_ID, mac, sizeof(*mac));
-	if (ret < 0)
-		goto out;
 
-out:
 	kfree(mac);
 	return ret;
 }
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 5d54d16..cd47779 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -763,8 +763,7 @@
 	return (u64)(unsigned long)fp;
 }
 
-#define WL1251_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
-				  FIF_ALLMULTI | \
+#define WL1251_SUPPORTED_FILTERS (FIF_ALLMULTI | \
 				  FIF_FCSFAIL | \
 				  FIF_BCN_PRBRESP_PROMISC | \
 				  FIF_CONTROL | \
@@ -795,10 +794,6 @@
 	wl->rx_config = WL1251_DEFAULT_RX_CONFIG;
 	wl->rx_filter = WL1251_DEFAULT_RX_FILTER;
 
-	if (*total & FIF_PROMISC_IN_BSS) {
-		wl->rx_config |= CFG_BSSID_FILTER_EN;
-		wl->rx_config |= CFG_RX_ALL_GOOD;
-	}
 	if (*total & FIF_ALLMULTI)
 		/*
 		 * CFG_MC_FILTER_EN in rx_config needs to be 0 to receive
@@ -825,7 +820,7 @@
 	if (ret < 0)
 		goto out;
 
-	if (*total & FIF_ALLMULTI || *total & FIF_PROMISC_IN_BSS)
+	if (*total & FIF_ALLMULTI)
 		ret = wl1251_acx_group_address_tbl(wl, false, NULL, 0);
 	else if (fp)
 		ret = wl1251_acx_group_address_tbl(wl, fp->enabled,
@@ -1481,7 +1476,8 @@
 	/* unit us */
 	/* FIXME: find a proper value */
 
-	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SUPPORTS_PS;
+	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
+	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
 
 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 					 BIT(NL80211_IFTYPE_ADHOC);
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 717c4f5..49aca2c 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -24,6 +24,7 @@
 #include <linux/ip.h>
 #include <linux/firmware.h>
 #include <linux/etherdevice.h>
+#include <linux/irq.h>
 
 #include "../wlcore/wlcore.h"
 #include "../wlcore/debug.h"
@@ -578,7 +579,7 @@
 
 static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = {
 	[PART_TOP_PRCM_ELP_SOC] = {
-		.mem  = { .start = 0x00A02000, .size  = 0x00010000 },
+		.mem  = { .start = 0x00A00000, .size  = 0x00012000 },
 		.reg  = { .start = 0x00807000, .size  = 0x00005000 },
 		.mem2 = { .start = 0x00800000, .size  = 0x0000B000 },
 		.mem3 = { .start = 0x00000000, .size  = 0x00000000 },
@@ -862,6 +863,7 @@
 {
 	u32 tmp;
 	int ret;
+	u16 irq_invert;
 
 	BUILD_BUG_ON(sizeof(struct wl18xx_mac_and_phy_params) >
 		WL18XX_PHY_INIT_MEM_SIZE);
@@ -911,6 +913,28 @@
 	/* re-enable FDSP clock */
 	ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1,
 			     MEM_FDSP_CLK_120_ENABLE);
+	if (ret < 0)
+		goto out;
+
+	ret = irq_get_trigger_type(wl->irq);
+	if ((ret == IRQ_TYPE_LEVEL_LOW) || (ret == IRQ_TYPE_EDGE_FALLING)) {
+		wl1271_info("using inverted interrupt logic: %d", ret);
+		ret = wlcore_set_partition(wl,
+					   &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
+		if (ret < 0)
+			goto out;
+
+		ret = wl18xx_top_reg_read(wl, TOP_FN0_CCCR_REG_32, &irq_invert);
+		if (ret < 0)
+			goto out;
+
+		irq_invert |= BIT(1);
+		ret = wl18xx_top_reg_write(wl, TOP_FN0_CCCR_REG_32, irq_invert);
+		if (ret < 0)
+			goto out;
+
+		ret = wlcore_set_partition(wl, &wl->ptable[PART_PHY_INIT]);
+	}
 
 out:
 	return ret;
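
The wl18xx hunk above detects an active-low or falling-edge IRQ line via irq_get_trigger_type() and, when found, sets bit 1 of TOP_FN0_CCCR_REG_32 so the chip drives the interrupt with inverted polarity; the register is only reachable after switching to the ELP/SoC partition, hence the wlcore_set_partition() calls around it. The register update itself is a plain read-modify-write, simulated standalone here:

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    static unsigned int cccr = 0x00;    /* stand-in for TOP_FN0_CCCR_REG_32 */

    int main(void)
    {
        unsigned int irq_invert;

        irq_invert = cccr;          /* read current register value */
        irq_invert |= BIT(1);       /* request inverted IRQ polarity */
        cccr = irq_invert;          /* write it back */
        printf("CCCR is now 0x%02x\n", cccr);
        return 0;
    }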
@@ -1351,9 +1375,10 @@
 }
 
 #define WL18XX_CONF_FILE_NAME "ti-connectivity/wl18xx-conf.bin"
-static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
+
+static int wl18xx_load_conf_file(struct device *dev, struct wlcore_conf *conf,
+				 struct wl18xx_priv_conf *priv_conf)
 {
-	struct wl18xx_priv *priv = wl->priv;
 	struct wlcore_conf_file *conf_file;
 	const struct firmware *fw;
 	int ret;
@@ -1362,14 +1387,14 @@
 	if (ret < 0) {
 		wl1271_error("could not get configuration binary %s: %d",
 			     WL18XX_CONF_FILE_NAME, ret);
-		goto out_fallback;
+		return ret;
 	}
 
 	if (fw->size != WL18XX_CONF_SIZE) {
 		wl1271_error("configuration binary file size is wrong, expected %zu got %zu",
 			     WL18XX_CONF_SIZE, fw->size);
 		ret = -EINVAL;
-		goto out;
+		goto out_release;
 	}
 
 	conf_file = (struct wlcore_conf_file *) fw->data;
@@ -1379,7 +1404,7 @@
 			     "expected 0x%0x got 0x%0x", WL18XX_CONF_MAGIC,
 			     conf_file->header.magic);
 		ret = -EINVAL;
-		goto out;
+		goto out_release;
 	}
 
 	if (conf_file->header.version != cpu_to_le32(WL18XX_CONF_VERSION)) {
@@ -1387,30 +1412,34 @@
 			     "expected 0x%08x got 0x%08x",
 			     WL18XX_CONF_VERSION, conf_file->header.version);
 		ret = -EINVAL;
-		goto out;
+		goto out_release;
 	}
 
-	memcpy(&wl->conf, &conf_file->core, sizeof(wl18xx_conf));
-	memcpy(&priv->conf, &conf_file->priv, sizeof(priv->conf));
+	memcpy(conf, &conf_file->core, sizeof(*conf));
+	memcpy(priv_conf, &conf_file->priv, sizeof(*priv_conf));
 
-	goto out;
-
-out_fallback:
-	wl1271_warning("falling back to default config");
-
-	/* apply driver default configuration */
-	memcpy(&wl->conf, &wl18xx_conf, sizeof(wl18xx_conf));
-	/* apply default private configuration */
-	memcpy(&priv->conf, &wl18xx_default_priv_conf, sizeof(priv->conf));
-
-	/* For now we just fallback */
-	return 0;
-
-out:
+out_release:
 	release_firmware(fw);
 	return ret;
 }
 
+static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
+{
+	struct wl18xx_priv *priv = wl->priv;
+
+	if (wl18xx_load_conf_file(dev, &wl->conf, &priv->conf) < 0) {
+		wl1271_warning("falling back to default config");
+
+		/* apply driver default configuration */
+		memcpy(&wl->conf, &wl18xx_conf, sizeof(wl->conf));
+		/* apply default private configuration */
+		memcpy(&priv->conf, &wl18xx_default_priv_conf,
+		       sizeof(priv->conf));
+	}
+
+	return 0;
+}
+
 static int wl18xx_plt_init(struct wl1271 *wl)
 {
 	int ret;
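
Splitting wl18xx_load_conf_file() out of wl18xx_conf_init() above leaves a single release_firmware() path and turns the fallback to built-in defaults into one if-statement. The checks it performs on the blob (exact size, then magic, then version) are the usual firmware sanity gauntlet; a standalone sketch with hypothetical constants:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define CONF_MAGIC   0x10e100caU    /* hypothetical magic value */
    #define CONF_VERSION 0x00000501U    /* hypothetical version */

    struct conf_header {
        uint32_t magic;
        uint32_t version;
    };

    /* Returns 0 when the blob passes the size/magic/version checks. */
    static int validate_conf(const uint8_t *data, size_t len, size_t want)
    {
        struct conf_header hdr;

        if (len != want)
            return -1;                  /* wrong file size */
        memcpy(&hdr, data, sizeof(hdr));
        if (hdr.magic != CONF_MAGIC)
            return -2;                  /* not a config file */
        if (hdr.version != CONF_VERSION)
            return -3;                  /* incompatible layout */
        return 0;
    }

    int main(void)
    {
        uint8_t blob[8];
        uint32_t magic = CONF_MAGIC, version = CONF_VERSION;

        memcpy(blob, &magic, sizeof(magic));
        memcpy(blob + 4, &version, sizeof(version));
        printf("good blob -> %d\n", validate_conf(blob, sizeof(blob), 8));
        blob[0] ^= 0xff;
        printf("bad magic -> %d\n", validate_conf(blob, sizeof(blob), 8));
        return 0;
    }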
diff --git a/drivers/net/wireless/ti/wl18xx/reg.h b/drivers/net/wireless/ti/wl18xx/reg.h
index a433a75..bac2364 100644
--- a/drivers/net/wireless/ti/wl18xx/reg.h
+++ b/drivers/net/wireless/ti/wl18xx/reg.h
@@ -109,6 +109,7 @@
 
 #define WL18XX_WELP_ARM_COMMAND		(WL18XX_REGISTERS_BASE + 0x7100)
 #define WL18XX_ENABLE			(WL18XX_REGISTERS_BASE + 0x01543C)
+#define TOP_FN0_CCCR_REG_32		(WL18XX_TOP_OCP_BASE + 0x64)
 
 /* PRCM registers */
 #define PLATFORM_DETECTION		0xA0E3E0
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 0be8079..337223b 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -3175,8 +3175,7 @@
 	return (u64)(unsigned long)fp;
 }
 
-#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
-				  FIF_ALLMULTI | \
+#define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
 				  FIF_FCSFAIL | \
 				  FIF_BCN_PRBRESP_PROMISC | \
 				  FIF_CONTROL | \
@@ -5966,10 +5965,6 @@
 {
 	int ret;
 
-	ret = wl12xx_set_power_on(wl);
-	if (ret < 0)
-		return ret;
-
 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
 	if (ret < 0)
 		goto out;
@@ -5985,7 +5980,6 @@
 		ret = wl->ops->get_mac(wl);
 
 out:
-	wl1271_power_off(wl);
 	return ret;
 }
 
@@ -6066,18 +6060,19 @@
 	/* FIXME: find a proper value */
 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
 
-	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
-		IEEE80211_HW_SUPPORTS_PS |
-		IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
-		IEEE80211_HW_HAS_RATE_CONTROL |
-		IEEE80211_HW_CONNECTION_MONITOR |
-		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
-		IEEE80211_HW_SPECTRUM_MGMT |
-		IEEE80211_HW_AP_LINK_PS |
-		IEEE80211_HW_AMPDU_AGGREGATION |
-		IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
-		IEEE80211_HW_QUEUE_CONTROL |
-		IEEE80211_HW_CHANCTX_STA_CSA;
+	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
+	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
+	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
+	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
+	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
+	ieee80211_hw_set(wl->hw, AP_LINK_PS);
+	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
+	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
+	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
+	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
+	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
+	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
 
 	wl->hw->wiphy->cipher_suites = cipher_suites;
 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -6432,10 +6427,22 @@
 	else
 		wl->irq_flags |= IRQF_ONESHOT;
 
+	ret = wl12xx_set_power_on(wl);
+	if (ret < 0)
+		goto out_free_nvs;
+
+	ret = wl12xx_get_hw_info(wl);
+	if (ret < 0) {
+		wl1271_error("couldn't get hw info");
+		wl1271_power_off(wl);
+		goto out_free_nvs;
+	}
+
 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
 				   wl->irq_flags, pdev->name, wl);
 	if (ret < 0) {
-		wl1271_error("request_irq() failed: %d", ret);
+		wl1271_error("interrupt configuration failed");
+		wl1271_power_off(wl);
 		goto out_free_nvs;
 	}
 
@@ -6449,12 +6456,7 @@
 	}
 #endif
 	disable_irq(wl->irq);
-
-	ret = wl12xx_get_hw_info(wl);
-	if (ret < 0) {
-		wl1271_error("couldn't get hw info");
-		goto out_irq;
-	}
+	wl1271_power_off(wl);
 
 	ret = wl->ops->identify_chip(wl);
 	if (ret < 0)
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index e7af261..e539d9b 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -1230,7 +1230,7 @@
 }
 
 #define SUPPORTED_FIF_FLAGS \
-	(FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | \
+	(FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | \
 	FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC)
 static void zd_op_configure_filter(struct ieee80211_hw *hw,
 			unsigned int changed_flags,
@@ -1256,7 +1256,7 @@
 	 * we will have some issue with IPv6 which uses multicast for link
 	 * layer address resolution.
 	 */
-	if (*new_flags & (FIF_PROMISC_IN_BSS | FIF_ALLMULTI))
+	if (*new_flags & FIF_ALLMULTI)
 		zd_mc_add_all(&hash);
 
 	spin_lock_irqsave(&mac->lock, flags);
@@ -1397,10 +1397,10 @@
 
 	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
 
-	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
-		    IEEE80211_HW_SIGNAL_UNSPEC |
-		    IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
-		    IEEE80211_HW_MFP_CAPABLE;
+	ieee80211_hw_set(hw, MFP_CAPABLE);
+	ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+	ieee80211_hw_set(hw, SIGNAL_UNSPEC);
 
 	hw->wiphy->interface_modes =
 		BIT(NL80211_IFTYPE_MESH_POINT) |
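
The wl1251, wlcore and zd1211rw conversions above all stop assigning IEEE80211_HW_* flags as one hw->flags bitmask and instead call ieee80211_hw_set() per flag: mac80211 outgrew 32 hardware flags this cycle, so they now live in a bitmap rather than a single word. A userspace sketch of the same bitmap-flag pattern (miniature flag set, hypothetical names):

    #include <limits.h>
    #include <stdio.h>

    enum hw_flags { HW_SIGNAL_DBM, HW_SUPPORTS_PS, HW_MFP_CAPABLE,
                    NUM_HW_FLAGS };

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
    #define FLAG_WORDS ((NUM_HW_FLAGS + BITS_PER_LONG - 1) / BITS_PER_LONG)

    struct hw { unsigned long flags[FLAG_WORDS]; };

    /* Analogue of ieee80211_hw_set(hw, FLAG): set one bit in the bitmap. */
    #define hw_set(h, f) \
        ((h)->flags[(f) / BITS_PER_LONG] |= 1UL << ((f) % BITS_PER_LONG))
    #define hw_check(h, f) \
        (!!((h)->flags[(f) / BITS_PER_LONG] & (1UL << ((f) % BITS_PER_LONG))))

    int main(void)
    {
        struct hw hw = { { 0 } };

        hw_set(&hw, HW_SIGNAL_DBM);
        hw_set(&hw, HW_SUPPORTS_PS);
        printf("SIGNAL_DBM=%d MFP=%d\n",
               hw_check(&hw, HW_SIGNAL_DBM), hw_check(&hw, HW_MFP_CAPABLE));
        return 0;
    }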
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0d25943..5485f91 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -52,7 +52,7 @@
  * event channels are limited resource. Split event channels are
  * enabled by default.
  */
-bool separate_tx_rx_irq = 1;
+bool separate_tx_rx_irq = true;
 module_param(separate_tx_rx_irq, bool, 0644);
 
 /* The time that packets can stay on the guest Rx internal queue
@@ -515,14 +515,9 @@
 
 	while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
 	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
-		RING_IDX old_req_cons;
-		RING_IDX ring_slots_used;
-
 		queue->last_rx_time = jiffies;
 
-		old_req_cons = queue->rx.req_cons;
 		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
-		ring_slots_used = queue->rx.req_cons - old_req_cons;
 
 		__skb_queue_tail(&rxq, skb);
 	}
@@ -753,7 +748,7 @@
 		slots++;
 
 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
+			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
 				 txp->offset, txp->size);
 			xenvif_fatal_tx_err(queue->vif);
 			return -EINVAL;
@@ -879,7 +874,7 @@
 	if (unlikely(queue->grant_tx_handle[pending_idx] !=
 		     NETBACK_INVALID_HANDLE)) {
 		netdev_err(queue->vif->dev,
-			   "Trying to overwrite active handle! pending_idx: %x\n",
+			   "Trying to overwrite active handle! pending_idx: 0x%x\n",
 			   pending_idx);
 		BUG();
 	}
@@ -892,7 +887,7 @@
 	if (unlikely(queue->grant_tx_handle[pending_idx] ==
 		     NETBACK_INVALID_HANDLE)) {
 		netdev_err(queue->vif->dev,
-			   "Trying to unmap invalid handle! pending_idx: %x\n",
+			   "Trying to unmap invalid handle! pending_idx: 0x%x\n",
 			   pending_idx);
 		BUG();
 	}
@@ -1248,7 +1243,7 @@
 		/* No crossing a page as the payload mustn't fragment. */
 		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
 			netdev_err(queue->vif->dev,
-				   "txreq.offset: %x, size: %u, end: %lu\n",
+				   "txreq.offset: %u, size: %u, end: %lu\n",
 				   txreq.offset, txreq.size,
 				   (unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size);
 			xenvif_fatal_tx_err(queue->vif);
@@ -1598,12 +1593,12 @@
 					queue->pages_to_unmap,
 					gop - queue->tx_unmap_ops);
 		if (ret) {
-			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
+			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
 				   gop - queue->tx_unmap_ops, ret);
 			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
 				if (gop[i].status != GNTST_okay)
 					netdev_err(queue->vif->dev,
-						   " host_addr: %llx handle: %x status: %d\n",
+						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
 						   gop[i].host_addr,
 						   gop[i].handle,
 						   gop[i].status);
@@ -1736,7 +1731,7 @@
 				&queue->mmap_pages[pending_idx], 1);
 	if (ret) {
 		netdev_err(queue->vif->dev,
-			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
+			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
 			   ret,
 			   pending_idx,
 			   tx_unmap_op.host_addr,
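
Much of the xen-netback churn above is making printk format specifiers match their arguments: %u for unsigned offsets previously printed with %x, an explicit 0x prefix wherever hex output is intended, and %tu for the ptrdiff_t op count. Why the prefix matters is easy to demonstrate with plain printf:

    #include <stdio.h>

    int main(void)
    {
        unsigned int pending_idx = 26;

        printf("pending_idx: %x\n", pending_idx);    /* "1a" - ambiguous */
        printf("pending_idx: 0x%x\n", pending_idx);  /* "0x1a" - clearly hex */
        printf("pending_idx: %u\n", pending_idx);    /* "26" - decimal */
        return 0;
    }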
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 968787a..ec383b0 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -681,6 +681,9 @@
 	char *node;
 	unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");
 
+	if (vif->credit_watch.node)
+		return -EADDRINUSE;
+
 	node = kmalloc(maxlen, GFP_KERNEL);
 	if (!node)
 		return -ENOMEM;
@@ -770,6 +773,7 @@
 	}
 
 	xen_net_read_rate(dev, &credit_bytes, &credit_usec);
+	xen_unregister_watchers(be->vif);
 	xen_register_watchers(dev, be->vif);
 	read_xenbus_vif_flags(be);
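
The xenbus fix above makes credit-watch registration idempotent: the connect path can run more than once for a vif, so it now unregisters any existing watch first, and the registration helper itself refuses with -EADDRINUSE when a node is already set. A standalone sketch of the guard (hypothetical struct, not the xenbus API):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct watch { char *node; };

    static int register_watch(struct watch *w, const char *node)
    {
        if (w->node)                 /* already registered */
            return -EADDRINUSE;
        w->node = strdup(node);
        return w->node ? 0 : -ENOMEM;
    }

    static void unregister_watch(struct watch *w)
    {
        free(w->node);
        w->node = NULL;
    }

    int main(void)
    {
        struct watch w = { NULL };

        printf("first:  %d\n", register_watch(&w, "vif/0/rate"));  /* 0 */
        printf("second: %d\n", register_watch(&w, "vif/0/rate"));  /* -EADDRINUSE */
        unregister_watch(&w);
        printf("again:  %d\n", register_watch(&w, "vif/0/rate"));  /* 0 */
        unregister_watch(&w);
        return 0;
    }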
 
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e031c94..56d8afd 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -733,7 +733,7 @@
 		if (unlikely(rx->status < 0 ||
 			     rx->offset + rx->status > PAGE_SIZE)) {
 			if (net_ratelimit())
-				dev_warn(dev, "rx->offset: %x, size: %u\n",
+				dev_warn(dev, "rx->offset: %u, size: %d\n",
 					 rx->offset, rx->status);
 			xennet_move_rx_slot(queue, skb, ref);
 			err = -EINVAL;
@@ -1560,9 +1560,8 @@
 	spin_lock_init(&queue->tx_lock);
 	spin_lock_init(&queue->rx_lock);
 
-	init_timer(&queue->rx_refill_timer);
-	queue->rx_refill_timer.data = (unsigned long)queue;
-	queue->rx_refill_timer.function = rx_refill_timeout;
+	setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
+		    (unsigned long)queue);
 
 	snprintf(queue->name, sizeof(queue->name), "%s-q%u",
 		 queue->info->netdev->name, queue->id);
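
The xen-netfront hunk above replaces init_timer() plus two open-coded field assignments with setup_timer(), which performs the same initialization in one call (this is the pre-4.15 timer API; timer_setup() with a struct timer_list callback came later). A userspace analogue of the helper:

    #include <stdio.h>

    struct timer_list_compat {
        void (*function)(unsigned long);
        unsigned long data;
    };

    /* Userspace analogue of the old setup_timer() helper: bundle the
     * callback and its context into a single initializer call. */
    static void setup_timer_compat(struct timer_list_compat *t,
                                   void (*fn)(unsigned long),
                                   unsigned long data)
    {
        t->function = fn;
        t->data = data;
    }

    static void rx_refill_timeout(unsigned long data)
    {
        printf("refill timer fired, queue cookie %#lx\n", data);
    }

    int main(void)
    {
        struct timer_list_compat timer;
        int queue = 0;

        setup_timer_compat(&timer, rx_refill_timeout, (unsigned long)&queue);
        timer.function(timer.data);    /* fire once by hand */
        return 0;
    }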
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 107714e..722673c 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -72,6 +72,6 @@
 source "drivers/nfc/microread/Kconfig"
 source "drivers/nfc/nfcmrvl/Kconfig"
 source "drivers/nfc/st21nfca/Kconfig"
-source "drivers/nfc/st21nfcb/Kconfig"
+source "drivers/nfc/st-nci/Kconfig"
 source "drivers/nfc/nxp-nci/Kconfig"
 endmenu
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index a4292d79..368b6df 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -12,7 +12,5 @@
 obj-$(CONFIG_NFC_MRVL)		+= nfcmrvl/
 obj-$(CONFIG_NFC_TRF7970A)	+= trf7970a.o
 obj-$(CONFIG_NFC_ST21NFCA)  	+= st21nfca/
-obj-$(CONFIG_NFC_ST21NFCB)	+= st21nfcb/
+obj-$(CONFIG_NFC_ST_NCI)	+= st-nci/
 obj-$(CONFIG_NFC_NXP_NCI)	+= nxp-nci/
-
-ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index 661e2c8..daf3525 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -211,7 +211,6 @@
 static irqreturn_t microread_i2c_irq_thread_fn(int irq, void *phy_id)
 {
 	struct microread_i2c_phy *phy = phy_id;
-	struct i2c_client *client;
 	struct sk_buff *skb = NULL;
 	int r;
 
@@ -220,8 +219,6 @@
 		return IRQ_NONE;
 	}
 
-	client = phy->i2c_dev;
-
 	if (phy->hard_fault != 0)
 		return IRQ_HANDLED;
 
diff --git a/drivers/nfc/nfcmrvl/Kconfig b/drivers/nfc/nfcmrvl/Kconfig
index 5e18afd..796be24 100644
--- a/drivers/nfc/nfcmrvl/Kconfig
+++ b/drivers/nfc/nfcmrvl/Kconfig
@@ -21,3 +21,14 @@
 
 	  Say Y here to compile support for Marvell NFC-over-USB driver
 	  into the kernel or say M to compile it as module.
+
+config NFC_MRVL_UART
+	tristate "Marvell NFC-over-UART driver"
+	depends on NFC_MRVL && NFC_NCI_UART
+	help
+	  Marvell NFC-over-UART driver.
+
+	  This driver provides support for Marvell NFC-over-UART devices.
+
+	  Say Y here to compile support for Marvell NFC-over-UART driver
+	  into the kernel or say M to compile it as module.
diff --git a/drivers/nfc/nfcmrvl/Makefile b/drivers/nfc/nfcmrvl/Makefile
index 97a0de7..7751962 100644
--- a/drivers/nfc/nfcmrvl/Makefile
+++ b/drivers/nfc/nfcmrvl/Makefile
@@ -7,3 +7,6 @@
 
 nfcmrvl_usb-y += usb.o
 obj-$(CONFIG_NFC_MRVL_USB) += nfcmrvl_usb.o
+
+nfcmrvl_uart-y += uart.o
+obj-$(CONFIG_NFC_MRVL_UART) += nfcmrvl_uart.o
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index ad4933c..4a8866d 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -17,6 +17,9 @@
  */
 
 #include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/of_gpio.h>
 #include <linux/nfc.h>
 #include <net/nfc/nci.h>
 #include <net/nfc/nci_core.h>
@@ -63,20 +66,25 @@
 	if (!test_bit(NFCMRVL_NCI_RUNNING, &priv->flags))
 		return -EBUSY;
 
+	if (priv->config.hci_muxed) {
+		unsigned char *hdr;
+		unsigned char len = skb->len;
+
+		hdr = (char *) skb_push(skb, NFCMRVL_HCI_EVENT_HEADER_SIZE);
+		hdr[0] = NFCMRVL_HCI_COMMAND_CODE;
+		hdr[1] = NFCMRVL_HCI_OGF;
+		hdr[2] = NFCMRVL_HCI_OCF;
+		hdr[3] = len;
+	}
+
 	return priv->if_ops->nci_send(priv, skb);
 }
 
 static int nfcmrvl_nci_setup(struct nci_dev *ndev)
 {
-	__u8 val;
+	__u8 val = 1;
 
-	val = NFCMRVL_GPIO_PIN_NFC_NOT_ALLOWED;
-	nci_set_config(ndev, NFCMRVL_NOT_ALLOWED_ID, 1, &val);
-	val = NFCMRVL_GPIO_PIN_NFC_ACTIVE;
-	nci_set_config(ndev, NFCMRVL_ACTIVE_ID, 1, &val);
-	val = NFCMRVL_EXT_COEX_ENABLE;
-	nci_set_config(ndev, NFCMRVL_EXT_COEX_ID, 1, &val);
-
+	nci_set_config(ndev, NFCMRVL_PB_BAIL_OUT, 1, &val);
 	return 0;
 }
 
@@ -88,11 +96,13 @@
 };
 
 struct nfcmrvl_private *nfcmrvl_nci_register_dev(void *drv_data,
-						 struct nfcmrvl_if_ops *ops,
-						 struct device *dev)
+				struct nfcmrvl_if_ops *ops,
+				struct device *dev,
+				struct nfcmrvl_platform_data *pdata)
 {
 	struct nfcmrvl_private *priv;
 	int rc;
+	int headroom = 0;
 	u32 protocols;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -103,13 +113,30 @@
 	priv->if_ops = ops;
 	priv->dev = dev;
 
+	memcpy(&priv->config, pdata, sizeof(*pdata));
+
+	if (priv->config.reset_n_io) {
+		rc = devm_gpio_request_one(dev,
+					   priv->config.reset_n_io,
+					   GPIOF_OUT_INIT_LOW,
+					   "nfcmrvl_reset_n");
+		if (rc < 0)
+			nfc_err(dev, "failed to request reset_n io\n");
+	}
+
+	if (priv->config.hci_muxed)
+		headroom = NFCMRVL_HCI_EVENT_HEADER_SIZE;
+
 	protocols = NFC_PROTO_JEWEL_MASK
-		| NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK
+		| NFC_PROTO_MIFARE_MASK
+		| NFC_PROTO_FELICA_MASK
 		| NFC_PROTO_ISO14443_MASK
 		| NFC_PROTO_ISO14443_B_MASK
+		| NFC_PROTO_ISO15693_MASK
 		| NFC_PROTO_NFC_DEP_MASK;
 
-	priv->ndev = nci_allocate_device(&nfcmrvl_nci_ops, protocols, 0, 0);
+	priv->ndev = nci_allocate_device(&nfcmrvl_nci_ops, protocols,
+					 headroom, 0);
 	if (!priv->ndev) {
 		nfc_err(dev, "nci_allocate_device failed\n");
 		rc = -ENOMEM;
@@ -118,6 +145,8 @@
 
 	nci_set_drvdata(priv->ndev, priv);
 
+	nfcmrvl_chip_reset(priv);
+
 	rc = nci_register_device(priv->ndev);
 	if (rc) {
 		nfc_err(dev, "nci_register_device failed %d\n", rc);
@@ -144,21 +173,84 @@
 }
 EXPORT_SYMBOL_GPL(nfcmrvl_nci_unregister_dev);
 
-int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, void *data, int count)
+int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, struct sk_buff *skb)
 {
-	struct sk_buff *skb;
+	if (priv->config.hci_muxed) {
+		if (skb->data[0] == NFCMRVL_HCI_EVENT_CODE &&
+		    skb->data[1] == NFCMRVL_HCI_NFC_EVENT_CODE) {
+			/* Data packet, let's extract NCI payload */
+			skb_pull(skb, NFCMRVL_HCI_EVENT_HEADER_SIZE);
+		} else {
+			/* Skip this packet */
+			kfree_skb(skb);
+			return 0;
+		}
+	}
 
-	skb = nci_skb_alloc(priv->ndev, count, GFP_ATOMIC);
-	if (!skb)
-		return -ENOMEM;
+	if (test_bit(NFCMRVL_NCI_RUNNING, &priv->flags))
+		nci_recv_frame(priv->ndev, skb);
+	else {
+		/* Drop this packet since nobody wants it */
+		kfree_skb(skb);
+		return 0;
+	}
 
-	memcpy(skb_put(skb, count), data, count);
-	nci_recv_frame(priv->ndev, skb);
-
-	return count;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(nfcmrvl_nci_recv_frame);
 
+void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
+{
+	/*
+	 * This function does not check whether someone is using the device.
+	 * To be improved.
+	 */
+
+	if (priv->config.reset_n_io) {
+		nfc_info(priv->dev, "reset the chip\n");
+		gpio_set_value(priv->config.reset_n_io, 0);
+		usleep_range(5000, 10000);
+		gpio_set_value(priv->config.reset_n_io, 1);
+	} else
+		nfc_info(priv->dev, "no reset available on this interface\n");
+}
+
+#ifdef CONFIG_OF
+
+int nfcmrvl_parse_dt(struct device_node *node,
+		     struct nfcmrvl_platform_data *pdata)
+{
+	int reset_n_io;
+
+	reset_n_io = of_get_named_gpio(node, "reset-n-io", 0);
+	if (reset_n_io < 0) {
+		pr_info("no reset-n-io config\n");
+		reset_n_io = 0;
+	} else if (!gpio_is_valid(reset_n_io)) {
+		pr_err("invalid reset-n-io GPIO\n");
+		return reset_n_io;
+	}
+	pdata->reset_n_io = reset_n_io;
+
+	if (of_find_property(node, "hci-muxed", NULL))
+		pdata->hci_muxed = 1;
+	else
+		pdata->hci_muxed = 0;
+
+	return 0;
+}
+
+#else
+
+int nfcmrvl_parse_dt(struct device_node *node,
+		     struct nfcmrvl_platform_data *pdata)
+{
+	return -ENODEV;
+}
+
+#endif
+EXPORT_SYMBOL_GPL(nfcmrvl_parse_dt);
+
 MODULE_AUTHOR("Marvell International Ltd.");
 MODULE_DESCRIPTION("Marvell NFC driver ver " VERSION);
 MODULE_VERSION(VERSION);
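
With the new hci_muxed configuration, nfcmrvl wraps every outgoing NCI packet in a four-byte HCI header (command code 0x01, OGF 0x81, OCF 0xFE, payload length) and on receive only unwraps frames that start with the vendor event codes 0x04/0xFF, dropping everything else. A self-contained sketch of that framing, using plain buffers instead of skbs (header values taken from the nfcmrvl.h hunk below):

    #include <stdio.h>
    #include <string.h>

    #define HDR_SIZE   4
    #define CMD_CODE   0x01
    #define OGF        0x81
    #define OCF        0xFE
    #define EVENT_CODE 0x04
    #define NFC_EVENT  0xFF

    /* Prepend the HCI wrapper, as the muxed send path does. */
    static size_t mux_wrap(unsigned char *out, const unsigned char *nci,
                           size_t len)
    {
        out[0] = CMD_CODE;
        out[1] = OGF;
        out[2] = OCF;
        out[3] = (unsigned char)len;
        memcpy(out + HDR_SIZE, nci, len);
        return HDR_SIZE + len;
    }

    /* Strip the wrapper on receive; return payload length or 0 to drop. */
    static size_t mux_unwrap(const unsigned char *in, size_t len,
                             const unsigned char **payload)
    {
        if (len < HDR_SIZE || in[0] != EVENT_CODE || in[1] != NFC_EVENT)
            return 0;               /* not an NCI-bearing frame: skip it */
        *payload = in + HDR_SIZE;
        return len - HDR_SIZE;
    }

    int main(void)
    {
        unsigned char nci[] = { 0x20, 0x00, 0x01, 0x00 };  /* sample NCI cmd */
        unsigned char wire[32];
        unsigned char rx[32] = { EVENT_CODE, NFC_EVENT, 0, 4,
                                 0x40, 0x00, 0x01, 0x00 };
        const unsigned char *payload;
        size_t n = mux_wrap(wire, nci, sizeof(nci));

        printf("tx frame is %zu bytes, first byte 0x%02x\n", n, wire[0]);
        n = mux_unwrap(rx, 8, &payload);
        printf("rx payload is %zu bytes, first byte 0x%02x\n", n, payload[0]);
        return 0;
    }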
diff --git a/drivers/nfc/nfcmrvl/nfcmrvl.h b/drivers/nfc/nfcmrvl/nfcmrvl.h
index 54c4a95..e5a7e54 100644
--- a/drivers/nfc/nfcmrvl/nfcmrvl.h
+++ b/drivers/nfc/nfcmrvl/nfcmrvl.h
@@ -16,6 +16,11 @@
  * this warranty disclaimer.
  **/
 
+#ifndef _NFCMRVL_H_
+#define _NFCMRVL_H_
+
+#include <linux/platform_data/nfcmrvl.h>
+
 /* Define private flags: */
 #define NFCMRVL_NCI_RUNNING			1
 
@@ -27,11 +32,49 @@
 #define NFCMRVL_GPIO_PIN_NFC_ACTIVE		0xB
 #define NFCMRVL_NCI_MAX_EVENT_SIZE		260
 
+/*
+** NCI FW Parameters
+*/
+
+#define NFCMRVL_PB_BAIL_OUT			0x11
+
+/*
+** HCI defines
+*/
+
+#define NFCMRVL_HCI_EVENT_HEADER_SIZE		0x04
+#define NFCMRVL_HCI_EVENT_CODE			0x04
+#define NFCMRVL_HCI_NFC_EVENT_CODE		0xFF
+#define NFCMRVL_HCI_COMMAND_CODE		0x01
+#define NFCMRVL_HCI_OGF				0x81
+#define NFCMRVL_HCI_OCF				0xFE
+
+enum nfcmrvl_phy {
+	NFCMRVL_PHY_USB		= 0,
+	NFCMRVL_PHY_UART	= 1,
+};
+
+
 struct nfcmrvl_private {
-	struct nci_dev *ndev;
+
 	unsigned long flags;
+
+	/* Platform configuration */
+	struct nfcmrvl_platform_data config;
+
+	struct nci_dev *ndev;
+
+	/*
+	** PHY related information
+	*/
+
+	/* PHY driver context */
 	void *drv_data;
+	/* PHY device */
 	struct device *dev;
+	/* PHY type */
+	enum nfcmrvl_phy phy;
+	/* Low level driver ops */
 	struct nfcmrvl_if_ops *if_ops;
 };
 
@@ -42,7 +85,16 @@
 };
 
 void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv);
-int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, void *data, int count);
+int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, struct sk_buff *skb);
 struct nfcmrvl_private *nfcmrvl_nci_register_dev(void *drv_data,
-						 struct nfcmrvl_if_ops *ops,
-						 struct device *dev);
+				struct nfcmrvl_if_ops *ops,
+				struct device *dev,
+				struct nfcmrvl_platform_data *pdata);
+
+
+void nfcmrvl_chip_reset(struct nfcmrvl_private *priv);
+
+int nfcmrvl_parse_dt(struct device_node *node,
+		     struct nfcmrvl_platform_data *pdata);
+
+#endif
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
new file mode 100644
index 0000000..61442d6
--- /dev/null
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -0,0 +1,225 @@
+/**
+ * Marvell NFC-over-UART driver
+ *
+ * Copyright (C) 2015, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/of_gpio.h>
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include "nfcmrvl.h"
+
+static unsigned int hci_muxed;
+static unsigned int flow_control;
+static unsigned int break_control;
+static unsigned int reset_n_io;
+
+/*
+** NFCMRVL NCI OPS
+*/
+
+static int nfcmrvl_uart_nci_open(struct nfcmrvl_private *priv)
+{
+	return 0;
+}
+
+static int nfcmrvl_uart_nci_close(struct nfcmrvl_private *priv)
+{
+	return 0;
+}
+
+static int nfcmrvl_uart_nci_send(struct nfcmrvl_private *priv,
+				 struct sk_buff *skb)
+{
+	struct nci_uart *nu = priv->drv_data;
+
+	return nu->ops.send(nu, skb);
+}
+
+static struct nfcmrvl_if_ops uart_ops = {
+	.nci_open = nfcmrvl_uart_nci_open,
+	.nci_close = nfcmrvl_uart_nci_close,
+	.nci_send = nfcmrvl_uart_nci_send,
+};
+
+#ifdef CONFIG_OF
+
+static int nfcmrvl_uart_parse_dt(struct device_node *node,
+				 struct nfcmrvl_platform_data *pdata)
+{
+	struct device_node *matched_node;
+	int ret;
+
+	matched_node = of_find_compatible_node(node, NULL, "mrvl,nfc-uart");
+	if (!matched_node)
+		return -ENODEV;
+
+	ret = nfcmrvl_parse_dt(matched_node, pdata);
+	if (ret < 0) {
+		pr_err("Failed to get generic entries\n");
+		return ret;
+	}
+
+	if (of_find_property(matched_node, "flow-control", NULL))
+		pdata->flow_control = 1;
+	else
+		pdata->flow_control = 0;
+
+	if (of_find_property(matched_node, "break-control", NULL))
+		pdata->break_control = 1;
+	else
+		pdata->break_control = 0;
+
+	return 0;
+}
+
+#else
+
+static int nfcmrvl_uart_parse_dt(struct device_node *node,
+				 struct nfcmrvl_platform_data *pdata)
+{
+	return -ENODEV;
+}
+
+#endif
+
+/*
+** NCI UART OPS
+*/
+
+static int nfcmrvl_nci_uart_open(struct nci_uart *nu)
+{
+	struct nfcmrvl_private *priv;
+	struct nfcmrvl_platform_data *pdata = NULL;
+	struct nfcmrvl_platform_data config;
+
+	/*
+	 * Platform data cannot be used here, since it is usually already
+	 * taken by the low-level serial driver. Instead, retrieve the
+	 * serial device and check whether DT entries were added.
+	 */
+
+	if (nu->tty->dev->parent && nu->tty->dev->parent->of_node)
+		if (nfcmrvl_uart_parse_dt(nu->tty->dev->parent->of_node,
+					  &config) == 0)
+			pdata = &config;
+
+	if (!pdata) {
+		pr_info("No platform data / DT -> fallback to module params\n");
+		config.hci_muxed = hci_muxed;
+		config.reset_n_io = reset_n_io;
+		config.flow_control = flow_control;
+		config.break_control = break_control;
+		pdata = &config;
+	}
+
+	priv = nfcmrvl_nci_register_dev(nu, &uart_ops, nu->tty->dev, pdata);
+	if (IS_ERR(priv))
+		return PTR_ERR(priv);
+
+	priv->phy = NFCMRVL_PHY_UART;
+
+	nu->drv_data = priv;
+	nu->ndev = priv->ndev;
+
+	/* Set BREAK */
+	if (priv->config.break_control && nu->tty->ops->break_ctl)
+		nu->tty->ops->break_ctl(nu->tty, -1);
+
+	return 0;
+}
+
+static void nfcmrvl_nci_uart_close(struct nci_uart *nu)
+{
+	nfcmrvl_nci_unregister_dev((struct nfcmrvl_private *)nu->drv_data);
+}
+
+static int nfcmrvl_nci_uart_recv(struct nci_uart *nu, struct sk_buff *skb)
+{
+	return nfcmrvl_nci_recv_frame((struct nfcmrvl_private *)nu->drv_data,
+				      skb);
+}
+
+static void nfcmrvl_nci_uart_tx_start(struct nci_uart *nu)
+{
+	struct nfcmrvl_private *priv = (struct nfcmrvl_private *)nu->drv_data;
+
+	/* Remove BREAK to wake up the NFCC */
+	if (priv->config.break_control && nu->tty->ops->break_ctl) {
+		nu->tty->ops->break_ctl(nu->tty, 0);
+		usleep_range(3000, 5000);
+	}
+}
+
+static void nfcmrvl_nci_uart_tx_done(struct nci_uart *nu)
+{
+	struct nfcmrvl_private *priv = (struct nfcmrvl_private *)nu->drv_data;
+
+	/*
+	** To ensure that we can wake the NFCC up if it goes into DEEP SLEEP
+	** state, we set BREAK. Once we are ready to send again, we will
+	** remove it.
+	*/
+	if (priv->config.break_control && nu->tty->ops->break_ctl)
+		nu->tty->ops->break_ctl(nu->tty, -1);
+}
+
+static struct nci_uart nfcmrvl_nci_uart = {
+	.owner  = THIS_MODULE,
+	.name   = "nfcmrvl_uart",
+	.driver = NCI_UART_DRIVER_MARVELL,
+	.ops	= {
+		.open		= nfcmrvl_nci_uart_open,
+		.close		= nfcmrvl_nci_uart_close,
+		.recv		= nfcmrvl_nci_uart_recv,
+		.tx_start	= nfcmrvl_nci_uart_tx_start,
+		.tx_done	= nfcmrvl_nci_uart_tx_done,
+	}
+};
+
+/*
+** Module init
+*/
+
+static int nfcmrvl_uart_init_module(void)
+{
+	return nci_uart_register(&nfcmrvl_nci_uart);
+}
+
+static void nfcmrvl_uart_exit_module(void)
+{
+	nci_uart_unregister(&nfcmrvl_nci_uart);
+}
+
+module_init(nfcmrvl_uart_init_module);
+module_exit(nfcmrvl_uart_exit_module);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell NFC-over-UART");
+MODULE_LICENSE("GPL v2");
+
+module_param(flow_control, uint, 0);
+MODULE_PARM_DESC(flow_control, "Tell if UART needs flow control at init.");
+
+module_param(break_control, uint, 0);
+MODULE_PARM_DESC(break_control, "Tell if UART driver must drive break signal.");
+
+module_param(hci_muxed, uint, 0);
+MODULE_PARM_DESC(hci_muxed, "Tell if transport is muxed in HCI one.");
+
+module_param(reset_n_io, uint, 0);
+MODULE_PARM_DESC(reset_n_io, "GPIO that is wired to RESET_N signal.");
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index 6cf15c1..7d1fe43 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -26,7 +26,8 @@
 #define VERSION "1.0"
 
 static struct usb_device_id nfcmrvl_table[] = {
-	{ USB_DEVICE_INTERFACE_CLASS(0x1286, 0x2046, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x1286, 0x2046,
+					USB_CLASS_VENDOR_SPEC, 4, 1) },
 	{ }	/* Terminating entry */
 };
 
@@ -69,18 +70,27 @@
 static void nfcmrvl_bulk_complete(struct urb *urb)
 {
 	struct nfcmrvl_usb_drv_data *drv_data = urb->context;
+	struct sk_buff *skb;
 	int err;
 
-	dev_dbg(&drv_data->udev->dev, "urb %p status %d count %d",
+	dev_dbg(&drv_data->udev->dev, "urb %p status %d count %d\n",
 		urb, urb->status, urb->actual_length);
 
 	if (!test_bit(NFCMRVL_NCI_RUNNING, &drv_data->flags))
 		return;
 
 	if (!urb->status) {
-		if (nfcmrvl_nci_recv_frame(drv_data->priv, urb->transfer_buffer,
-					   urb->actual_length) < 0)
-			nfc_err(&drv_data->udev->dev, "corrupted Rx packet\n");
+		skb = nci_skb_alloc(drv_data->priv->ndev, urb->actual_length,
+				    GFP_ATOMIC);
+		if (!skb) {
+			nfc_err(&drv_data->udev->dev, "failed to alloc mem\n");
+		} else {
+			memcpy(skb_put(skb, urb->actual_length),
+			       urb->transfer_buffer, urb->actual_length);
+			if (nfcmrvl_nci_recv_frame(drv_data->priv, skb) < 0)
+				nfc_err(&drv_data->udev->dev,
+					"corrupted Rx packet\n");
+		}
 	}
 
 	if (!test_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags))
@@ -292,6 +302,10 @@
 	struct nfcmrvl_private *priv;
 	int i;
 	struct usb_device *udev = interface_to_usbdev(intf);
+	struct nfcmrvl_platform_data config;
+
+	/* No configuration for USB */
+	memset(&config, 0, sizeof(config));
 
 	nfc_info(&udev->dev, "intf %p id %p\n", intf, id);
 
@@ -329,11 +343,12 @@
 	init_usb_anchor(&drv_data->deferred);
 
 	priv = nfcmrvl_nci_register_dev(drv_data, &usb_ops,
-					&drv_data->udev->dev);
+					&drv_data->udev->dev, &config);
 	if (IS_ERR(priv))
 		return PTR_ERR(priv);
 
 	drv_data->priv = priv;
+	drv_data->priv->phy = NFCMRVL_PHY_USB;
 	priv->dev = &drv_data->udev->dev;
 
 	usb_set_intfdata(intf, drv_data);
diff --git a/drivers/nfc/nxp-nci/Makefile b/drivers/nfc/nxp-nci/Makefile
index c008be3..c9ec786 100644
--- a/drivers/nfc/nxp-nci/Makefile
+++ b/drivers/nfc/nxp-nci/Makefile
@@ -7,5 +7,3 @@
 
 obj-$(CONFIG_NFC_NXP_NCI) += nxp-nci.o
 obj-$(CONFIG_NFC_NXP_NCI_I2C) += nxp-nci_i2c.o
-
-ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 17bd67d..2f77f1d 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -2,8 +2,10 @@
  * I2C link layer for the NXP NCI driver
  *
  * Copyright (C) 2014  NXP Semiconductors  All rights reserved.
+ * Copyright (C) 2012-2015  Intel Corporation. All rights reserved.
  *
  * Authors: Clément Perrochaud <clement.perrochaud@nxp.com>
+ * Authors: Oleg Zhurakivskyy <oleg.zhurakivskyy@intel.com>
  *
  * Derived from PN544 device driver:
  * Copyright (C) 2012  Intel Corporation. All rights reserved.
@@ -23,12 +25,14 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/acpi.h>
 #include <linux/delay.h>
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
 #include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/nfc.h>
+#include <linux/gpio/consumer.h>
 #include <linux/of_gpio.h>
 #include <linux/of_irq.h>
 #include <linux/platform_data/nxp-nci.h>
@@ -48,6 +52,7 @@
 
 	unsigned int gpio_en;
 	unsigned int gpio_fw;
+	unsigned int gpio_irq;
 
 	int hard_fault; /*
 			 * < 0 if hardware error occurred (e.g. i2c err)
@@ -308,6 +313,37 @@
 
 #endif
 
+static int nxp_nci_i2c_acpi_config(struct nxp_nci_i2c_phy *phy)
+{
+	struct i2c_client *client = phy->i2c_dev;
+	struct gpio_desc *gpiod_en, *gpiod_fw, *gpiod_irq;
+
+	gpiod_en = devm_gpiod_get_index(&client->dev, NULL, 2);
+	gpiod_fw = devm_gpiod_get_index(&client->dev, NULL, 1);
+	gpiod_irq = devm_gpiod_get_index(&client->dev, NULL, 0);
+
+	if (IS_ERR(gpiod_en) || IS_ERR(gpiod_fw) || IS_ERR(gpiod_irq)) {
+		nfc_err(&client->dev, "No GPIOs\n");
+		return -EINVAL;
+	}
+
+	gpiod_direction_output(gpiod_en, 0);
+	gpiod_direction_output(gpiod_fw, 0);
+	gpiod_direction_input(gpiod_irq);
+
+	client->irq = gpiod_to_irq(gpiod_irq);
+	if (client->irq < 0) {
+		nfc_err(&client->dev, "No IRQ\n");
+		return -EINVAL;
+	}
+
+	phy->gpio_en = desc_to_gpio(gpiod_en);
+	phy->gpio_fw = desc_to_gpio(gpiod_fw);
+	phy->gpio_irq = desc_to_gpio(gpiod_irq);
+
+	return 0;
+}
+
 static int nxp_nci_i2c_probe(struct i2c_client *client,
 			    const struct i2c_device_id *id)
 {
@@ -343,6 +379,11 @@
 		phy->gpio_en = pdata->gpio_en;
 		phy->gpio_fw = pdata->gpio_fw;
 		client->irq = pdata->irq;
+	} else if (ACPI_HANDLE(&client->dev)) {
+		r = nxp_nci_i2c_acpi_config(phy);
+		if (r < 0)
+			goto probe_exit;
+		goto nci_probe;
 	} else {
 		nfc_err(&client->dev, "No platform data\n");
 		r = -EINVAL;
@@ -359,6 +400,7 @@
 	if (r < 0)
 		goto probe_exit;
 
+nci_probe:
 	r = nxp_nci_probe(phy, &client->dev, &i2c_phy_ops,
 			  NXP_NCI_I2C_MAX_PAYLOAD, &phy->ndev);
 	if (r < 0)
@@ -397,10 +439,19 @@
 };
 MODULE_DEVICE_TABLE(of, of_nxp_nci_i2c_match);
 
+#ifdef CONFIG_ACPI
+static struct acpi_device_id acpi_id[] = {
+	{ "NXP7471" },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, acpi_id);
+#endif
+
 static struct i2c_driver nxp_nci_i2c_driver = {
 	.driver = {
 		   .name = NXP_NCI_I2C_DRIVER_NAME,
 		   .owner  = THIS_MODULE,
+		   .acpi_match_table = ACPI_PTR(acpi_id),
 		   .of_match_table = of_match_ptr(of_nxp_nci_i2c_match),
 		  },
 	.probe = nxp_nci_i2c_probe,
@@ -413,3 +464,4 @@
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("I2C driver for NXP NCI NFC controllers");
 MODULE_AUTHOR("Clément Perrochaud <clement.perrochaud@nxp.com>");
+MODULE_AUTHOR("Oleg Zhurakivskyy <oleg.zhurakivskyy@intel.com>");
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 6fd986f..fa75c53 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -895,56 +895,35 @@
 		return -ENODEV;
 
 	/* Get EN GPIO from ACPI */
-	gpiod_en = devm_gpiod_get_index(dev, PN544_GPIO_NAME_EN, 1);
+	gpiod_en = devm_gpiod_get_index(dev, PN544_GPIO_NAME_EN, 1,
+					GPIOD_OUT_LOW);
 	if (IS_ERR(gpiod_en)) {
-		nfc_err(dev,
-			"Unable to get EN GPIO\n");
+		nfc_err(dev, "Unable to get EN GPIO\n");
 		return -ENODEV;
 	}
 
-	phy->gpio_en  = desc_to_gpio(gpiod_en);
-
-	/* Configuration EN GPIO */
-	ret = gpiod_direction_output(gpiod_en, 0);
-	if (ret) {
-		nfc_err(dev, "Fail EN pin direction\n");
-		return ret;
-	}
+	phy->gpio_en = desc_to_gpio(gpiod_en);
 
 	/* Get FW GPIO from ACPI */
-	gpiod_fw = devm_gpiod_get_index(dev, PN544_GPIO_NAME_FW, 2);
+	gpiod_fw = devm_gpiod_get_index(dev, PN544_GPIO_NAME_FW, 2,
+					GPIOD_OUT_LOW);
 	if (IS_ERR(gpiod_fw)) {
-		nfc_err(dev,
-			"Unable to get FW GPIO\n");
+		nfc_err(dev, "Unable to get FW GPIO\n");
 		return -ENODEV;
 	}
 
-	phy->gpio_fw  = desc_to_gpio(gpiod_fw);
-
-	/* Configuration FW GPIO */
-	ret = gpiod_direction_output(gpiod_fw, 0);
-	if (ret) {
-		nfc_err(dev, "Fail FW pin direction\n");
-		return ret;
-	}
+	phy->gpio_fw = desc_to_gpio(gpiod_fw);
 
 	/* Get IRQ GPIO */
-	gpiod_irq = devm_gpiod_get_index(dev, PN544_GPIO_NAME_IRQ, 0);
+	gpiod_irq = devm_gpiod_get_index(dev, PN544_GPIO_NAME_IRQ, 0,
+					 GPIOD_IN);
 	if (IS_ERR(gpiod_irq)) {
-		nfc_err(dev,
-			"Unable to get IRQ GPIO\n");
+		nfc_err(dev, "Unable to get IRQ GPIO\n");
 		return -ENODEV;
 	}
 
 	phy->gpio_irq = desc_to_gpio(gpiod_irq);
 
-	/* Configure IRQ GPIO */
-	ret = gpiod_direction_input(gpiod_irq);
-	if (ret) {
-		nfc_err(dev, "Fail IRQ pin direction\n");
-		return ret;
-	}
-
 	/* Map the pin to an IRQ */
 	ret = gpiod_to_irq(gpiod_irq);
 	if (ret < 0) {
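
The pn544 ACPI path shrinks because the gpiod consumer API gained a flags argument: devm_gpiod_get_index() now configures direction (and initial output level) at request time, so the follow-up gpiod_direction_output()/gpiod_direction_input() calls and their error handling disappear. A hedged kernel-style sketch of the consolidated form (hypothetical helper and con_id, not the driver's code; it assumes a driver context rather than being standalone):

    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    /* Request index 1 under a hypothetical "enable" con_id and configure
     * it as an output driven low, all in one call; GPIOD_IN would request
     * an input instead. */
    static int request_enable_gpio(struct device *dev, struct gpio_desc **out)
    {
        struct gpio_desc *en;

        en = devm_gpiod_get_index(dev, "enable", 1, GPIOD_OUT_LOW);
        if (IS_ERR(en))
            return PTR_ERR(en);    /* request or configuration failed */
        *out = en;
        return 0;
    }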
diff --git a/drivers/nfc/st-nci/Kconfig b/drivers/nfc/st-nci/Kconfig
new file mode 100644
index 0000000..fc3904c
--- /dev/null
+++ b/drivers/nfc/st-nci/Kconfig
@@ -0,0 +1,23 @@
+config NFC_ST_NCI
+	tristate "STMicroelectronics ST NCI NFC driver"
+	depends on NFC_NCI
+	default n
+	---help---
+	  STMicroelectronics NFC NCI chips core driver. It implements the chipset
+	  NCI logic and hooks into the NFC kernel APIs. Physical layers will
+	  register against it.
+
+	  To compile this driver as a module, choose m here. The module will
+	  be called st-nci.
+	  Say N if unsure.
+
+config NFC_ST_NCI_I2C
+	tristate "NFC ST NCI i2c support"
+	depends on NFC_ST_NCI && I2C
+	---help---
+	  This module adds support for an I2C interface to the
+	  STMicroelectronics NFC NCI chips family.
+	  Select this if your platform is using the i2c bus.
+
+	  If you choose to build a module, it'll be called st-nci_i2c.
+	  Say N if unsure.
diff --git a/drivers/nfc/st-nci/Makefile b/drivers/nfc/st-nci/Makefile
new file mode 100644
index 0000000..0df157d
--- /dev/null
+++ b/drivers/nfc/st-nci/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for ST NCI based NFC driver
+#
+
+st-nci-objs = ndlc.o core.o st-nci_se.o
+obj-$(CONFIG_NFC_ST_NCI)     += st-nci.o
+
+st-nci_i2c-objs = i2c.o
+obj-$(CONFIG_NFC_ST_NCI_I2C) += st-nci_i2c.o
diff --git a/drivers/nfc/st-nci/core.c b/drivers/nfc/st-nci/core.c
new file mode 100644
index 0000000..c419d39
--- /dev/null
+++ b/drivers/nfc/st-nci/core.c
@@ -0,0 +1,179 @@
+/*
+ * NCI based Driver for STMicroelectronics NFC Chip
+ *
+ * Copyright (C) 2014-2015  STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "st-nci.h"
+#include "st-nci_se.h"
+
+#define DRIVER_DESC "NCI NFC driver for ST_NCI"
+
+#define ST_NCI1_X_PROPRIETARY_ISO15693 0x83
+
+static int st_nci_init(struct nci_dev *ndev)
+{
+	struct nci_mode_set_cmd cmd;
+
+	cmd.cmd_type = ST_NCI_SET_NFC_MODE;
+	cmd.mode = 1;
+
+	return nci_prop_cmd(ndev, ST_NCI_CORE_PROP,
+			sizeof(struct nci_mode_set_cmd), (__u8 *)&cmd);
+}
+
+static int st_nci_open(struct nci_dev *ndev)
+{
+	struct st_nci_info *info = nci_get_drvdata(ndev);
+	int r;
+
+	if (test_and_set_bit(ST_NCI_RUNNING, &info->flags))
+		return 0;
+
+	r = ndlc_open(info->ndlc);
+	if (r)
+		clear_bit(ST_NCI_RUNNING, &info->flags);
+
+	return r;
+}
+
+static int st_nci_close(struct nci_dev *ndev)
+{
+	struct st_nci_info *info = nci_get_drvdata(ndev);
+
+	if (!test_bit(ST_NCI_RUNNING, &info->flags))
+		return 0;
+
+	ndlc_close(info->ndlc);
+
+	clear_bit(ST_NCI_RUNNING, &info->flags);
+
+	return 0;
+}
+
+static int st_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+{
+	struct st_nci_info *info = nci_get_drvdata(ndev);
+
+	skb->dev = (void *)ndev;
+
+	if (!test_bit(ST_NCI_RUNNING, &info->flags))
+		return -EBUSY;
+
+	return ndlc_send(info->ndlc, skb);
+}
+
+static __u32 st_nci_get_rfprotocol(struct nci_dev *ndev,
+					 __u8 rf_protocol)
+{
+	return rf_protocol == ST_NCI1_X_PROPRIETARY_ISO15693 ?
+		NFC_PROTO_ISO15693_MASK : 0;
+}
+
+static int st_nci_prop_rsp_packet(struct nci_dev *ndev,
+					struct sk_buff *skb)
+{
+	__u8 status = skb->data[0];
+
+	nci_req_complete(ndev, status);
+	return 0;
+}
+
+static struct nci_prop_ops st_nci_prop_ops[] = {
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+					  ST_NCI_CORE_PROP),
+		.rsp = st_nci_prop_rsp_packet,
+	},
+};
+
+static struct nci_ops st_nci_ops = {
+	.init = st_nci_init,
+	.open = st_nci_open,
+	.close = st_nci_close,
+	.send = st_nci_send,
+	.get_rfprotocol = st_nci_get_rfprotocol,
+	.discover_se = st_nci_discover_se,
+	.enable_se = st_nci_enable_se,
+	.disable_se = st_nci_disable_se,
+	.se_io = st_nci_se_io,
+	.hci_load_session = st_nci_hci_load_session,
+	.hci_event_received = st_nci_hci_event_received,
+	.hci_cmd_received = st_nci_hci_cmd_received,
+	.prop_ops = st_nci_prop_ops,
+	.n_prop_ops = ARRAY_SIZE(st_nci_prop_ops),
+};
+
+int st_nci_probe(struct llt_ndlc *ndlc, int phy_headroom,
+		       int phy_tailroom)
+{
+	struct st_nci_info *info;
+	int r;
+	u32 protocols;
+
+	info = devm_kzalloc(ndlc->dev,
+			sizeof(struct st_nci_info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	protocols = NFC_PROTO_JEWEL_MASK
+		| NFC_PROTO_MIFARE_MASK
+		| NFC_PROTO_FELICA_MASK
+		| NFC_PROTO_ISO14443_MASK
+		| NFC_PROTO_ISO14443_B_MASK
+		| NFC_PROTO_ISO15693_MASK
+		| NFC_PROTO_NFC_DEP_MASK;
+
+	ndlc->ndev = nci_allocate_device(&st_nci_ops, protocols,
+					phy_headroom, phy_tailroom);
+	if (!ndlc->ndev) {
+		pr_err("Cannot allocate nfc ndev\n");
+		return -ENOMEM;
+	}
+	info->ndlc = ndlc;
+
+	nci_set_drvdata(ndlc->ndev, info);
+
+	r = nci_register_device(ndlc->ndev);
+	if (r) {
+		pr_err("Cannot register nfc device to nci core\n");
+		nci_free_device(ndlc->ndev);
+		return r;
+	}
+
+	return st_nci_se_init(ndlc->ndev);
+}
+EXPORT_SYMBOL_GPL(st_nci_probe);
+
+void st_nci_remove(struct nci_dev *ndev)
+{
+	struct st_nci_info *info = nci_get_drvdata(ndev);
+
+	ndlc_close(info->ndlc);
+
+	nci_unregister_device(ndev);
+	nci_free_device(ndev);
+}
+EXPORT_SYMBOL_GPL(st_nci_remove);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
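
st_nci_get_rfprotocol() above maps the chip's proprietary RF protocol value 0x83 onto the core's ISO15693 protocol mask, which is how an NCI driver surfaces vendor-specific discovery results. The mapping reduces to a one-liner, sketched standalone (the mask value here is hypothetical):

    #include <stdio.h>

    #define PROP_ISO15693       0x83        /* chip's proprietary RF id */
    #define PROTO_ISO15693_MASK (1u << 6)   /* hypothetical core mask */

    static unsigned int get_rfprotocol(unsigned char rf_protocol)
    {
        return rf_protocol == PROP_ISO15693 ? PROTO_ISO15693_MASK : 0;
    }

    int main(void)
    {
        printf("0x83 -> 0x%x\n", get_rfprotocol(0x83));  /* mapped */
        printf("0x04 -> 0x%x\n", get_rfprotocol(0x04));  /* unknown: 0 */
        return 0;
    }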
diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st-nci/i2c.c
similarity index 64%
rename from drivers/nfc/st21nfcb/i2c.c
rename to drivers/nfc/st-nci/i2c.c
index 76a4cad..06175ce 100644
--- a/drivers/nfc/st21nfcb/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -1,6 +1,6 @@
 /*
- * I2C Link Layer for ST21NFCB NCI based Driver
- * Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
+ * I2C Link Layer for ST NCI NFC controller family based Driver
+ * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -25,7 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/nfc.h>
-#include <linux/platform_data/st21nfcb.h>
+#include <linux/platform_data/st_nci.h>
 
 #include "ndlc.h"
 
@@ -35,25 +35,23 @@
 #define ST21NFCB_FRAME_HEADROOM	1
 #define ST21NFCB_FRAME_TAILROOM 0
 
-#define ST21NFCB_NCI_I2C_MIN_SIZE 4   /* PCB(1) + NCI Packet header(3) */
-#define ST21NFCB_NCI_I2C_MAX_SIZE 250 /* req 4.2.1 */
+#define ST_NCI_I2C_MIN_SIZE 4   /* PCB(1) + NCI Packet header(3) */
+#define ST_NCI_I2C_MAX_SIZE 250 /* req 4.2.1 */
 
-#define ST21NFCB_NCI_I2C_DRIVER_NAME "st21nfcb_nci_i2c"
+#define ST_NCI_I2C_DRIVER_NAME "st_nci_i2c"
 
-static struct i2c_device_id st21nfcb_nci_i2c_id_table[] = {
-	{ST21NFCB_NCI_DRIVER_NAME, 0},
+static struct i2c_device_id st_nci_i2c_id_table[] = {
+	{ST_NCI_DRIVER_NAME, 0},
 	{}
 };
-MODULE_DEVICE_TABLE(i2c, st21nfcb_nci_i2c_id_table);
+MODULE_DEVICE_TABLE(i2c, st_nci_i2c_id_table);
 
-struct st21nfcb_i2c_phy {
+struct st_nci_i2c_phy {
 	struct i2c_client *i2c_dev;
 	struct llt_ndlc *ndlc;
 
 	unsigned int gpio_reset;
 	unsigned int irq_polarity;
-
-	int powered;
 };
 
 #define I2C_DUMP_SKB(info, skb)					\
@@ -63,33 +61,26 @@
 		       16, 1, (skb)->data, (skb)->len, 0);	\
 } while (0)
 
-static int st21nfcb_nci_i2c_enable(void *phy_id)
+static int st_nci_i2c_enable(void *phy_id)
 {
-	struct st21nfcb_i2c_phy *phy = phy_id;
+	struct st_nci_i2c_phy *phy = phy_id;
 
 	gpio_set_value(phy->gpio_reset, 0);
 	usleep_range(10000, 15000);
 	gpio_set_value(phy->gpio_reset, 1);
-	phy->powered = 1;
 	usleep_range(80000, 85000);
 
+	if (phy->ndlc->powered == 0)
+		enable_irq(phy->i2c_dev->irq);
+
 	return 0;
 }
 
-static void st21nfcb_nci_i2c_disable(void *phy_id)
+static void st_nci_i2c_disable(void *phy_id)
 {
-	struct st21nfcb_i2c_phy *phy = phy_id;
+	struct st_nci_i2c_phy *phy = phy_id;
 
-	phy->powered = 0;
-	/* reset chip in order to flush clf */
-	gpio_set_value(phy->gpio_reset, 0);
-	usleep_range(10000, 15000);
-	gpio_set_value(phy->gpio_reset, 1);
-}
-
-static void st21nfcb_nci_remove_header(struct sk_buff *skb)
-{
-	skb_pull(skb, ST21NFCB_FRAME_HEADROOM);
+	disable_irq_nosync(phy->i2c_dev->irq);
 }
 
 /*
@@ -97,13 +88,13 @@
  * It must return either zero for success, or <0 for error.
  * In addition, it must not alter the skb
  */
-static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb)
+static int st_nci_i2c_write(void *phy_id, struct sk_buff *skb)
 {
 	int r = -1;
-	struct st21nfcb_i2c_phy *phy = phy_id;
+	struct st_nci_i2c_phy *phy = phy_id;
 	struct i2c_client *client = phy->i2c_dev;
 
-	I2C_DUMP_SKB("st21nfcb_nci_i2c_write", skb);
+	I2C_DUMP_SKB("st_nci_i2c_write", skb);
 
 	if (phy->ndlc->hard_fault != 0)
 		return phy->ndlc->hard_fault;
@@ -121,8 +112,6 @@
 			r = 0;
 	}
 
-	st21nfcb_nci_remove_header(skb);
-
 	return r;
 }
 
@@ -135,40 +124,40 @@
  * at end of read)
  * -EREMOTEIO : i2c read error (fatal)
  * -EBADMSG : frame was incorrect and discarded
- * (value returned from st21nfcb_nci_i2c_repack)
+ * (value returned from st_nci_i2c_repack)
  * -EIO : if no ST21NFCB_SOF_EOF is found after reaching
  * the read length end sequence
  */
-static int st21nfcb_nci_i2c_read(struct st21nfcb_i2c_phy *phy,
+static int st_nci_i2c_read(struct st_nci_i2c_phy *phy,
 				 struct sk_buff **skb)
 {
 	int r;
 	u8 len;
-	u8 buf[ST21NFCB_NCI_I2C_MAX_SIZE];
+	u8 buf[ST_NCI_I2C_MAX_SIZE];
 	struct i2c_client *client = phy->i2c_dev;
 
-	r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
+	r = i2c_master_recv(client, buf, ST_NCI_I2C_MIN_SIZE);
 	if (r < 0) {  /* Retry, chip was in standby */
 		usleep_range(1000, 4000);
-		r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
+		r = i2c_master_recv(client, buf, ST_NCI_I2C_MIN_SIZE);
 	}
 
-	if (r != ST21NFCB_NCI_I2C_MIN_SIZE)
+	if (r != ST_NCI_I2C_MIN_SIZE)
 		return -EREMOTEIO;
 
 	len = be16_to_cpu(*(__be16 *) (buf + 2));
-	if (len > ST21NFCB_NCI_I2C_MAX_SIZE) {
+	if (len > ST_NCI_I2C_MAX_SIZE) {
 		nfc_err(&client->dev, "invalid frame len\n");
 		return -EBADMSG;
 	}
 
-	*skb = alloc_skb(ST21NFCB_NCI_I2C_MIN_SIZE + len, GFP_KERNEL);
+	*skb = alloc_skb(ST_NCI_I2C_MIN_SIZE + len, GFP_KERNEL);
 	if (*skb == NULL)
 		return -ENOMEM;
 
-	skb_reserve(*skb, ST21NFCB_NCI_I2C_MIN_SIZE);
-	skb_put(*skb, ST21NFCB_NCI_I2C_MIN_SIZE);
-	memcpy((*skb)->data, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
+	skb_reserve(*skb, ST_NCI_I2C_MIN_SIZE);
+	skb_put(*skb, ST_NCI_I2C_MIN_SIZE);
+	memcpy((*skb)->data, buf, ST_NCI_I2C_MIN_SIZE);
 
 	if (!len)
 		return 0;
@@ -180,7 +169,7 @@
 	}
 
 	skb_put(*skb, len);
-	memcpy((*skb)->data + ST21NFCB_NCI_I2C_MIN_SIZE, buf, len);
+	memcpy((*skb)->data + ST_NCI_I2C_MIN_SIZE, buf, len);
 
 	I2C_DUMP_SKB("i2c frame read", *skb);
 
@@ -192,9 +181,9 @@
  *
  * On ST21NFCB, IRQ goes in idle state when read starts.
  */
-static irqreturn_t st21nfcb_nci_irq_thread_fn(int irq, void *phy_id)
+static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
 {
-	struct st21nfcb_i2c_phy *phy = phy_id;
+	struct st_nci_i2c_phy *phy = phy_id;
 	struct i2c_client *client;
 	struct sk_buff *skb = NULL;
 	int r;
@@ -210,12 +199,12 @@
 	if (phy->ndlc->hard_fault)
 		return IRQ_HANDLED;
 
-	if (!phy->powered) {
-		st21nfcb_nci_i2c_disable(phy);
+	if (!phy->ndlc->powered) {
+		st_nci_i2c_disable(phy);
 		return IRQ_HANDLED;
 	}
 
-	r = st21nfcb_nci_i2c_read(phy, &skb);
+	r = st_nci_i2c_read(phy, &skb);
 	if (r == -EREMOTEIO || r == -ENOMEM || r == -EBADMSG)
 		return IRQ_HANDLED;
 
@@ -225,15 +214,15 @@
 }
 
 static struct nfc_phy_ops i2c_phy_ops = {
-	.write = st21nfcb_nci_i2c_write,
-	.enable = st21nfcb_nci_i2c_enable,
-	.disable = st21nfcb_nci_i2c_disable,
+	.write = st_nci_i2c_write,
+	.enable = st_nci_i2c_enable,
+	.disable = st_nci_i2c_disable,
 };
 
 #ifdef CONFIG_OF
-static int st21nfcb_nci_i2c_of_request_resources(struct i2c_client *client)
+static int st_nci_i2c_of_request_resources(struct i2c_client *client)
 {
-	struct st21nfcb_i2c_phy *phy = i2c_get_clientdata(client);
+	struct st_nci_i2c_phy *phy = i2c_get_clientdata(client);
 	struct device_node *pp;
 	int gpio;
 	int r;
@@ -264,16 +253,16 @@
 	return 0;
 }
 #else
-static int st21nfcb_nci_i2c_of_request_resources(struct i2c_client *client)
+static int st_nci_i2c_of_request_resources(struct i2c_client *client)
 {
 	return -ENODEV;
 }
 #endif
 
-static int st21nfcb_nci_i2c_request_resources(struct i2c_client *client)
+static int st_nci_i2c_request_resources(struct i2c_client *client)
 {
-	struct st21nfcb_nfc_platform_data *pdata;
-	struct st21nfcb_i2c_phy *phy = i2c_get_clientdata(client);
+	struct st_nci_nfc_platform_data *pdata;
+	struct st_nci_i2c_phy *phy = i2c_get_clientdata(client);
 	int r;
 
 	pdata = client->dev.platform_data;
@@ -296,11 +285,11 @@
 	return 0;
 }
 
-static int st21nfcb_nci_i2c_probe(struct i2c_client *client,
+static int st_nci_i2c_probe(struct i2c_client *client,
 				  const struct i2c_device_id *id)
 {
-	struct st21nfcb_i2c_phy *phy;
-	struct st21nfcb_nfc_platform_data *pdata;
+	struct st_nci_i2c_phy *phy;
+	struct st_nci_nfc_platform_data *pdata;
 	int r;
 
 	dev_dbg(&client->dev, "%s\n", __func__);
@@ -311,7 +300,7 @@
 		return -ENODEV;
 	}
 
-	phy = devm_kzalloc(&client->dev, sizeof(struct st21nfcb_i2c_phy),
+	phy = devm_kzalloc(&client->dev, sizeof(struct st_nci_i2c_phy),
 			   GFP_KERNEL);
 	if (!phy)
 		return -ENOMEM;
@@ -322,13 +311,13 @@
 
 	pdata = client->dev.platform_data;
 	if (!pdata && client->dev.of_node) {
-		r = st21nfcb_nci_i2c_of_request_resources(client);
+		r = st_nci_i2c_of_request_resources(client);
 		if (r) {
 			nfc_err(&client->dev, "No platform data\n");
 			return r;
 		}
 	} else if (pdata) {
-		r = st21nfcb_nci_i2c_request_resources(client);
+		r = st_nci_i2c_request_resources(client);
 		if (r) {
 			nfc_err(&client->dev,
 				"Cannot get platform resources\n");
@@ -349,50 +338,48 @@
 	}
 
 	r = devm_request_threaded_irq(&client->dev, client->irq, NULL,
-				st21nfcb_nci_irq_thread_fn,
+				st_nci_irq_thread_fn,
 				phy->irq_polarity | IRQF_ONESHOT,
-				ST21NFCB_NCI_DRIVER_NAME, phy);
+				ST_NCI_DRIVER_NAME, phy);
 	if (r < 0)
 		nfc_err(&client->dev, "Unable to register IRQ handler\n");
 
 	return r;
 }
 
-static int st21nfcb_nci_i2c_remove(struct i2c_client *client)
+static int st_nci_i2c_remove(struct i2c_client *client)
 {
-	struct st21nfcb_i2c_phy *phy = i2c_get_clientdata(client);
+	struct st_nci_i2c_phy *phy = i2c_get_clientdata(client);
 
 	dev_dbg(&client->dev, "%s\n", __func__);
 
 	ndlc_remove(phy->ndlc);
 
-	if (phy->powered)
-		st21nfcb_nci_i2c_disable(phy);
-
 	return 0;
 }
 
 #ifdef CONFIG_OF
-static const struct of_device_id of_st21nfcb_i2c_match[] = {
+static const struct of_device_id of_st_nci_i2c_match[] = {
 	{ .compatible = "st,st21nfcb-i2c", },
 	{ .compatible = "st,st21nfcb_i2c", },
+	{ .compatible = "st,st21nfcc-i2c", },
 	{}
 };
-MODULE_DEVICE_TABLE(of, of_st21nfcb_i2c_match);
+MODULE_DEVICE_TABLE(of, of_st_nci_i2c_match);
 #endif
 
-static struct i2c_driver st21nfcb_nci_i2c_driver = {
+static struct i2c_driver st_nci_i2c_driver = {
 	.driver = {
 		.owner = THIS_MODULE,
-		.name = ST21NFCB_NCI_I2C_DRIVER_NAME,
-		.of_match_table = of_match_ptr(of_st21nfcb_i2c_match),
+		.name = ST_NCI_I2C_DRIVER_NAME,
+		.of_match_table = of_match_ptr(of_st_nci_i2c_match),
 	},
-	.probe = st21nfcb_nci_i2c_probe,
-	.id_table = st21nfcb_nci_i2c_id_table,
-	.remove = st21nfcb_nci_i2c_remove,
+	.probe = st_nci_i2c_probe,
+	.id_table = st_nci_i2c_id_table,
+	.remove = st_nci_i2c_remove,
 };
 
-module_i2c_driver(st21nfcb_nci_i2c_driver);
+module_i2c_driver(st_nci_i2c_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION(DRIVER_DESC);
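
As a reading aid, the framing handled by st_nci_i2c_read() above reduces
to a 4-byte header followed by a big-endian payload length; a minimal
sketch of the same length extraction, reusing only the constants defined
in this file (st_nci_frame_payload_len is a hypothetical helper):

#include <stdint.h>
#include <stddef.h>

#define ST_NCI_I2C_MIN_SIZE 4   /* PCB(1) + NCI Packet header(3) */
#define ST_NCI_I2C_MAX_SIZE 250 /* req 4.2.1 */

/* Returns the number of payload bytes still to read after the header,
 * or -1 where the driver would return -EREMOTEIO (short read) or
 * -EBADMSG (invalid length).
 */
static int st_nci_frame_payload_len(const uint8_t *hdr, size_t hdr_len)
{
	uint16_t len;

	if (hdr_len < ST_NCI_I2C_MIN_SIZE)
		return -1;

	/* big-endian 16-bit length, cf. be16_to_cpu(*(__be16 *)(buf + 2)) */
	len = (uint16_t)((hdr[2] << 8) | hdr[3]);
	if (len > ST_NCI_I2C_MAX_SIZE)
		return -1;

	return len;
}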
diff --git a/drivers/nfc/st21nfcb/ndlc.c b/drivers/nfc/st-nci/ndlc.c
similarity index 94%
rename from drivers/nfc/st21nfcb/ndlc.c
rename to drivers/nfc/st-nci/ndlc.c
index 6014b58..56c6a4c 100644
--- a/drivers/nfc/st21nfcb/ndlc.c
+++ b/drivers/nfc/st-nci/ndlc.c
@@ -1,7 +1,7 @@
 /*
  * Low Level Transport (NDLC) Driver for STMicroelectronics NFC Chip
  *
- * Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
+ * Copyright (C) 2014-2015  STMicroelectronics SAS. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,7 @@
 #include <net/nfc/nci_core.h>
 
 #include "ndlc.h"
-#include "st21nfcb.h"
+#include "st-nci.h"
 
 #define NDLC_TIMER_T1		100
 #define NDLC_TIMER_T1_WAIT	400
@@ -59,13 +59,25 @@
 {
 	/* toggle reset pin */
 	ndlc->ops->enable(ndlc->phy_id);
+	ndlc->powered = 1;
 	return 0;
 }
 EXPORT_SYMBOL(ndlc_open);
 
 void ndlc_close(struct llt_ndlc *ndlc)
 {
+	struct nci_mode_set_cmd cmd;
+
+	cmd.cmd_type = ST_NCI_SET_NFC_MODE;
+	cmd.mode = 0;
+
 	/* toggle reset pin */
+	ndlc->ops->enable(ndlc->phy_id);
+
+	nci_prop_cmd(ndlc->ndev, ST_NCI_CORE_PROP,
+		     sizeof(struct nci_mode_set_cmd), (__u8 *)&cmd);
+
+	ndlc->powered = 0;
 	ndlc->ops->disable(ndlc->phy_id);
 }
 EXPORT_SYMBOL(ndlc_close);
@@ -262,6 +274,7 @@
 	ndlc->ops = phy_ops;
 	ndlc->phy_id = phy_id;
 	ndlc->dev = dev;
+	ndlc->powered = 0;
 
 	*ndlc_id = ndlc;
 
@@ -280,12 +293,14 @@
 
 	INIT_WORK(&ndlc->sm_work, llt_ndlc_sm_work);
 
-	return st21nfcb_nci_probe(ndlc, phy_headroom, phy_tailroom);
+	return st_nci_probe(ndlc, phy_headroom, phy_tailroom);
 }
 EXPORT_SYMBOL(ndlc_probe);
 
 void ndlc_remove(struct llt_ndlc *ndlc)
 {
+	st_nci_remove(ndlc->ndev);
+
 	/* cancel timers */
 	del_timer_sync(&ndlc->t1_timer);
 	del_timer_sync(&ndlc->t2_timer);
@@ -294,7 +309,5 @@
 
 	skb_queue_purge(&ndlc->rcv_q);
 	skb_queue_purge(&ndlc->send_q);
-
-	st21nfcb_nci_remove(ndlc->ndev);
 }
 EXPORT_SYMBOL(ndlc_remove);
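
For reference, the ndlc_close() change above amounts to one proprietary
NCI command sent while the phy is briefly re-enabled; a minimal sketch
of the payload construction, using only nci_prop_cmd() and the
structures this patch set introduces in st-nci.h (st_nci_set_mode_off
is a hypothetical helper):

static void st_nci_set_mode_off(struct llt_ndlc *ndlc)
{
	struct nci_mode_set_cmd cmd = {
		.cmd_type = ST_NCI_SET_NFC_MODE,	/* 0x02 */
		.mode = 0,				/* NFC mode off */
	};

	/* ST_NCI_CORE_PROP (0x01) carries the two payload bytes {0x02, 0x00} */
	nci_prop_cmd(ndlc->ndev, ST_NCI_CORE_PROP,
		     sizeof(cmd), (__u8 *)&cmd);
}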
diff --git a/drivers/nfc/st21nfcb/ndlc.h b/drivers/nfc/st-nci/ndlc.h
similarity index 92%
rename from drivers/nfc/st21nfcb/ndlc.h
rename to drivers/nfc/st-nci/ndlc.h
index b28140e..6361005 100644
--- a/drivers/nfc/st21nfcb/ndlc.h
+++ b/drivers/nfc/st-nci/ndlc.h
@@ -1,7 +1,7 @@
 /*
  * NCI based Driver for STMicroelectronics NFC Chip
  *
- * Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
+ * Copyright (C) 2014-2015  STMicroelectronics SAS. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -43,10 +43,11 @@
 	struct device *dev;
 
 	/*
-	 * < 0 if hardware error occured
+	 * < 0 if hardware error occurred
 	 * and prevents normal operation.
 	 */
 	int hard_fault;
+	int powered;
 };
 
 int ndlc_open(struct llt_ndlc *ndlc);
diff --git a/drivers/nfc/st21nfcb/st21nfcb.h b/drivers/nfc/st-nci/st-nci.h
similarity index 63%
rename from drivers/nfc/st21nfcb/st21nfcb.h
rename to drivers/nfc/st-nci/st-nci.h
index 5ef8a58..850a239 100644
--- a/drivers/nfc/st21nfcb/st21nfcb.h
+++ b/drivers/nfc/st-nci/st-nci.h
@@ -16,23 +16,35 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#ifndef __LOCAL_ST21NFCB_H_
-#define __LOCAL_ST21NFCB_H_
+#ifndef __LOCAL_ST_NCI_H_
+#define __LOCAL_ST_NCI_H_
 
-#include "st21nfcb_se.h"
+#include "st-nci_se.h"
 #include "ndlc.h"
 
 /* Define private flags: */
-#define ST21NFCB_NCI_RUNNING			1
+#define ST_NCI_RUNNING			1
 
-struct st21nfcb_nci_info {
+#define ST_NCI_CORE_PROP                0x01
+#define ST_NCI_SET_NFC_MODE             0x02
+
+struct nci_mode_set_cmd {
+	u8 cmd_type;
+	u8 mode;
+} __packed;
+
+struct nci_mode_set_rsp {
+	u8 status;
+} __packed;
+
+struct st_nci_info {
 	struct llt_ndlc *ndlc;
 	unsigned long flags;
-	struct st21nfcb_se_info se_info;
+	struct st_nci_se_info se_info;
 };
 
-void st21nfcb_nci_remove(struct nci_dev *ndev);
-int st21nfcb_nci_probe(struct llt_ndlc *ndlc, int phy_headroom,
+void st_nci_remove(struct nci_dev *ndev);
+int st_nci_probe(struct llt_ndlc *ndlc, int phy_headroom,
 		int phy_tailroom);
 
-#endif /* __LOCAL_ST21NFCB_H_ */
+#endif /* __LOCAL_ST_NCI_H_ */
diff --git a/drivers/nfc/st-nci/st-nci_se.c b/drivers/nfc/st-nci/st-nci_se.c
new file mode 100644
index 0000000..97addfa
--- /dev/null
+++ b/drivers/nfc/st-nci/st-nci_se.c
@@ -0,0 +1,714 @@
+/*
+ * Secure Element driver for STMicroelectronics NFC NCI chip
+ *
+ * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <linux/delay.h>
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+
+#include "st-nci.h"
+#include "st-nci_se.h"
+
+struct st_nci_pipe_info {
+	u8 pipe_state;
+	u8 src_host_id;
+	u8 src_gate_id;
+	u8 dst_host_id;
+	u8 dst_gate_id;
+} __packed;
+
+/* Hosts */
+#define ST_NCI_HOST_CONTROLLER_ID     0x00
+#define ST_NCI_TERMINAL_HOST_ID       0x01
+#define ST_NCI_UICC_HOST_ID           0x02
+#define ST_NCI_ESE_HOST_ID            0xc0
+
+/* Gates */
+#define ST_NCI_DEVICE_MGNT_GATE       0x01
+#define ST_NCI_APDU_READER_GATE       0xf0
+#define ST_NCI_CONNECTIVITY_GATE      0x41
+
+/* Pipes */
+#define ST_NCI_DEVICE_MGNT_PIPE               0x02
+
+/* Connectivity pipe only */
+#define ST_NCI_SE_COUNT_PIPE_UICC             0x01
+/* Connectivity + APDU Reader pipe */
+#define ST_NCI_SE_COUNT_PIPE_EMBEDDED         0x02
+
+#define ST_NCI_SE_TO_HOT_PLUG			1000 /* msecs */
+#define ST_NCI_SE_TO_PIPES			2000
+
+#define ST_NCI_EVT_HOT_PLUG_IS_INHIBITED(x)   (x->data[0] & 0x80)
+
+#define NCI_HCI_APDU_PARAM_ATR                     0x01
+#define NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY       0x01
+#define NCI_HCI_ADMIN_PARAM_WHITELIST              0x03
+#define NCI_HCI_ADMIN_PARAM_HOST_LIST              0x04
+
+#define ST_NCI_EVT_SE_HARD_RESET		0x20
+#define ST_NCI_EVT_TRANSMIT_DATA		0x10
+#define ST_NCI_EVT_WTX_REQUEST		0x11
+#define ST_NCI_EVT_SE_SOFT_RESET		0x11
+#define ST_NCI_EVT_SE_END_OF_APDU_TRANSFER	0x21
+#define ST_NCI_EVT_HOT_PLUG			0x03
+
+#define ST_NCI_SE_MODE_OFF                    0x00
+#define ST_NCI_SE_MODE_ON                     0x01
+
+#define ST_NCI_EVT_CONNECTIVITY       0x10
+#define ST_NCI_EVT_TRANSACTION        0x12
+
+#define ST_NCI_DM_GETINFO             0x13
+#define ST_NCI_DM_GETINFO_PIPE_LIST   0x02
+#define ST_NCI_DM_GETINFO_PIPE_INFO   0x01
+#define ST_NCI_DM_PIPE_CREATED        0x02
+#define ST_NCI_DM_PIPE_OPEN           0x04
+#define ST_NCI_DM_RF_ACTIVE           0x80
+#define ST_NCI_DM_DISCONNECT          0x30
+
+#define ST_NCI_DM_IS_PIPE_OPEN(p) \
+	((p & 0x0f) == (ST_NCI_DM_PIPE_CREATED | ST_NCI_DM_PIPE_OPEN))
+
+#define ST_NCI_ATR_DEFAULT_BWI        0x04
+
+/*
+ * WT = 2^BWI/10 [s]; convert into msecs and add a safety
+ * margin by doubling this timeout
+ */
+#define ST_NCI_BWI_TO_TIMEOUT(x)      ((1 << x) * 200)
+#define ST_NCI_ATR_GET_Y_FROM_TD(x)   (x >> 4)
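+
+/*
+ * Worked example (annotation, not part of the original patch): with
+ * the default BWI of 4, WT = 2^4/10 s = 1.6 s, so the doubled
+ * timeout is ST_NCI_BWI_TO_TIMEOUT(4) = (1 << 4) * 200 = 3200 msecs.
+ */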
+
+/* If TA is present bit 0 is set */
+#define ST_NCI_ATR_TA_PRESENT(x) (x & 0x01)
+/* If TB is present bit 1 is set */
+#define ST_NCI_ATR_TB_PRESENT(x) (x & 0x02)
+
+#define ST_NCI_NUM_DEVICES           256
+
+static DECLARE_BITMAP(dev_mask, ST_NCI_NUM_DEVICES);
+
+/* These are the mandatory pipes for st_nci */
+static struct nci_hci_gate st_nci_gates[] = {
+	{NCI_HCI_ADMIN_GATE, NCI_HCI_ADMIN_PIPE,
+					ST_NCI_HOST_CONTROLLER_ID},
+	{NCI_HCI_LINK_MGMT_GATE, NCI_HCI_LINK_MGMT_PIPE,
+					ST_NCI_HOST_CONTROLLER_ID},
+	{ST_NCI_DEVICE_MGNT_GATE, ST_NCI_DEVICE_MGNT_PIPE,
+					ST_NCI_HOST_CONTROLLER_ID},
+
+	/* Secure element pipes are created by secure element host */
+	{ST_NCI_CONNECTIVITY_GATE, NCI_HCI_DO_NOT_OPEN_PIPE,
+					ST_NCI_HOST_CONTROLLER_ID},
+	{ST_NCI_APDU_READER_GATE, NCI_HCI_DO_NOT_OPEN_PIPE,
+					ST_NCI_HOST_CONTROLLER_ID},
+};
+
+static u8 st_nci_se_get_bwi(struct nci_dev *ndev)
+{
+	int i;
+	u8 td;
+	struct st_nci_info *info = nci_get_drvdata(ndev);
+
+	/* Bits 8 to 5 of the first TB for T=1 encode BWI from zero to nine */
+	for (i = 1; i < ST_NCI_ESE_MAX_LENGTH; i++) {
+		td = ST_NCI_ATR_GET_Y_FROM_TD(info->se_info.atr[i]);
+		if (ST_NCI_ATR_TA_PRESENT(td))
+			i++;
+		if (ST_NCI_ATR_TB_PRESENT(td)) {
+			i++;
+			return info->se_info.atr[i] >> 4;
+		}
+	}
+	return ST_NCI_ATR_DEFAULT_BWI;
+}
+
+static void st_nci_se_get_atr(struct nci_dev *ndev)
+{
+	struct st_nci_info *info = nci_get_drvdata(ndev);
+	int r;
+	struct sk_buff *skb;
+
+	r = nci_hci_get_param(ndev, ST_NCI_APDU_READER_GATE,
+				NCI_HCI_APDU_PARAM_ATR, &skb);
+	if (r < 0)
+		return;
+
+	if (skb->len <= ST_NCI_ESE_MAX_LENGTH) {
+		memcpy(info->se_info.atr, skb->data, skb->len);
+
+		info->se_info.wt_timeout =
+			ST_NCI_BWI_TO_TIMEOUT(st_nci_se_get_bwi(ndev));
+	}
+	kfree_skb(skb);
+}
+
+int st_nci_hci_load_session(struct nci_dev *ndev)
+{
+	int i, j, r;
+	struct sk_buff *skb_pipe_list, *skb_pipe_info;
+	struct st_nci_pipe_info *dm_pipe_info;
+	u8 pipe_list[] = { ST_NCI_DM_GETINFO_PIPE_LIST,
+			ST_NCI_TERMINAL_HOST_ID};
+	u8 pipe_info[] = { ST_NCI_DM_GETINFO_PIPE_INFO,
+			ST_NCI_TERMINAL_HOST_ID, 0};
+
+	/* On ST_NCI devices pipe numbers are dynamic.
+	 * If pipes are already created, hci_dev_up will fail.
+	 * Doing a clear all pipes is a bad idea because:
+	 * - It does useless EEPROM cycling
+	 * - It might cause issues for secure element support
+	 * (such as removing the connectivity or APDU reader pipe)
+	 * A better approach on ST_NCI is to:
+	 * - get a pipe list for each host.
+	 * (eg: ST_NCI_HOST_CONTROLLER_ID for now).
+	 * (TODO Later on UICC HOST and eSE HOST)
+	 * - get pipe information
+	 * - match the retrieved pipe list against st_nci_gates
+	 * ST_NCI_DEVICE_MGNT_GATE is a proprietary gate
+	 * with ST_NCI_DEVICE_MGNT_PIPE.
+	 * Pipes can be closed and need to be reopened.
+	 */
+	r = nci_hci_connect_gate(ndev, ST_NCI_HOST_CONTROLLER_ID,
+				ST_NCI_DEVICE_MGNT_GATE,
+				ST_NCI_DEVICE_MGNT_PIPE);
+	if (r < 0)
+		goto free_info;
+
+	/* Get pipe list */
+	r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE,
+			ST_NCI_DM_GETINFO, pipe_list, sizeof(pipe_list),
+			&skb_pipe_list);
+	if (r < 0)
+		goto free_info;
+
+	/* Complete the existing gate_pipe table */
+	for (i = 0; i < skb_pipe_list->len; i++) {
+		pipe_info[2] = skb_pipe_list->data[i];
+		r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE,
+					ST_NCI_DM_GETINFO, pipe_info,
+					sizeof(pipe_info), &skb_pipe_info);
+
+		if (r)
+			continue;
+
+		/*
+		 * Match pipe ID and gate ID
+		 * Output format from ST21NFC_DM_GETINFO is:
+		 * - pipe state (1byte)
+		 * - source hid (1byte)
+		 * - source gid (1byte)
+		 * - destination hid (1byte)
+		 * - destination gid (1byte)
+		 */
+		dm_pipe_info = (struct st_nci_pipe_info *)skb_pipe_info->data;
+		if (dm_pipe_info->dst_gate_id == ST_NCI_APDU_READER_GATE &&
+		    dm_pipe_info->src_host_id != ST_NCI_ESE_HOST_ID) {
+			pr_err("Unexpected apdu_reader pipe on host %x\n",
+			       dm_pipe_info->src_host_id);
+			continue;
+		}
+
+		for (j = 0; (j < ARRAY_SIZE(st_nci_gates)) &&
+		     (st_nci_gates[j].gate != dm_pipe_info->dst_gate_id); j++)
+			;
+
+		if (j < ARRAY_SIZE(st_nci_gates) &&
+		    st_nci_gates[j].gate == dm_pipe_info->dst_gate_id &&
+		    ST_NCI_DM_IS_PIPE_OPEN(dm_pipe_info->pipe_state)) {
+			st_nci_gates[j].pipe = pipe_info[2];
+
+			ndev->hci_dev->gate2pipe[st_nci_gates[j].gate] =
+						st_nci_gates[j].pipe;
+			ndev->hci_dev->pipes[st_nci_gates[j].pipe].gate =
+						st_nci_gates[j].gate;
+			ndev->hci_dev->pipes[st_nci_gates[j].pipe].host =
+						dm_pipe_info->src_host_id;
+		}
+	}
+
+	memcpy(ndev->hci_dev->init_data.gates, st_nci_gates,
+	       sizeof(st_nci_gates));
+
+free_info:
+	kfree_skb(skb_pipe_info);
+	kfree_skb(skb_pipe_list);
+	return r;
+}
+EXPORT_SYMBOL_GPL(st_nci_hci_load_session);
+
+static void st_nci_hci_admin_event_received(struct nci_dev *ndev,
+					      u8 event, struct sk_buff *skb)
+{
+	struct st_nci_info *info = nci_get_drvdata(ndev);
+
+	switch (event) {
+	case ST_NCI_EVT_HOT_PLUG:
+		if (info->se_info.se_active) {
+			if (!ST_NCI_EVT_HOT_PLUG_IS_INHIBITED(skb)) {
+				del_timer_sync(&info->se_info.se_active_timer);
+				info->se_info.se_active = false;
+				complete(&info->se_info.req_completion);
+			} else {
+				mod_timer(&info->se_info.se_active_timer,
+				      jiffies +
+				      msecs_to_jiffies(ST_NCI_SE_TO_PIPES));
+			}
+		}
+	break;
+	}
+}
+
+static int st_nci_hci_apdu_reader_event_received(struct nci_dev *ndev,
+						   u8 event,
+						   struct sk_buff *skb)
+{
+	int r = 0;
+	struct st_nci_info *info = nci_get_drvdata(ndev);
+
+	pr_debug("apdu reader gate event: %x\n", event);
+
+	switch (event) {
+	case ST_NCI_EVT_TRANSMIT_DATA:
+		del_timer_sync(&info->se_info.bwi_timer);
+		info->se_info.bwi_active = false;
+		info->se_info.cb(info->se_info.cb_context,
+				 skb->data, skb->len, 0);
+	break;
+	case ST_NCI_EVT_WTX_REQUEST:
+		mod_timer(&info->se_info.bwi_timer, jiffies +
+			  msecs_to_jiffies(info->se_info.wt_timeout));
+	break;
+	}
+
+	kfree_skb(skb);
+	return r;
+}
+
+/*
+ * Returns:
+ * <= 0: driver handled the event, skb consumed
+ *    1: driver does not handle the event, please do standard processing
+ */
+static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
+						u8 host, u8 event,
+						struct sk_buff *skb)
+{
+	int r = 0;
+	struct device *dev = &ndev->nfc_dev->dev;
+	struct nfc_evt_transaction *transaction;
+
+	pr_debug("connectivity gate event: %x\n", event);
+
+	switch (event) {
+	case ST_NCI_EVT_CONNECTIVITY:
+
+	break;
+	case ST_NCI_EVT_TRANSACTION:
+		/* According to specification etsi 102 622
+		 * 11.2.2.4 EVT_TRANSACTION Table 52
+		 * Description  Tag     Length
+		 * AID          81      5 to 16
+		 * PARAMETERS   82      0 to 255
+		 */
+		if (skb->len < NFC_MIN_AID_LENGTH + 2 ||
+		    skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
+			return -EPROTO;
+
+		transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
+					    skb->len - 2, GFP_KERNEL);
+		if (!transaction)
+			return -ENOMEM;
+
+		transaction->aid_len = skb->data[1];
+		memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
+
+		/* Check next byte is PARAMETERS tag (82) */
+		if (skb->data[transaction->aid_len + 2] !=
+		    NFC_EVT_TRANSACTION_PARAMS_TAG)
+			return -EPROTO;
+
+		transaction->params_len = skb->data[transaction->aid_len + 3];
+		memcpy(transaction->params, skb->data +
+		       transaction->aid_len + 4, transaction->params_len);
+
+		r = nfc_se_transaction(ndev->nfc_dev, host, transaction);
+		break;
+	default:
+		return 1;
+	}
+	kfree_skb(skb);
+	return r;
+}
+
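+/*
+ * Worked example (annotation, not part of the original patch): a
+ * well-formed EVT_TRANSACTION payload, as parsed above:
+ *
+ *   81 05 a0 00 00 03 06   AID TLV: tag 0x81, aid_len = 5
+ *   82 02 01 02            PARAMETERS TLV: tag 0x82, params_len = 2
+ *
+ * The PARAMETERS tag is checked at data[aid_len + 2] = data[7] and
+ * its length read from data[aid_len + 3] = data[8].
+ */
+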
+void st_nci_hci_event_received(struct nci_dev *ndev, u8 pipe,
+				 u8 event, struct sk_buff *skb)
+{
+	u8 gate = ndev->hci_dev->pipes[pipe].gate;
+	u8 host = ndev->hci_dev->pipes[pipe].host;
+
+	switch (gate) {
+	case NCI_HCI_ADMIN_GATE:
+		st_nci_hci_admin_event_received(ndev, event, skb);
+	break;
+	case ST_NCI_APDU_READER_GATE:
+		st_nci_hci_apdu_reader_event_received(ndev, event, skb);
+	break;
+	case ST_NCI_CONNECTIVITY_GATE:
+		st_nci_hci_connectivity_event_received(ndev, host, event,
+							 skb);
+	break;
+	}
+}
+EXPORT_SYMBOL_GPL(st_nci_hci_event_received);
+
+
+void st_nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd,
+			       struct sk_buff *skb)
+{
+	struct st_nci_info *info = nci_get_drvdata(ndev);
+	u8 gate = ndev->hci_dev->pipes[pipe].gate;
+
+	pr_debug("cmd: %x\n", cmd);
+
+	switch (cmd) {
+	case NCI_HCI_ANY_OPEN_PIPE:
+		if (gate != ST_NCI_APDU_READER_GATE &&
+		    ndev->hci_dev->pipes[pipe].host != ST_NCI_UICC_HOST_ID)
+			ndev->hci_dev->count_pipes++;
+
+		if (ndev->hci_dev->count_pipes ==
+		    ndev->hci_dev->expected_pipes) {
+			del_timer_sync(&info->se_info.se_active_timer);
+			info->se_info.se_active = false;
+			ndev->hci_dev->count_pipes = 0;
+			complete(&info->se_info.req_completion);
+		}
+	break;
+	}
+}
+EXPORT_SYMBOL_GPL(st_nci_hci_cmd_received);
+
+/*
+ * Remarks: On some early st_nci firmware, nci_nfcee_mode_set(0)
+ * is rejected
+ */
+static int st_nci_control_se(struct nci_dev *ndev, u8 se_idx,
+				   u8 state)
+{
+	struct st_nci_info *info = nci_get_drvdata(ndev);
+	int r;
+	struct sk_buff *sk_host_list;
+	u8 host_id;
+
+	switch (se_idx) {
+	case ST_NCI_UICC_HOST_ID:
+		ndev->hci_dev->count_pipes = 0;
+		ndev->hci_dev->expected_pipes = ST_NCI_SE_COUNT_PIPE_UICC;
+		break;
+	case ST_NCI_ESE_HOST_ID:
+		ndev->hci_dev->count_pipes = 0;
+		ndev->hci_dev->expected_pipes = ST_NCI_SE_COUNT_PIPE_EMBEDDED;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * Wait for an EVT_HOT_PLUG in order to
+	 * retrieve a relevant host list.
+	 */
+	reinit_completion(&info->se_info.req_completion);
+	r = nci_nfcee_mode_set(ndev, se_idx, NCI_NFCEE_ENABLE);
+	if (r != NCI_STATUS_OK)
+		return r;
+
+	mod_timer(&info->se_info.se_active_timer, jiffies +
+		msecs_to_jiffies(ST_NCI_SE_TO_HOT_PLUG));
+	info->se_info.se_active = true;
+
+	/* Ignore return value and check in any case the host_list */
+	wait_for_completion_interruptible(&info->se_info.req_completion);
+
+	/* There might be some "collision" after receiving a HOT_PLUG event.
+	 * This may cause the CLF not to answer the next HCI command.
+	 * There is no possible synchronization to prevent this.
+	 * Adding a small delay is the only way to work around the issue.
+	 */
+	usleep_range(3000, 5000);
+
+	r = nci_hci_get_param(ndev, NCI_HCI_ADMIN_GATE,
+			NCI_HCI_ADMIN_PARAM_HOST_LIST, &sk_host_list);
+	if (r != NCI_HCI_ANY_OK)
+		return r;
+
+	host_id = sk_host_list->data[sk_host_list->len - 1];
+	kfree_skb(sk_host_list);
+	if (state == ST_NCI_SE_MODE_ON && host_id == se_idx)
+		return se_idx;
+	else if (state == ST_NCI_SE_MODE_OFF && host_id != se_idx)
+		return se_idx;
+
+	return -1;
+}
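+
+/*
+ * Summary (annotation, not part of the original patch) of the
+ * st_nci_control_se() handshake above:
+ *
+ *   nci_nfcee_mode_set(se_idx, NCI_NFCEE_ENABLE)
+ *     -> wait up to ST_NCI_SE_TO_HOT_PLUG (1 s) for EVT_HOT_PLUG
+ *     -> usleep_range(3000, 5000) to dodge the post-HOT_PLUG collision
+ *     -> read NCI_HCI_ADMIN_PARAM_HOST_LIST from the admin gate
+ *     -> return se_idx if the last listed host equals se_idx for
+ *        ST_NCI_SE_MODE_ON (or differs for MODE_OFF), else -1
+ */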
+
+int st_nci_disable_se(struct nci_dev *ndev, u32 se_idx)
+{
+	int r;
+
+	pr_debug("st_nci_disable_se\n");
+
+	if (se_idx == NFC_SE_EMBEDDED) {
+		r = nci_hci_send_event(ndev, ST_NCI_APDU_READER_GATE,
+				ST_NCI_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0);
+		if (r < 0)
+			return r;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(st_nci_disable_se);
+
+int st_nci_enable_se(struct nci_dev *ndev, u32 se_idx)
+{
+	int r;
+
+	pr_debug("st_nci_enable_se\n");
+
+	if (se_idx == ST_NCI_HCI_HOST_ID_ESE) {
+		r = nci_hci_send_event(ndev, ST_NCI_APDU_READER_GATE,
+				ST_NCI_EVT_SE_SOFT_RESET, NULL, 0);
+		if (r < 0)
+			return r;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(st_nci_enable_se);
+
+static int st_nci_hci_network_init(struct nci_dev *ndev)
+{
+	struct core_conn_create_dest_spec_params *dest_params;
+	struct dest_spec_params spec_params;
+	struct nci_conn_info    *conn_info;
+	int r, dev_num;
+
+	dest_params =
+		kzalloc(sizeof(struct core_conn_create_dest_spec_params) +
+			sizeof(struct dest_spec_params), GFP_KERNEL);
+	if (dest_params == NULL) {
+		r = -ENOMEM;
+		goto exit;
+	}
+
+	dest_params->type = NCI_DESTINATION_SPECIFIC_PARAM_NFCEE_TYPE;
+	dest_params->length = sizeof(struct dest_spec_params);
+	spec_params.id = ndev->hci_dev->nfcee_id;
+	spec_params.protocol = NCI_NFCEE_INTERFACE_HCI_ACCESS;
+	memcpy(dest_params->value, &spec_params,
+	       sizeof(struct dest_spec_params));
+	r = nci_core_conn_create(ndev, NCI_DESTINATION_NFCEE, 1,
+				 sizeof(struct core_conn_create_dest_spec_params) +
+				 sizeof(struct dest_spec_params),
+				 dest_params);
+	if (r != NCI_STATUS_OK)
+		goto free_dest_params;
+
+	conn_info = ndev->hci_dev->conn_info;
+	if (!conn_info)
+		goto free_dest_params;
+
+	memcpy(ndev->hci_dev->init_data.gates, st_nci_gates,
+	       sizeof(st_nci_gates));
+
+	/*
+	 * The session id must include persistent info (driver name +
+	 * i2c bus addr) to discriminate between two identical chips
+	 */
+	dev_num = find_first_zero_bit(dev_mask, ST_NCI_NUM_DEVICES);
+	if (dev_num >= ST_NCI_NUM_DEVICES) {
+		r = -ENODEV;
+		goto free_dest_params;
+	}
+
+	scnprintf(ndev->hci_dev->init_data.session_id,
+		  sizeof(ndev->hci_dev->init_data.session_id),
+		  "%s%2x", "ST21BH", dev_num);
+
+	r = nci_hci_dev_session_init(ndev);
+	if (r != NCI_HCI_ANY_OK)
+		goto free_dest_params;
+
+	r = nci_nfcee_mode_set(ndev, ndev->hci_dev->conn_info->id,
+			       NCI_NFCEE_ENABLE);
+	if (r != NCI_STATUS_OK)
+		goto free_dest_params;
+
+free_dest_params:
+	kfree(dest_params);
+
+exit:
+	return r;
+}
+
+int st_nci_discover_se(struct nci_dev *ndev)
+{
+	u8 param[2];
+	int r;
+	int se_count = 0;
+
+	pr_debug("st_nci_discover_se\n");
+
+	r = st_nci_hci_network_init(ndev);
+	if (r != 0)
+		return r;
+
+	param[0] = ST_NCI_UICC_HOST_ID;
+	param[1] = ST_NCI_HCI_HOST_ID_ESE;
+	r = nci_hci_set_param(ndev, NCI_HCI_ADMIN_GATE,
+				NCI_HCI_ADMIN_PARAM_WHITELIST,
+				param, sizeof(param));
+	if (r != NCI_HCI_ANY_OK)
+		return r;
+
+	r = st_nci_control_se(ndev, ST_NCI_UICC_HOST_ID,
+				ST_NCI_SE_MODE_ON);
+	if (r == ST_NCI_UICC_HOST_ID) {
+		nfc_add_se(ndev->nfc_dev, ST_NCI_UICC_HOST_ID, NFC_SE_UICC);
+		se_count++;
+	}
+
+	/* Try to enable eSE in order to check availability */
+	r = st_nci_control_se(ndev, ST_NCI_HCI_HOST_ID_ESE,
+				ST_NCI_SE_MODE_ON);
+	if (r == ST_NCI_HCI_HOST_ID_ESE) {
+		nfc_add_se(ndev->nfc_dev, ST_NCI_HCI_HOST_ID_ESE,
+			   NFC_SE_EMBEDDED);
+		se_count++;
+		st_nci_se_get_atr(ndev);
+	}
+
+	return !se_count;
+}
+EXPORT_SYMBOL_GPL(st_nci_discover_se);
+
+int st_nci_se_io(struct nci_dev *ndev, u32 se_idx,
+		       u8 *apdu, size_t apdu_length,
+		       se_io_cb_t cb, void *cb_context)
+{
+	struct st_nci_info *info = nci_get_drvdata(ndev);
+
+	pr_debug("\n");
+
+	switch (se_idx) {
+	case ST_NCI_HCI_HOST_ID_ESE:
+		info->se_info.cb = cb;
+		info->se_info.cb_context = cb_context;
+		mod_timer(&info->se_info.bwi_timer, jiffies +
+			  msecs_to_jiffies(info->se_info.wt_timeout));
+		info->se_info.bwi_active = true;
+		return nci_hci_send_event(ndev, ST_NCI_APDU_READER_GATE,
+					ST_NCI_EVT_TRANSMIT_DATA, apdu,
+					apdu_length);
+	default:
+		return -ENODEV;
+	}
+}
+EXPORT_SYMBOL(st_nci_se_io);
+
+static void st_nci_se_wt_timeout(unsigned long data)
+{
+	/*
+	 * No answer from the secure element
+	 * within the defined timeout.
+	 * Send a reset request as a recovery procedure:
+	 * we first try a software reset of the secure element. If the
+	 * next command still does not answer in time, we send the CLF
+	 * a secure element hardware reset request.
+	 */
+	/* hardware reset managed through VCC_UICC_OUT power supply */
+	u8 param = 0x01;
+	struct st_nci_info *info = (struct st_nci_info *) data;
+
+	pr_debug("\n");
+
+	info->se_info.bwi_active = false;
+
+	if (!info->se_info.xch_error) {
+		info->se_info.xch_error = true;
+		nci_hci_send_event(info->ndlc->ndev, ST_NCI_APDU_READER_GATE,
+				ST_NCI_EVT_SE_SOFT_RESET, NULL, 0);
+	} else {
+		info->se_info.xch_error = false;
+		nci_hci_send_event(info->ndlc->ndev, ST_NCI_DEVICE_MGNT_GATE,
+				ST_NCI_EVT_SE_HARD_RESET, &param, 1);
+	}
+	info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
+}
+
+static void st_nci_se_activation_timeout(unsigned long data)
+{
+	struct st_nci_info *info = (struct st_nci_info *) data;
+
+	pr_debug("\n");
+
+	info->se_info.se_active = false;
+
+	complete(&info->se_info.req_completion);
+}
+
+int st_nci_se_init(struct nci_dev *ndev)
+{
+	struct st_nci_info *info = nci_get_drvdata(ndev);
+
+	init_completion(&info->se_info.req_completion);
+	/* initialize timers */
+	init_timer(&info->se_info.bwi_timer);
+	info->se_info.bwi_timer.data = (unsigned long)info;
+	info->se_info.bwi_timer.function = st_nci_se_wt_timeout;
+	info->se_info.bwi_active = false;
+
+	init_timer(&info->se_info.se_active_timer);
+	info->se_info.se_active_timer.data = (unsigned long)info;
+	info->se_info.se_active_timer.function =
+			st_nci_se_activation_timeout;
+	info->se_info.se_active = false;
+
+	info->se_info.xch_error = false;
+
+	info->se_info.wt_timeout =
+		ST_NCI_BWI_TO_TIMEOUT(ST_NCI_ATR_DEFAULT_BWI);
+
+	return 0;
+}
+EXPORT_SYMBOL(st_nci_se_init);
+
+void st_nci_se_deinit(struct nci_dev *ndev)
+{
+	struct st_nci_info *info = nci_get_drvdata(ndev);
+
+	if (info->se_info.bwi_active)
+		del_timer_sync(&info->se_info.bwi_timer);
+	if (info->se_info.se_active)
+		del_timer_sync(&info->se_info.se_active_timer);
+
+	info->se_info.se_active = false;
+	info->se_info.bwi_active = false;
+}
+EXPORT_SYMBOL(st_nci_se_deinit);
+
diff --git a/drivers/nfc/st-nci/st-nci_se.h b/drivers/nfc/st-nci/st-nci_se.h
new file mode 100644
index 0000000..ea66e87
--- /dev/null
+++ b/drivers/nfc/st-nci/st-nci_se.h
@@ -0,0 +1,61 @@
+/*
+ * Secure Element Driver for STMicroelectronics NFC NCI Chip
+ *
+ * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __LOCAL_ST_NCI_SE_H_
+#define __LOCAL_ST_NCI_SE_H_
+
+/*
+ * ref ISO7816-3 chap 8.1. the initial character TS is followed by a
+ * sequence of at most 32 characters.
+ */
+#define ST_NCI_ESE_MAX_LENGTH	33
+#define ST_NCI_HCI_HOST_ID_ESE	0xc0
+
+struct st_nci_se_info {
+	u8 atr[ST_NCI_ESE_MAX_LENGTH];
+	struct completion req_completion;
+
+	struct timer_list bwi_timer;
+	int wt_timeout; /* in msecs */
+	bool bwi_active;
+
+	struct timer_list se_active_timer;
+	bool se_active;
+
+	bool xch_error;
+
+	se_io_cb_t cb;
+	void *cb_context;
+};
+
+int st_nci_se_init(struct nci_dev *ndev);
+void st_nci_se_deinit(struct nci_dev *ndev);
+
+int st_nci_discover_se(struct nci_dev *ndev);
+int st_nci_enable_se(struct nci_dev *ndev, u32 se_idx);
+int st_nci_disable_se(struct nci_dev *ndev, u32 se_idx);
+int st_nci_se_io(struct nci_dev *ndev, u32 se_idx,
+		u8 *apdu, size_t apdu_length,
+		se_io_cb_t cb, void *cb_context);
+int st_nci_hci_load_session(struct nci_dev *ndev);
+void st_nci_hci_event_received(struct nci_dev *ndev, u8 pipe,
+			u8 event, struct sk_buff *skb);
+void st_nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd,
+			struct sk_buff *skb);
+
+
+#endif /* __LOCAL_ST_NCI_SE_H_ */
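
The header above is the whole secure-element-facing surface of the
driver. A minimal sketch of how a caller would drive the eSE through
st_nci_se_io(), assuming the se_io_cb_t callback type from
<net/nfc/nfc.h> (my_apdu_done and my_select_aid are hypothetical):

static void my_apdu_done(void *context, u8 *rx, size_t rx_len, int err)
{
	if (err == -ETIME)
		/* BWI timer expired; the driver already requested an SE reset */
		pr_warn("eSE did not answer in time\n");
	else
		print_hex_dump_bytes("R-APDU: ", DUMP_PREFIX_NONE, rx, rx_len);
}

static int my_select_aid(struct nci_dev *ndev)
{
	/* ISO 7816-4 SELECT, forwarded as ST_NCI_EVT_TRANSMIT_DATA */
	u8 apdu[] = { 0x00, 0xa4, 0x04, 0x00, 0x00 };

	return st_nci_se_io(ndev, ST_NCI_HCI_HOST_ID_ESE,
			    apdu, sizeof(apdu), my_apdu_done, NULL);
}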
diff --git a/drivers/nfc/st21nfcb/Kconfig b/drivers/nfc/st21nfcb/Kconfig
deleted file mode 100644
index e0322dd..0000000
--- a/drivers/nfc/st21nfcb/Kconfig
+++ /dev/null
@@ -1,22 +0,0 @@
-config NFC_ST21NFCB
-	tristate "STMicroelectronics ST21NFCB NFC driver"
-	depends on NFC_NCI
-	default n
-	---help---
-	  STMicroelectronics ST21NFCB core driver. It implements the chipset
-	  NCI logic and hooks into the NFC kernel APIs. Physical layers will
-	  register against it.
-
-	  To compile this driver as a module, choose m here. The module will
-	  be called st21nfcb.
-	  Say N if unsure.
-
-config NFC_ST21NFCB_I2C
-	tristate "NFC ST21NFCB i2c support"
-	depends on NFC_ST21NFCB && I2C
-	---help---
-	  This module adds support for the STMicroelectronics st21nfcb i2c interface.
-	  Select this if your platform is using the i2c bus.
-
-	  If you choose to build a module, it'll be called st21nfcb_i2c.
-	  Say N if unsure.
diff --git a/drivers/nfc/st21nfcb/Makefile b/drivers/nfc/st21nfcb/Makefile
deleted file mode 100644
index ce659a9..0000000
--- a/drivers/nfc/st21nfcb/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for ST21NFCB NCI based NFC driver
-#
-
-st21nfcb_nci-objs = ndlc.o st21nfcb.o st21nfcb_se.o
-obj-$(CONFIG_NFC_ST21NFCB)     += st21nfcb_nci.o
-
-st21nfcb_i2c-objs = i2c.o
-obj-$(CONFIG_NFC_ST21NFCB_I2C) += st21nfcb_i2c.o
diff --git a/drivers/nfc/st21nfcb/st21nfcb.c b/drivers/nfc/st21nfcb/st21nfcb.c
deleted file mode 100644
index ca9871a..0000000
--- a/drivers/nfc/st21nfcb/st21nfcb.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * NCI based Driver for STMicroelectronics NFC Chip
- *
- * Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/nfc.h>
-#include <net/nfc/nci.h>
-#include <net/nfc/nci_core.h>
-
-#include "st21nfcb.h"
-#include "st21nfcb_se.h"
-
-#define DRIVER_DESC "NCI NFC driver for ST21NFCB"
-
-#define ST21NFCB_NCI1_X_PROPRIETARY_ISO15693 0x83
-
-static int st21nfcb_nci_open(struct nci_dev *ndev)
-{
-	struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
-	int r;
-
-	if (test_and_set_bit(ST21NFCB_NCI_RUNNING, &info->flags))
-		return 0;
-
-	r = ndlc_open(info->ndlc);
-	if (r)
-		clear_bit(ST21NFCB_NCI_RUNNING, &info->flags);
-
-	return r;
-}
-
-static int st21nfcb_nci_close(struct nci_dev *ndev)
-{
-	struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
-
-	if (!test_and_clear_bit(ST21NFCB_NCI_RUNNING, &info->flags))
-		return 0;
-
-	ndlc_close(info->ndlc);
-
-	return 0;
-}
-
-static int st21nfcb_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
-{
-	struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
-
-	skb->dev = (void *)ndev;
-
-	if (!test_bit(ST21NFCB_NCI_RUNNING, &info->flags))
-		return -EBUSY;
-
-	return ndlc_send(info->ndlc, skb);
-}
-
-static __u32 st21nfcb_nci_get_rfprotocol(struct nci_dev *ndev,
-					 __u8 rf_protocol)
-{
-	return rf_protocol == ST21NFCB_NCI1_X_PROPRIETARY_ISO15693 ?
-		NFC_PROTO_ISO15693_MASK : 0;
-}
-
-static struct nci_ops st21nfcb_nci_ops = {
-	.open = st21nfcb_nci_open,
-	.close = st21nfcb_nci_close,
-	.send = st21nfcb_nci_send,
-	.get_rfprotocol = st21nfcb_nci_get_rfprotocol,
-	.discover_se = st21nfcb_nci_discover_se,
-	.enable_se = st21nfcb_nci_enable_se,
-	.disable_se = st21nfcb_nci_disable_se,
-	.se_io = st21nfcb_nci_se_io,
-	.hci_load_session = st21nfcb_hci_load_session,
-	.hci_event_received = st21nfcb_hci_event_received,
-	.hci_cmd_received = st21nfcb_hci_cmd_received,
-};
-
-int st21nfcb_nci_probe(struct llt_ndlc *ndlc, int phy_headroom,
-		       int phy_tailroom)
-{
-	struct st21nfcb_nci_info *info;
-	int r;
-	u32 protocols;
-
-	info = devm_kzalloc(ndlc->dev,
-			sizeof(struct st21nfcb_nci_info), GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
-
-	protocols = NFC_PROTO_JEWEL_MASK
-		| NFC_PROTO_MIFARE_MASK
-		| NFC_PROTO_FELICA_MASK
-		| NFC_PROTO_ISO14443_MASK
-		| NFC_PROTO_ISO14443_B_MASK
-		| NFC_PROTO_ISO15693_MASK
-		| NFC_PROTO_NFC_DEP_MASK;
-
-	ndlc->ndev = nci_allocate_device(&st21nfcb_nci_ops, protocols,
-					phy_headroom, phy_tailroom);
-	if (!ndlc->ndev) {
-		pr_err("Cannot allocate nfc ndev\n");
-		return -ENOMEM;
-	}
-	info->ndlc = ndlc;
-
-	nci_set_drvdata(ndlc->ndev, info);
-
-	r = nci_register_device(ndlc->ndev);
-	if (r) {
-		pr_err("Cannot register nfc device to nci core\n");
-		nci_free_device(ndlc->ndev);
-		return r;
-	}
-
-	return st21nfcb_se_init(ndlc->ndev);
-}
-EXPORT_SYMBOL_GPL(st21nfcb_nci_probe);
-
-void st21nfcb_nci_remove(struct nci_dev *ndev)
-{
-	struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
-
-	nci_unregister_device(ndev);
-	nci_free_device(ndev);
-	kfree(info);
-}
-EXPORT_SYMBOL_GPL(st21nfcb_nci_remove);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/st21nfcb/st21nfcb_se.c b/drivers/nfc/st21nfcb/st21nfcb_se.c
deleted file mode 100644
index 24862a5..0000000
--- a/drivers/nfc/st21nfcb/st21nfcb_se.c
+++ /dev/null
@@ -1,713 +0,0 @@
-/*
- * NCI based Driver for STMicroelectronics NFC Chip
- *
- * Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/nfc.h>
-#include <linux/delay.h>
-#include <net/nfc/nci.h>
-#include <net/nfc/nci_core.h>
-
-#include "st21nfcb.h"
-#include "st21nfcb_se.h"
-
-struct st21nfcb_pipe_info {
-	u8 pipe_state;
-	u8 src_host_id;
-	u8 src_gate_id;
-	u8 dst_host_id;
-	u8 dst_gate_id;
-} __packed;
-
-/* Hosts */
-#define ST21NFCB_HOST_CONTROLLER_ID     0x00
-#define ST21NFCB_TERMINAL_HOST_ID       0x01
-#define ST21NFCB_UICC_HOST_ID           0x02
-#define ST21NFCB_ESE_HOST_ID            0xc0
-
-/* Gates */
-#define ST21NFCB_DEVICE_MGNT_GATE       0x01
-#define ST21NFCB_APDU_READER_GATE       0xf0
-#define ST21NFCB_CONNECTIVITY_GATE      0x41
-
-/* Pipes */
-#define ST21NFCB_DEVICE_MGNT_PIPE               0x02
-
-/* Connectivity pipe only */
-#define ST21NFCB_SE_COUNT_PIPE_UICC             0x01
-/* Connectivity + APDU Reader pipe */
-#define ST21NFCB_SE_COUNT_PIPE_EMBEDDED         0x02
-
-#define ST21NFCB_SE_TO_HOT_PLUG			1000 /* msecs */
-#define ST21NFCB_SE_TO_PIPES			2000
-
-#define ST21NFCB_EVT_HOT_PLUG_IS_INHIBITED(x)   (x->data[0] & 0x80)
-
-#define NCI_HCI_APDU_PARAM_ATR                     0x01
-#define NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY       0x01
-#define NCI_HCI_ADMIN_PARAM_WHITELIST              0x03
-#define NCI_HCI_ADMIN_PARAM_HOST_LIST              0x04
-
-#define ST21NFCB_EVT_SE_HARD_RESET		0x20
-#define ST21NFCB_EVT_TRANSMIT_DATA		0x10
-#define ST21NFCB_EVT_WTX_REQUEST		0x11
-#define ST21NFCB_EVT_SE_SOFT_RESET		0x11
-#define ST21NFCB_EVT_SE_END_OF_APDU_TRANSFER	0x21
-#define ST21NFCB_EVT_HOT_PLUG			0x03
-
-#define ST21NFCB_SE_MODE_OFF                    0x00
-#define ST21NFCB_SE_MODE_ON                     0x01
-
-#define ST21NFCB_EVT_CONNECTIVITY       0x10
-#define ST21NFCB_EVT_TRANSACTION        0x12
-
-#define ST21NFCB_DM_GETINFO             0x13
-#define ST21NFCB_DM_GETINFO_PIPE_LIST   0x02
-#define ST21NFCB_DM_GETINFO_PIPE_INFO   0x01
-#define ST21NFCB_DM_PIPE_CREATED        0x02
-#define ST21NFCB_DM_PIPE_OPEN           0x04
-#define ST21NFCB_DM_RF_ACTIVE           0x80
-#define ST21NFCB_DM_DISCONNECT          0x30
-
-#define ST21NFCB_DM_IS_PIPE_OPEN(p) \
-	((p & 0x0f) == (ST21NFCB_DM_PIPE_CREATED | ST21NFCB_DM_PIPE_OPEN))
-
-#define ST21NFCB_ATR_DEFAULT_BWI        0x04
-
-/*
- * WT = 2^BWI/10[s], convert into msecs and add a secure
- * room by increasing by 2 this timeout
- */
-#define ST21NFCB_BWI_TO_TIMEOUT(x)      ((1 << x) * 200)
-#define ST21NFCB_ATR_GET_Y_FROM_TD(x)   (x >> 4)
-
-/* If TA is present bit 0 is set */
-#define ST21NFCB_ATR_TA_PRESENT(x) (x & 0x01)
-/* If TB is present bit 1 is set */
-#define ST21NFCB_ATR_TB_PRESENT(x) (x & 0x02)
-
-#define ST21NFCB_NUM_DEVICES           256
-
-static DECLARE_BITMAP(dev_mask, ST21NFCB_NUM_DEVICES);
-
-/* Here are the mandatory pipe for st21nfcb */
-static struct nci_hci_gate st21nfcb_gates[] = {
-	{NCI_HCI_ADMIN_GATE, NCI_HCI_ADMIN_PIPE,
-					ST21NFCB_HOST_CONTROLLER_ID},
-	{NCI_HCI_LINK_MGMT_GATE, NCI_HCI_LINK_MGMT_PIPE,
-					ST21NFCB_HOST_CONTROLLER_ID},
-	{ST21NFCB_DEVICE_MGNT_GATE, ST21NFCB_DEVICE_MGNT_PIPE,
-					ST21NFCB_HOST_CONTROLLER_ID},
-
-	/* Secure element pipes are created by secure element host */
-	{ST21NFCB_CONNECTIVITY_GATE, NCI_HCI_DO_NOT_OPEN_PIPE,
-					ST21NFCB_HOST_CONTROLLER_ID},
-	{ST21NFCB_APDU_READER_GATE, NCI_HCI_DO_NOT_OPEN_PIPE,
-					ST21NFCB_HOST_CONTROLLER_ID},
-};
-
-static u8 st21nfcb_se_get_bwi(struct nci_dev *ndev)
-{
-	int i;
-	u8 td;
-	struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
-
-	/* Bits 8 to 5 of the first TB for T=1 encode BWI from zero to nine */
-	for (i = 1; i < ST21NFCB_ESE_MAX_LENGTH; i++) {
-		td = ST21NFCB_ATR_GET_Y_FROM_TD(info->se_info.atr[i]);
-		if (ST21NFCB_ATR_TA_PRESENT(td))
-			i++;
-		if (ST21NFCB_ATR_TB_PRESENT(td)) {
-			i++;
-			return info->se_info.atr[i] >> 4;
-		}
-	}
-	return ST21NFCB_ATR_DEFAULT_BWI;
-}
-
-static void st21nfcb_se_get_atr(struct nci_dev *ndev)
-{
-	struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
-	int r;
-	struct sk_buff *skb;
-
-	r = nci_hci_get_param(ndev, ST21NFCB_APDU_READER_GATE,
-				NCI_HCI_APDU_PARAM_ATR, &skb);
-	if (r < 0)
-		return;
-
-	if (skb->len <= ST21NFCB_ESE_MAX_LENGTH) {
-		memcpy(info->se_info.atr, skb->data, skb->len);
-
-		info->se_info.wt_timeout =
-			ST21NFCB_BWI_TO_TIMEOUT(st21nfcb_se_get_bwi(ndev));
-	}
-	kfree_skb(skb);
-}
-
-int st21nfcb_hci_load_session(struct nci_dev *ndev)
-{
-	int i, j, r;
-	struct sk_buff *skb_pipe_list, *skb_pipe_info;
-	struct st21nfcb_pipe_info *dm_pipe_info;
-	u8 pipe_list[] = { ST21NFCB_DM_GETINFO_PIPE_LIST,
-			ST21NFCB_TERMINAL_HOST_ID};
-	u8 pipe_info[] = { ST21NFCB_DM_GETINFO_PIPE_INFO,
-			ST21NFCB_TERMINAL_HOST_ID, 0};
-
-	/* On ST21NFCB device pipes number are dynamics
-	 * If pipes are already created, hci_dev_up will fail.
-	 * Doing a clear all pipe is a bad idea because:
-	 * - It does useless EEPROM cycling
-	 * - It might cause issue for secure elements support
-	 * (such as removing connectivity or APDU reader pipe)
-	 * A better approach on ST21NFCB is to:
-	 * - get a pipe list for each host.
-	 * (eg: ST21NFCB_HOST_CONTROLLER_ID for now).
-	 * (TODO Later on UICC HOST and eSE HOST)
-	 * - get pipe information
-	 * - match retrieved pipe list in st21nfcb_gates
-	 * ST21NFCB_DEVICE_MGNT_GATE is a proprietary gate
-	 * with ST21NFCB_DEVICE_MGNT_PIPE.
-	 * Pipe can be closed and need to be open.
-	 */
-	r = nci_hci_connect_gate(ndev, ST21NFCB_HOST_CONTROLLER_ID,
-				ST21NFCB_DEVICE_MGNT_GATE,
-				ST21NFCB_DEVICE_MGNT_PIPE);
-	if (r < 0)
-		goto free_info;
-
-	/* Get pipe list */
-	r = nci_hci_send_cmd(ndev, ST21NFCB_DEVICE_MGNT_GATE,
-			ST21NFCB_DM_GETINFO, pipe_list, sizeof(pipe_list),
-			&skb_pipe_list);
-	if (r < 0)
-		goto free_info;
-
-	/* Complete the existing gate_pipe table */
-	for (i = 0; i < skb_pipe_list->len; i++) {
-		pipe_info[2] = skb_pipe_list->data[i];
-		r = nci_hci_send_cmd(ndev, ST21NFCB_DEVICE_MGNT_GATE,
-					ST21NFCB_DM_GETINFO, pipe_info,
-					sizeof(pipe_info), &skb_pipe_info);
-
-		if (r)
-			continue;
-
-		/*
-		 * Match pipe ID and gate ID
-		 * Output format from ST21NFC_DM_GETINFO is:
-		 * - pipe state (1byte)
-		 * - source hid (1byte)
-		 * - source gid (1byte)
-		 * - destination hid (1byte)
-		 * - destination gid (1byte)
-		 */
-		dm_pipe_info = (struct st21nfcb_pipe_info *)skb_pipe_info->data;
-		if (dm_pipe_info->dst_gate_id == ST21NFCB_APDU_READER_GATE &&
-		    dm_pipe_info->src_host_id != ST21NFCB_ESE_HOST_ID) {
-			pr_err("Unexpected apdu_reader pipe on host %x\n",
-			       dm_pipe_info->src_host_id);
-			continue;
-		}
-
-		for (j = 0; (j < ARRAY_SIZE(st21nfcb_gates)) &&
-		     (st21nfcb_gates[j].gate != dm_pipe_info->dst_gate_id); j++)
-			;
-
-		if (j < ARRAY_SIZE(st21nfcb_gates) &&
-		    st21nfcb_gates[j].gate == dm_pipe_info->dst_gate_id &&
-		    ST21NFCB_DM_IS_PIPE_OPEN(dm_pipe_info->pipe_state)) {
-			st21nfcb_gates[j].pipe = pipe_info[2];
-
-			ndev->hci_dev->gate2pipe[st21nfcb_gates[j].gate] =
-						st21nfcb_gates[j].pipe;
-			ndev->hci_dev->pipes[st21nfcb_gates[j].pipe].gate =
-						st21nfcb_gates[j].gate;
-			ndev->hci_dev->pipes[st21nfcb_gates[j].pipe].host =
-						dm_pipe_info->src_host_id;
-		}
-	}
-
-	memcpy(ndev->hci_dev->init_data.gates, st21nfcb_gates,
-	       sizeof(st21nfcb_gates));
-
-free_info:
-	kfree_skb(skb_pipe_info);
-	kfree_skb(skb_pipe_list);
-	return r;
-}
-EXPORT_SYMBOL_GPL(st21nfcb_hci_load_session);
-
-static void st21nfcb_hci_admin_event_received(struct nci_dev *ndev,
-					      u8 event, struct sk_buff *skb)
-{
-	struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
-
-	switch (event) {
-	case ST21NFCB_EVT_HOT_PLUG:
-		if (info->se_info.se_active) {
-			if (!ST21NFCB_EVT_HOT_PLUG_IS_INHIBITED(skb)) {
-				del_timer_sync(&info->se_info.se_active_timer);
-				info->se_info.se_active = false;
-				complete(&info->se_info.req_completion);
-			} else {
-				mod_timer(&info->se_info.se_active_timer,
-				      jiffies +
-				      msecs_to_jiffies(ST21NFCB_SE_TO_PIPES));
-			}
-		}
-	break;
-	}
-}
-
-static int st21nfcb_hci_apdu_reader_event_received(struct nci_dev *ndev,
-						   u8 event,
-						   struct sk_buff *skb)
-{
-	int r = 0;
-	struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
-
-	pr_debug("apdu reader gate event: %x\n", event);
-
-	switch (event) {
-	case ST21NFCB_EVT_TRANSMIT_DATA:
-		del_timer_sync(&info->se_info.bwi_timer);
-		info->se_info.bwi_active = false;
-		info->se_info.cb(info->se_info.cb_context,
-				 skb->data, skb->len, 0);
-	break;
-	case ST21NFCB_EVT_WTX_REQUEST:
-		mod_timer(&info->se_info.bwi_timer, jiffies +
-			  msecs_to_jiffies(info->se_info.wt_timeout));
-	break;
-	}
-
-	kfree_skb(skb);
-	return r;
-}
-
-/*
- * Returns:
- * <= 0: driver handled the event, skb consumed
- *    1: driver does not handle the event, please do standard processing
- */
-static int st21nfcb_hci_connectivity_event_received(struct nci_dev *ndev,
-						u8 host, u8 event,
-						struct sk_buff *skb)
-{
-	int r = 0;
-	struct device *dev = &ndev->nfc_dev->dev;
-	struct nfc_evt_transaction *transaction;
-
-	pr_debug("connectivity gate event: %x\n", event);
-
-	switch (event) {
-	case ST21NFCB_EVT_CONNECTIVITY:
-
-	break;
-	case ST21NFCB_EVT_TRANSACTION:
-		/* According to specification etsi 102 622
-		 * 11.2.2.4 EVT_TRANSACTION Table 52
-		 * Description  Tag     Length
-		 * AID          81      5 to 16
-		 * PARAMETERS   82      0 to 255
-		 */
-		if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
-		    skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
-			return -EPROTO;
-
-		transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
-					    skb->len - 2, GFP_KERNEL);
-
-		transaction->aid_len = skb->data[1];
-		memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
-
-		/* Check next byte is PARAMETERS tag (82) */
-		if (skb->data[transaction->aid_len + 2] !=
-		    NFC_EVT_TRANSACTION_PARAMS_TAG)
-			return -EPROTO;
-
-		transaction->params_len = skb->data[transaction->aid_len + 3];
-		memcpy(transaction->params, skb->data +
-		       transaction->aid_len + 4, transaction->params_len);
-
-		r = nfc_se_transaction(ndev->nfc_dev, host, transaction);
-		break;
-	default:
-		return 1;
-	}
-	kfree_skb(skb);
-	return r;
-}
-
-void st21nfcb_hci_event_received(struct nci_dev *ndev, u8 pipe,
-				 u8 event, struct sk_buff *skb)
-{
-	u8 gate = ndev->hci_dev->pipes[pipe].gate;
-	u8 host = ndev->hci_dev->pipes[pipe].host;
-
-	switch (gate) {
-	case NCI_HCI_ADMIN_GATE:
-		st21nfcb_hci_admin_event_received(ndev, event, skb);
-	break;
-	case ST21NFCB_APDU_READER_GATE:
-		st21nfcb_hci_apdu_reader_event_received(ndev, event, skb);
-	break;
-	case ST21NFCB_CONNECTIVITY_GATE:
-		st21nfcb_hci_connectivity_event_received(ndev, host, event,
-							 skb);
-	break;
-	}
-}
-EXPORT_SYMBOL_GPL(st21nfcb_hci_event_received);
-
-
-void st21nfcb_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd,
-			       struct sk_buff *skb)
-{
-	struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
-	u8 gate = ndev->hci_dev->pipes[pipe].gate;
-
-	pr_debug("cmd: %x\n", cmd);
-
-	switch (cmd) {
-	case NCI_HCI_ANY_OPEN_PIPE:
-		if (gate != ST21NFCB_APDU_READER_GATE &&
-		    ndev->hci_dev->pipes[pipe].host != ST21NFCB_UICC_HOST_ID)
-			ndev->hci_dev->count_pipes++;
-
-		if (ndev->hci_dev->count_pipes ==
-		    ndev->hci_dev->expected_pipes) {
-			del_timer_sync(&info->se_info.se_active_timer);
-			info->se_info.se_active = false;
-			ndev->hci_dev->count_pipes = 0;
-			complete(&info->se_info.req_completion);
-		}
-	break;
-	}
-}
-EXPORT_SYMBOL_GPL(st21nfcb_hci_cmd_received);
-
-/*
- * Remarks: On some early st21nfcb firmware, nci_nfcee_mode_set(0)
- * is rejected
- */
-static int st21nfcb_nci_control_se(struct nci_dev *ndev, u8 se_idx,
-				   u8 state)
-{
-	struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
-	int r;
-	struct sk_buff *sk_host_list;
-	u8 host_id;
-
-	switch (se_idx) {
-	case ST21NFCB_UICC_HOST_ID:
-		ndev->hci_dev->count_pipes = 0;
-		ndev->hci_dev->expected_pipes = ST21NFCB_SE_COUNT_PIPE_UICC;
-		break;
-	case ST21NFCB_ESE_HOST_ID:
-		ndev->hci_dev->count_pipes = 0;
-		ndev->hci_dev->expected_pipes = ST21NFCB_SE_COUNT_PIPE_EMBEDDED;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/*
-	 * Wait for an EVT_HOT_PLUG in order to
-	 * retrieve a relevant host list.
-	 */
-	reinit_completion(&info->se_info.req_completion);
-	r = nci_nfcee_mode_set(ndev, se_idx, NCI_NFCEE_ENABLE);
-	if (r != NCI_STATUS_OK)
-		return r;
-
-	mod_timer(&info->se_info.se_active_timer, jiffies +
-		msecs_to_jiffies(ST21NFCB_SE_TO_HOT_PLUG));
-	info->se_info.se_active = true;
-
-	/* Ignore return value and check in any case the host_list */
-	wait_for_completion_interruptible(&info->se_info.req_completion);
-
-	/* There might be some "collision" after receiving a HOT_PLUG event
-	 * This may cause the CLF to not answer to the next hci command.
-	 * There is no possible synchronization to prevent this.
-	 * Adding a small delay is the only way to solve the issue.
-	 */
-	usleep_range(3000, 5000);
-
-	r = nci_hci_get_param(ndev, NCI_HCI_ADMIN_GATE,
-			NCI_HCI_ADMIN_PARAM_HOST_LIST, &sk_host_list);
-	if (r != NCI_HCI_ANY_OK)
-		return r;
-
-	host_id = sk_host_list->data[sk_host_list->len - 1];
-	kfree_skb(sk_host_list);
-	if (state == ST21NFCB_SE_MODE_ON && host_id == se_idx)
-		return se_idx;
-	else if (state == ST21NFCB_SE_MODE_OFF && host_id != se_idx)
-		return se_idx;
-
-	return -1;
-}
-
-int st21nfcb_nci_disable_se(struct nci_dev *ndev, u32 se_idx)
-{
-	int r;
-
-	pr_debug("st21nfcb_nci_disable_se\n");
-
-	if (se_idx == NFC_SE_EMBEDDED) {
-		r = nci_hci_send_event(ndev, ST21NFCB_APDU_READER_GATE,
-				ST21NFCB_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0);
-		if (r < 0)
-			return r;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(st21nfcb_nci_disable_se);
-
-int st21nfcb_nci_enable_se(struct nci_dev *ndev, u32 se_idx)
-{
-	int r;
-
-	pr_debug("st21nfcb_nci_enable_se\n");
-
-	if (se_idx == ST21NFCB_HCI_HOST_ID_ESE) {
-		r = nci_hci_send_event(ndev, ST21NFCB_APDU_READER_GATE,
-				ST21NFCB_EVT_SE_SOFT_RESET, NULL, 0);
-		if (r < 0)
-			return r;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(st21nfcb_nci_enable_se);
-
-static int st21nfcb_hci_network_init(struct nci_dev *ndev)
-{
-	struct core_conn_create_dest_spec_params *dest_params;
-	struct dest_spec_params spec_params;
-	struct nci_conn_info    *conn_info;
-	int r, dev_num;
-
-	dest_params =
-		kzalloc(sizeof(struct core_conn_create_dest_spec_params) +
-			sizeof(struct dest_spec_params), GFP_KERNEL);
-	if (dest_params == NULL) {
-		r = -ENOMEM;
-		goto exit;
-	}
-
-	dest_params->type = NCI_DESTINATION_SPECIFIC_PARAM_NFCEE_TYPE;
-	dest_params->length = sizeof(struct dest_spec_params);
-	spec_params.id = ndev->hci_dev->nfcee_id;
-	spec_params.protocol = NCI_NFCEE_INTERFACE_HCI_ACCESS;
-	memcpy(dest_params->value, &spec_params, sizeof(struct dest_spec_params));
-	r = nci_core_conn_create(ndev, NCI_DESTINATION_NFCEE, 1,
-				 sizeof(struct core_conn_create_dest_spec_params) +
-				 sizeof(struct dest_spec_params),
-				 dest_params);
-	if (r != NCI_STATUS_OK)
-		goto free_dest_params;
-
-	conn_info = ndev->hci_dev->conn_info;
-	if (!conn_info)
-		goto free_dest_params;
-
-	memcpy(ndev->hci_dev->init_data.gates, st21nfcb_gates,
-	       sizeof(st21nfcb_gates));
-
-	/*
-	 * Session id must include the driver name + i2c bus addr
-	 * persistent info to discriminate 2 identical chips
-	 */
-	dev_num = find_first_zero_bit(dev_mask, ST21NFCB_NUM_DEVICES);
-	if (dev_num >= ST21NFCB_NUM_DEVICES) {
-		r = -ENODEV;
-		goto free_dest_params;
-	}
-
-	scnprintf(ndev->hci_dev->init_data.session_id,
-		  sizeof(ndev->hci_dev->init_data.session_id),
-		  "%s%2x", "ST21BH", dev_num);
-
-	r = nci_hci_dev_session_init(ndev);
-	if (r != NCI_HCI_ANY_OK)
-		goto free_dest_params;
-
-	r = nci_nfcee_mode_set(ndev, ndev->hci_dev->conn_info->id,
-			       NCI_NFCEE_ENABLE);
-	if (r != NCI_STATUS_OK)
-		goto free_dest_params;
-
-free_dest_params:
-	kfree(dest_params);
-
-exit:
-	return r;
-}
-
-int st21nfcb_nci_discover_se(struct nci_dev *ndev)
-{
-	u8 param[2];
-	int r;
-	int se_count = 0;
-
-	pr_debug("st21nfcb_nci_discover_se\n");
-
-	r = st21nfcb_hci_network_init(ndev);
-	if (r != 0)
-		return r;
-
-	param[0] = ST21NFCB_UICC_HOST_ID;
-	param[1] = ST21NFCB_HCI_HOST_ID_ESE;
-	r = nci_hci_set_param(ndev, NCI_HCI_ADMIN_GATE,
-				NCI_HCI_ADMIN_PARAM_WHITELIST,
-				param, sizeof(param));
-	if (r != NCI_HCI_ANY_OK)
-		return r;
-
-	r = st21nfcb_nci_control_se(ndev, ST21NFCB_UICC_HOST_ID,
-				ST21NFCB_SE_MODE_ON);
-	if (r == ST21NFCB_UICC_HOST_ID) {
-		nfc_add_se(ndev->nfc_dev, ST21NFCB_UICC_HOST_ID, NFC_SE_UICC);
-		se_count++;
-	}
-
-	/* Try to enable eSE in order to check availability */
-	r = st21nfcb_nci_control_se(ndev, ST21NFCB_HCI_HOST_ID_ESE,
-				ST21NFCB_SE_MODE_ON);
-	if (r == ST21NFCB_HCI_HOST_ID_ESE) {
-		nfc_add_se(ndev->nfc_dev, ST21NFCB_HCI_HOST_ID_ESE,
-			   NFC_SE_EMBEDDED);
-		se_count++;
-		st21nfcb_se_get_atr(ndev);
-	}
-
-	return !se_count;
-}
-EXPORT_SYMBOL_GPL(st21nfcb_nci_discover_se);
-
-int st21nfcb_nci_se_io(struct nci_dev *ndev, u32 se_idx,
-		       u8 *apdu, size_t apdu_length,
-		       se_io_cb_t cb, void *cb_context)
-{
-	struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
-
-	pr_debug("\n");
-
-	switch (se_idx) {
-	case ST21NFCB_HCI_HOST_ID_ESE:
-		info->se_info.cb = cb;
-		info->se_info.cb_context = cb_context;
-		mod_timer(&info->se_info.bwi_timer, jiffies +
-			  msecs_to_jiffies(info->se_info.wt_timeout));
-		info->se_info.bwi_active = true;
-		return nci_hci_send_event(ndev, ST21NFCB_APDU_READER_GATE,
-					ST21NFCB_EVT_TRANSMIT_DATA, apdu,
-					apdu_length);
-	default:
-		return -ENODEV;
-	}
-}
-EXPORT_SYMBOL(st21nfcb_nci_se_io);
-
-static void st21nfcb_se_wt_timeout(unsigned long data)
-{
-	/*
-	 * No answer from the secure element
-	 * within the defined timeout.
-	 * Let's send a reset request as a recovery procedure.
-	 * Depending on the situation, we first try to send a software
-	 * reset to the secure element. If the next command still gets
-	 * no answer in time, we send the CLF a secure element hardware
-	 * reset request.
-	 */
-	/* hardware reset managed through VCC_UICC_OUT power supply */
-	u8 param = 0x01;
-	struct st21nfcb_nci_info *info = (struct st21nfcb_nci_info *) data;
-
-	pr_debug("\n");
-
-	info->se_info.bwi_active = false;
-
-	if (!info->se_info.xch_error) {
-		info->se_info.xch_error = true;
-		nci_hci_send_event(info->ndlc->ndev, ST21NFCB_APDU_READER_GATE,
-				ST21NFCB_EVT_SE_SOFT_RESET, NULL, 0);
-	} else {
-		info->se_info.xch_error = false;
-		nci_hci_send_event(info->ndlc->ndev, ST21NFCB_DEVICE_MGNT_GATE,
-				ST21NFCB_EVT_SE_HARD_RESET, &param, 1);
-	}
-	info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
-}
-
-static void st21nfcb_se_activation_timeout(unsigned long data)
-{
-	struct st21nfcb_nci_info *info = (struct st21nfcb_nci_info *) data;
-
-	pr_debug("\n");
-
-	info->se_info.se_active = false;
-
-	complete(&info->se_info.req_completion);
-}
-
-int st21nfcb_se_init(struct nci_dev *ndev)
-{
-	struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
-
-	init_completion(&info->se_info.req_completion);
-	/* initialize timers */
-	init_timer(&info->se_info.bwi_timer);
-	info->se_info.bwi_timer.data = (unsigned long)info;
-	info->se_info.bwi_timer.function = st21nfcb_se_wt_timeout;
-	info->se_info.bwi_active = false;
-
-	init_timer(&info->se_info.se_active_timer);
-	info->se_info.se_active_timer.data = (unsigned long)info;
-	info->se_info.se_active_timer.function =
-			st21nfcb_se_activation_timeout;
-	info->se_info.se_active = false;
-
-	info->se_info.xch_error = false;
-
-	info->se_info.wt_timeout =
-		ST21NFCB_BWI_TO_TIMEOUT(ST21NFCB_ATR_DEFAULT_BWI);
-
-	return 0;
-}
-EXPORT_SYMBOL(st21nfcb_se_init);
-
-void st21nfcb_se_deinit(struct nci_dev *ndev)
-{
-	struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
-
-	if (info->se_info.bwi_active)
-		del_timer_sync(&info->se_info.bwi_timer);
-	if (info->se_info.se_active)
-		del_timer_sync(&info->se_info.se_active_timer);
-
-	info->se_info.se_active = false;
-	info->se_info.bwi_active = false;
-}
-EXPORT_SYMBOL(st21nfcb_se_deinit);
-
diff --git a/drivers/nfc/st21nfcb/st21nfcb_se.h b/drivers/nfc/st21nfcb/st21nfcb_se.h
deleted file mode 100644
index 52a3238..0000000
--- a/drivers/nfc/st21nfcb/st21nfcb_se.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * NCI based Driver for STMicroelectronics NFC Chip
- *
- * Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __LOCAL_ST21NFCB_SE_H_
-#define __LOCAL_ST21NFCB_SE_H_
-
-/*
- * ref ISO7816-3 chap 8.1. the initial character TS is followed by a
- * sequence of at most 32 characters.
- */
-#define ST21NFCB_ESE_MAX_LENGTH		33
-#define ST21NFCB_HCI_HOST_ID_ESE	0xc0
-
-struct st21nfcb_se_info {
-	u8 atr[ST21NFCB_ESE_MAX_LENGTH];
-	struct completion req_completion;
-
-	struct timer_list bwi_timer;
-	int wt_timeout; /* in msecs */
-	bool bwi_active;
-
-	struct timer_list se_active_timer;
-	bool se_active;
-
-	bool xch_error;
-
-	se_io_cb_t cb;
-	void *cb_context;
-};
-
-int st21nfcb_se_init(struct nci_dev *ndev);
-void st21nfcb_se_deinit(struct nci_dev *ndev);
-
-int st21nfcb_nci_discover_se(struct nci_dev *ndev);
-int st21nfcb_nci_enable_se(struct nci_dev *ndev, u32 se_idx);
-int st21nfcb_nci_disable_se(struct nci_dev *ndev, u32 se_idx);
-int st21nfcb_nci_se_io(struct nci_dev *ndev, u32 se_idx,
-					u8 *apdu, size_t apdu_length,
-					se_io_cb_t cb, void *cb_context);
-int st21nfcb_hci_load_session(struct nci_dev *ndev);
-void st21nfcb_hci_event_received(struct nci_dev *ndev, u8 pipe,
-				 u8 event, struct sk_buff *skb);
-void st21nfcb_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd,
-			       struct sk_buff *skb);
-
-
-#endif /* __LOCAL_ST21NFCB_SE_H_ */
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index aa6a333..85b4d86 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -149,6 +149,7 @@
  */
 #define TRF7970A_QUIRK_IRQ_STATUS_READ		BIT(0)
 #define TRF7970A_QUIRK_EN2_MUST_STAY_LOW	BIT(1)
+#define TRF7970A_QUIRK_T5T_RMB_EXTRA_BYTE	BIT(2)
 
 /* Direct commands */
 #define TRF7970A_CMD_IDLE			0x00
@@ -446,6 +447,7 @@
 	u8				md_rf_tech;
 	u8				tx_cmd;
 	bool				issue_eof;
+	bool				adjust_resp_len;
 	int				en2_gpio;
 	int				en_gpio;
 	struct mutex			lock;
@@ -626,6 +628,11 @@
 		trf->aborting = false;
 	}
 
+	if (trf->adjust_resp_len) {
+		skb_trim(trf->rx_skb, trf->rx_skb->len - 1);
+		trf->adjust_resp_len = false;
+	}
+
 	trf->cb(trf->ddev, trf->cb_arg, trf->rx_skb);
 
 	trf->rx_skb = NULL;
@@ -1429,10 +1436,15 @@
 			trf->iso_ctrl = iso_ctrl;
 		}
 
-		if ((trf->framing == NFC_DIGITAL_FRAMING_ISO15693_T5T) &&
-				trf7970a_is_iso15693_write_or_lock(req[1]) &&
-				(req[0] & ISO15693_REQ_FLAG_OPTION))
-			trf->issue_eof = true;
+		if (trf->framing == NFC_DIGITAL_FRAMING_ISO15693_T5T) {
+			if (trf7970a_is_iso15693_write_or_lock(req[1]) &&
+					(req[0] & ISO15693_REQ_FLAG_OPTION))
+				trf->issue_eof = true;
+			else if ((trf->quirks &
+					TRF7970A_QUIRK_T5T_RMB_EXTRA_BYTE) &&
+				 (req[1] == ISO15693_CMD_READ_MULTIPLE_BLOCK))
+				trf->adjust_resp_len = true;
+		}
 	}
 
 	return 0;
@@ -1992,6 +2004,9 @@
 		return ret;
 	}
 
+	if (of_property_read_bool(np, "t5t-rmb-extra-byte-quirk"))
+		trf->quirks |= TRF7970A_QUIRK_T5T_RMB_EXTRA_BYTE;
+
 	if (of_property_read_bool(np, "irq-status-read-quirk"))
 		trf->quirks |= TRF7970A_QUIRK_IRQ_STATUS_READ;
 
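
The new "t5t-rmb-extra-byte-quirk" property lets the device tree flag trf7970a
parts that append a spurious byte to ISO 15693 Read Multiple Block responses;
the driver then drops that byte before handing the frame up. A minimal sketch
of the two halves, assuming the flag and field names from the hunks above (the
example_* function names are illustrative, not the driver's real entry points):

    /* Probe side: translate the DT property into a quirk bit. */
    static void example_parse_quirk(struct device_node *np, u32 *quirks)
    {
            if (of_property_read_bool(np, "t5t-rmb-extra-byte-quirk"))
                    *quirks |= TRF7970A_QUIRK_T5T_RMB_EXTRA_BYTE;
    }

    /* Completion side: trim the spurious trailing byte, if flagged. */
    static void example_fixup_resp(struct sk_buff *rx_skb, bool adjust)
    {
            if (adjust)
                    skb_trim(rx_skb, rx_skb->len - 1);
    }
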
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 0c064485..fdc60db 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -68,6 +68,9 @@
 			phy->irq = mdio->irq[addr];
 	}
 
+	if (of_property_read_bool(child, "broken-turn-around"))
+		mdio->phy_ignore_ta_mask |= 1 << addr;
+
 	/* Associate the OF node with the device structure so it
 	 * can be looked up later */
 	of_node_get(child);
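
With "broken-turn-around" set on a PHY node, the corresponding bit in
mii_bus::phy_ignore_ta_mask tells bit-banged MDIO implementations not to treat
that PHY's failed read turn-around as "no device present". A hedged sketch of
how a bus driver might consult the mask; the example_* helpers stand in for the
bus's own bit-level primitives and are assumptions:

    static int example_mdio_read(struct mii_bus *bus, int phy, int reg)
    {
            /* A functional PHY drives the turn-around bit to 0. */
            if (example_get_turnaround_bit(bus) != 0 &&
                !(bus->phy_ignore_ta_mask & (1 << phy)))
                    return 0xffff;  /* looks like no device answered */

            return example_read_data_bits(bus);
    }
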
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 00b7d9c..2f5b518 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -2150,7 +2150,7 @@
 	rc = lcs_detect(card);
 	if (rc) {
 		LCS_DBF_TEXT(2, setup, "dtctfail");
-		dev_err(&card->dev->dev,
+		dev_err(&ccwgdev->dev,
 			"Detecting a network adapter for LCS devices"
 			" failed with rc=%d (0x%x)\n", rc, rc);
 		lcs_stopcard(card);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 3abac02..ba974a2 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -175,6 +175,8 @@
 	__u32 supported_funcs;
 	enum qeth_sbp_roles role;
 	__u32 hostnotification:1;
+	__u32 reflect_promisc:1;
+	__u32 reflect_promisc_primary:1;
 };
 
 static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3466d3c..5e20fba 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -645,7 +645,8 @@
 					card->info.hwtrap = 2;
 				qeth_schedule_recovery(card);
 				return NULL;
-			case IPA_CMD_SETBRIDGEPORT:
+			case IPA_CMD_SETBRIDGEPORT_IQD:
+			case IPA_CMD_SETBRIDGEPORT_OSA:
 			case IPA_CMD_ADDRESS_CHANGE_NOTIF:
 				if (card->discipline->control_event_handler
 								(card, cmd))
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 7b55768..beb4bdc 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -237,6 +237,7 @@
 	{IPA_CMD_DELGMAC,	"delgmac"},
 	{IPA_CMD_SETVLAN,	"setvlan"},
 	{IPA_CMD_DELVLAN,	"delvlan"},
+	{IPA_CMD_SETBRIDGEPORT_OSA,	"set_bridge_port(osa)"},
 	{IPA_CMD_SETCCID,	"setccid"},
 	{IPA_CMD_DELCCID,	"delccid"},
 	{IPA_CMD_MODCCID,	"modccid"},
@@ -249,7 +250,7 @@
 	{IPA_CMD_DELIP,		"delip"},
 	{IPA_CMD_SETADAPTERPARMS, "setadapterparms"},
 	{IPA_CMD_SET_DIAG_ASS,	"set_diag_ass"},
-	{IPA_CMD_SETBRIDGEPORT,	"set_bridge_port"},
+	{IPA_CMD_SETBRIDGEPORT_IQD,	"set_bridge_port(hs)"},
 	{IPA_CMD_CREATE_ADDR,	"create_addr"},
 	{IPA_CMD_DESTROY_ADDR,	"destroy_addr"},
 	{IPA_CMD_REGISTER_LOCAL_ADDR,	"register_local_addr"},
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 1558be1..6cccc9a 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -92,6 +92,7 @@
 	IPA_CMD_DELGMAC			= 0x24,
 	IPA_CMD_SETVLAN			= 0x25,
 	IPA_CMD_DELVLAN			= 0x26,
+	IPA_CMD_SETBRIDGEPORT_OSA	= 0x2b,
 	IPA_CMD_SETCCID			= 0x41,
 	IPA_CMD_DELCCID			= 0x42,
 	IPA_CMD_MODCCID			= 0x43,
@@ -104,7 +105,7 @@
 	IPA_CMD_DELIP			= 0xb7,
 	IPA_CMD_SETADAPTERPARMS		= 0xb8,
 	IPA_CMD_SET_DIAG_ASS		= 0xb9,
-	IPA_CMD_SETBRIDGEPORT		= 0xbe,
+	IPA_CMD_SETBRIDGEPORT_IQD	= 0xbe,
 	IPA_CMD_CREATE_ADDR		= 0xc3,
 	IPA_CMD_DESTROY_ADDR		= 0xc4,
 	IPA_CMD_REGISTER_LOCAL_ADDR	= 0xd1,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 0ea0869..2e65b98 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -137,7 +137,7 @@
 		rc = 0;
 		break;
 	case IPA_RC_L2_UNSUPPORTED_CMD:
-		rc = -ENOSYS;
+		rc = -EOPNOTSUPP;
 		break;
 	case IPA_RC_L2_ADDR_TABLE_FULL:
 		rc = -ENOSPC;
@@ -683,6 +683,39 @@
 	return rc ? -EINVAL : 0;
 }
 
+static void qeth_promisc_to_bridge(struct qeth_card *card)
+{
+	struct net_device *dev = card->dev;
+	enum qeth_ipa_promisc_modes promisc_mode;
+	int role;
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "pmisc2br");
+
+	if (!card->options.sbp.reflect_promisc)
+		return;
+	promisc_mode = (dev->flags & IFF_PROMISC) ? SET_PROMISC_MODE_ON
+						: SET_PROMISC_MODE_OFF;
+	if (promisc_mode == card->info.promisc_mode)
+		return;
+
+	if (promisc_mode == SET_PROMISC_MODE_ON) {
+		if (card->options.sbp.reflect_promisc_primary)
+			role = QETH_SBP_ROLE_PRIMARY;
+		else
+			role = QETH_SBP_ROLE_SECONDARY;
+	} else
+		role = QETH_SBP_ROLE_NONE;
+
+	rc = qeth_bridgeport_setrole(card, role);
+	QETH_DBF_TEXT_(SETUP, 2, "bpm%c%04x",
+			(promisc_mode == SET_PROMISC_MODE_ON) ? '+' : '-', rc);
+	if (!rc) {
+		card->options.sbp.role = role;
+		card->info.promisc_mode = promisc_mode;
+	}
+}
+
 static void qeth_l2_set_multicast_list(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
@@ -704,9 +737,10 @@
 		qeth_l2_add_mc(card, ha->addr, 1);
 
 	spin_unlock_bh(&card->mclock);
-	if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
-		return;
-	qeth_setadp_promisc_mode(card);
+	if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+		qeth_setadp_promisc_mode(card);
+	else
+		qeth_promisc_to_bridge(card);
 }
 
 static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -994,7 +1028,7 @@
 	qeth_bridgeport_query_support(card);
 	if (card->options.sbp.supported_funcs)
 		dev_info(&card->gdev->dev,
-		"The device represents a HiperSockets Bridge Capable Port\n");
+		"The device represents a Bridge Capable Port\n");
 	qeth_trace_features(card);
 
 	if (!card->dev && qeth_l2_setup_netdev(card)) {
@@ -1247,7 +1281,8 @@
 					struct qeth_ipa_cmd *cmd)
 {
 	switch (cmd->hdr.command) {
-	case IPA_CMD_SETBRIDGEPORT:
+	case IPA_CMD_SETBRIDGEPORT_OSA:
+	case IPA_CMD_SETBRIDGEPORT_IQD:
 		if (cmd->data.sbp.hdr.command_code ==
 				IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
 			qeth_bridge_state_change(card, cmd);
@@ -1533,7 +1568,7 @@
 
 	if (data->hostevs.lost_event_mask) {
 		dev_info(&data->card->gdev->dev,
-"Address notification from the HiperSockets Bridge Port stopped %s (%s)\n",
+"Address notification from the Bridge Port stopped %s (%s)\n",
 			data->card->dev->name,
 			(data->hostevs.lost_event_mask == 0x01)
 			? "Overflow"
@@ -1617,70 +1652,80 @@
 	struct _qeth_sbp_cbctl *cbctl, enum qeth_ipa_sbp_cmd setcmd)
 {
 	int rc;
+	int is_iqd = (card->info.type == QETH_CARD_TYPE_IQD);
 
-	switch (cbctl->ipa_rc) {
-	case IPA_RC_SUCCESS:
+	if ((is_iqd && (cbctl->ipa_rc == IPA_RC_SUCCESS)) ||
+	    (!is_iqd && (cbctl->ipa_rc == cbctl->cmd_rc)))
 		switch (cbctl->cmd_rc) {
 		case 0x0000:
 			rc = 0;
 			break;
+		case 0x2B04:
 		case 0x0004:
-			rc = -ENOSYS;
+			rc = -EOPNOTSUPP;
 			break;
+		case 0x2B0C:
 		case 0x000C: /* Not configured as bridge Port */
 			rc = -ENODEV; /* maybe not the best code here? */
 			dev_err(&card->gdev->dev,
-	"The HiperSockets device is not configured as a Bridge Port\n");
+	"The device is not configured as a Bridge Port\n");
 			break;
+		case 0x2B14:
 		case 0x0014: /* Another device is Primary */
 			switch (setcmd) {
 			case IPA_SBP_SET_PRIMARY_BRIDGE_PORT:
 				rc = -EEXIST;
 				dev_err(&card->gdev->dev,
-	"The HiperSockets LAN already has a primary Bridge Port\n");
+	"The LAN already has a primary Bridge Port\n");
 				break;
 			case IPA_SBP_SET_SECONDARY_BRIDGE_PORT:
 				rc = -EBUSY;
 				dev_err(&card->gdev->dev,
-	"The HiperSockets device is already a primary Bridge Port\n");
+	"The device is already a primary Bridge Port\n");
 				break;
 			default:
 				rc = -EIO;
 			}
 			break;
+		case 0x2B18:
 		case 0x0018: /* This device is currently Secondary */
 			rc = -EBUSY;
 			dev_err(&card->gdev->dev,
-	"The HiperSockets device is already a secondary Bridge Port\n");
+	"The device is already a secondary Bridge Port\n");
 			break;
+		case 0x2B1C:
 		case 0x001C: /* Limit for Secondary devices reached */
 			rc = -EEXIST;
 			dev_err(&card->gdev->dev,
-	"The HiperSockets LAN cannot have more secondary Bridge Ports\n");
+	"The LAN cannot have more secondary Bridge Ports\n");
 			break;
+		case 0x2B24:
 		case 0x0024: /* This device is currently Primary */
 			rc = -EBUSY;
 			dev_err(&card->gdev->dev,
-	"The HiperSockets device is already a primary Bridge Port\n");
+	"The device is already a primary Bridge Port\n");
 			break;
+		case 0x2B20:
 		case 0x0020: /* Not authorized by zManager */
 			rc = -EACCES;
 			dev_err(&card->gdev->dev,
-	"The HiperSockets device is not authorized to be a Bridge Port\n");
+	"The device is not authorized to be a Bridge Port\n");
 			break;
 		default:
 			rc = -EIO;
 		}
-		break;
-	case IPA_RC_NOTSUPP:
-		rc = -ENOSYS;
-		break;
-	case IPA_RC_UNSUPPORTED_COMMAND:
-		rc = -ENOSYS;
-		break;
-	default:
-		rc = -EIO;
-	}
+	else
+		switch (cbctl->ipa_rc) {
+		case IPA_RC_NOTSUPP:
+			rc = -EOPNOTSUPP;
+			break;
+		case IPA_RC_UNSUPPORTED_COMMAND:
+			rc = -EOPNOTSUPP;
+			break;
+		default:
+			rc = -EIO;
+		}
+
 	if (rc) {
 		QETH_CARD_TEXT_(card, 2, "SBPi%04x", cbctl->ipa_rc);
 		QETH_CARD_TEXT_(card, 2, "SBPc%04x", cbctl->cmd_rc);
@@ -1688,6 +1733,13 @@
 	return rc;
 }
 
+static inline int ipa_cmd_sbp(struct qeth_card *card)
+{
+	return (card->info.type == QETH_CARD_TYPE_IQD) ?
+		IPA_CMD_SETBRIDGEPORT_IQD :
+		IPA_CMD_SETBRIDGEPORT_OSA;
+}
+
 static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
 	struct qeth_reply *reply, unsigned long data)
 {
@@ -1719,7 +1771,7 @@
 	struct _qeth_sbp_cbctl cbctl;
 
 	QETH_CARD_TEXT(card, 2, "brqsuppo");
-	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+	iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0);
 	if (!iob)
 		return;
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -1796,7 +1848,7 @@
 	QETH_CARD_TEXT(card, 2, "brqports");
 	if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
 		return -EOPNOTSUPP;
-	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+	iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0);
 	if (!iob)
 		return -ENOMEM;
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -1808,10 +1860,9 @@
 	cmd->data.sbp.hdr.seq_no = 1;
 	rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb,
 				(void *)&cbctl);
-	if (rc)
+	if (rc < 0)
 		return rc;
-	rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
-	return rc;
+	return qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
 }
 EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports);
 
@@ -1864,7 +1915,7 @@
 	}
 	if (!(card->options.sbp.supported_funcs & setcmd))
 		return -EOPNOTSUPP;
-	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+	iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0);
 	if (!iob)
 		return -ENOMEM;
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -1874,10 +1925,9 @@
 	cmd->data.sbp.hdr.seq_no = 1;
 	rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb,
 				(void *)&cbctl);
-	if (rc)
+	if (rc < 0)
 		return rc;
-	rc = qeth_bridgeport_makerc(card, &cbctl, setcmd);
-	return rc;
+	return qeth_bridgeport_makerc(card, &cbctl, setcmd);
 }
 
 /**
@@ -1898,7 +1948,7 @@
 		case 0x0004:
 		case 0x0100:
 		case 0x0106:
-			rc = -ENOSYS;
+			rc = -EOPNOTSUPP;
 			dev_err(&card->gdev->dev,
 				"Setting address notification failed\n");
 			break;
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index 59e3aa5..52673cd 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -23,8 +23,6 @@
 	if (!card)
 		return -EINVAL;
 
-	mutex_lock(&card->conf_mutex);
-
 	if (qeth_card_hw_is_reachable(card) &&
 					card->options.sbp.supported_funcs)
 		rc = qeth_bridgeport_query_ports(card,
@@ -59,8 +57,6 @@
 			rc = sprintf(buf, "%s\n", word);
 	}
 
-	mutex_unlock(&card->conf_mutex);
-
 	return rc;
 }
 
@@ -90,7 +86,9 @@
 
 	mutex_lock(&card->conf_mutex);
 
-	if (qeth_card_hw_is_reachable(card)) {
+	if (card->options.sbp.reflect_promisc) /* Forbid direct manipulation */
+		rc = -EPERM;
+	else if (qeth_card_hw_is_reachable(card)) {
 		rc = qeth_bridgeport_setrole(card, role);
 		if (!rc)
 			card->options.sbp.role = role;
@@ -123,12 +121,8 @@
 	if (!card)
 		return -EINVAL;
 
-	mutex_lock(&card->conf_mutex);
-
 	enabled = card->options.sbp.hostnotification;
 
-	mutex_unlock(&card->conf_mutex);
-
 	return sprintf(buf, "%d\n", enabled);
 }
 
@@ -167,10 +161,72 @@
 			qeth_bridgeport_hostnotification_show,
 			qeth_bridgeport_hostnotification_store);
 
+static ssize_t qeth_bridgeport_reflect_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *state;
+
+	if (!card)
+		return -EINVAL;
+
+	if (card->options.sbp.reflect_promisc) {
+		if (card->options.sbp.reflect_promisc_primary)
+			state = "primary";
+		else
+			state = "secondary";
+	} else
+		state = "none";
+
+	return sprintf(buf, "%s\n", state);
+}
+
+static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	int enable, primary;
+	int rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	if (sysfs_streq(buf, "none")) {
+		enable = 0;
+		primary = 0;
+	} else if (sysfs_streq(buf, "primary")) {
+		enable = 1;
+		primary = 1;
+	} else if (sysfs_streq(buf, "secondary")) {
+		enable = 1;
+		primary = 0;
+	} else
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+
+	if (card->options.sbp.role != QETH_SBP_ROLE_NONE)
+		rc = -EPERM;
+	else {
+		card->options.sbp.reflect_promisc = enable;
+		card->options.sbp.reflect_promisc_primary = primary;
+		rc = 0;
+	}
+
+	mutex_unlock(&card->conf_mutex);
+
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(bridge_reflect_promisc, 0644,
+			qeth_bridgeport_reflect_show,
+			qeth_bridgeport_reflect_store);
+
 static struct attribute *qeth_l2_bridgeport_attrs[] = {
 	&dev_attr_bridge_role.attr,
 	&dev_attr_bridge_state.attr,
 	&dev_attr_bridge_hostnotify.attr,
+	&dev_attr_bridge_reflect_promisc.attr,
 	NULL,
 };
 
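
The new bridge_reflect_promisc attribute accepts "none", "primary" or
"secondary" and must be set before any bridge-port role is assigned; toggling
IFF_PROMISC on the interface afterwards drives the role automatically, as
qeth_promisc_to_bridge() above shows. A hedged userspace sketch; the ccwgroup
device ID in the path is made up:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/bus/ccwgroup/devices/0.0.f500/"
                            "bridge_reflect_promisc", "w");

            if (!f)
                    return 1;
            fputs("primary\n", f);  /* promisc on => primary role */
            return fclose(f) ? 1 : 0;
    }
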
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 04e42c6..70eb2f61 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3198,8 +3198,7 @@
 	netdev_features_t features)
 {
 	struct qeth_card *card = dev->ml_priv;
-	u32 changed = dev->features ^ features;
-	int err;
+	netdev_features_t changed = dev->features ^ features;
 
 	if (!(changed & NETIF_F_RXCSUM))
 		return 0;
@@ -3208,11 +3207,7 @@
 	    card->state == CARD_STATE_RECOVER)
 		return 0;
 
-	err = qeth_l3_set_rx_csum(card, features & NETIF_F_RXCSUM);
-	if (err)
-		dev->features = features ^ NETIF_F_RXCSUM;
-
-	return err;
+	return qeth_l3_set_rx_csum(card, features & NETIF_F_RXCSUM ? 1 : 0);
 }
 
 static const struct ethtool_ops qeth_l3_ethtool_ops = {
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 1d42e4f8..f3bb7af 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -732,7 +732,7 @@
 	}
 	ndev = n->dev;
 
-	if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
+	if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) {
 		pr_info("multi-cast route %pI6 port %u, dev %s.\n",
 			daddr6->sin6_addr.s6_addr,
 			ntohs(daddr6->sin6_port), ndev->name);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 5c9e680..e32d24e 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -27,6 +27,7 @@
 #include <linux/moduleparam.h>
 #include <generated/utsrelease.h>
 #include <linux/utsname.h>
+#include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/slab.h>
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 8f6d0fb..a7cfc27 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -26,6 +26,7 @@
 #include <linux/mutex.h>
 #include <linux/aer.h>
 #include <linux/bsg-lib.h>
+#include <linux/vmalloc.h>
 
 #include <net/tcp.h>
 #include <scsi/scsi.h>
diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
index bc95ce8..5ab2f69 100644
--- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
@@ -379,7 +379,7 @@
 						GFP_ATOMIC);
 		} else {
 			cfg80211_disconnected(padapter->pnetdev, 0, NULL,
-					      0, GFP_ATOMIC);
+					      0, false, GFP_ATOMIC);
 		}
 	}
 }
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 0343ae3..458bc34 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1524,21 +1524,12 @@
 	struct vnt_private *priv = hw->priv;
 	u8 rx_mode = 0;
 
-	*total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_PROMISC_IN_BSS |
-		FIF_BCN_PRBRESP_PROMISC;
+	*total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
 
 	VNSvInPortB(priv->PortOffset + MAC_REG_RCR, &rx_mode);
 
 	dev_dbg(&priv->pcid->dev, "rx mode in = %x\n", rx_mode);
 
-	if (changed_flags & FIF_PROMISC_IN_BSS) {
-		/* unconditionally log net taps */
-		if (*total_flags & FIF_PROMISC_IN_BSS)
-			rx_mode |= RCR_UNICAST;
-		else
-			rx_mode &= ~RCR_UNICAST;
-	}
-
 	if (changed_flags & FIF_ALLMULTI) {
 		if (*total_flags & FIF_ALLMULTI) {
 			unsigned long flags;
@@ -1802,10 +1793,10 @@
 	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
 
-	priv->hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
-		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
-		IEEE80211_HW_SIGNAL_DBM |
-		IEEE80211_HW_TIMING_BEACON_ONLY;
+	ieee80211_hw_set(priv->hw, TIMING_BEACON_ONLY);
+	ieee80211_hw_set(priv->hw, SIGNAL_DBM);
+	ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
+	ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
 
 	priv->hw->max_signal = 100;
 
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index ab3ab84..2ccfbff 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -785,8 +785,7 @@
 	u8 rx_mode = 0;
 	int rc;
 
-	*total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_PROMISC_IN_BSS |
-		FIF_BCN_PRBRESP_PROMISC;
+	*total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
 
 	rc = vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR,
 		MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode);
@@ -796,14 +795,6 @@
 
 	dev_dbg(&priv->usb->dev, "rx mode in = %x\n", rx_mode);
 
-	if (changed_flags & FIF_PROMISC_IN_BSS) {
-		/* unconditionally log net taps */
-		if (*total_flags & FIF_PROMISC_IN_BSS)
-			rx_mode |= RCR_UNICAST;
-		else
-			rx_mode &= ~RCR_UNICAST;
-	}
-
 	if (changed_flags & FIF_ALLMULTI) {
 		if (*total_flags & FIF_ALLMULTI) {
 			if (priv->mc_list_count > 2)
@@ -987,10 +978,10 @@
 	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
 
-	priv->hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
-		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
-		IEEE80211_HW_SIGNAL_DBM |
-		IEEE80211_HW_TIMING_BEACON_ONLY;
+	ieee80211_hw_set(priv->hw, TIMING_BEACON_ONLY);
+	ieee80211_hw_set(priv->hw, SIGNAL_DBM);
+	ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
+	ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
 
 	priv->hw->max_signal = 100;
 
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 7c87aec..342e2b3 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -722,7 +722,7 @@
 void prism2_disconnected(wlandevice_t *wlandev)
 {
 	cfg80211_disconnected(wlandev->netdev, 0, NULL,
-		0, GFP_KERNEL);
+		0, false, GFP_KERNEL);
 }
 
 void prism2_roamed(wlandevice_t *wlandev)
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 4672bb1..a3fba36 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -21,6 +21,7 @@
 #include <linux/crypto.h>
 #include <linux/completion.h>
 #include <linux/module.h>
+#include <linux/vmalloc.h>
 #include <linux/idr.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi_proto.h>
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index d48379a..6641713 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -30,6 +30,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/module.h>
+#include <linux/vmalloc.h>
 #include <linux/falloc.h>
 #include <scsi/scsi_proto.h>
 #include <asm/unaligned.h>
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 7ca6423..8e5fa29 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
+#include <linux/vmalloc.h>
 #include <linux/file.h>
 #include <scsi/scsi_proto.h>
 #include <asm/unaligned.h>
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index fdf8672..0b4e242 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -34,6 +34,7 @@
 #include <linux/cdrom.h>
 #include <linux/module.h>
 #include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
 #include <asm/unaligned.h>
 #include <net/sock.h>
 #include <net/tcp.h>
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 5efef9a..549af98 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -22,6 +22,7 @@
 #include <linux/kernel.h>
 #include <linux/timer.h>
 #include <linux/parser.h>
+#include <linux/vmalloc.h>
 #include <linux/uio_driver.h>
 #include <net/genetlink.h>
 #include <scsi/scsi_common.h>
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 5b30d27..55722fe 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -35,6 +35,7 @@
 #include <linux/compat.h>
 #include <linux/eventfd.h>
 #include <linux/fs.h>
+#include <linux/vmalloc.h>
 #include <linux/miscdevice.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi_common.h>
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 3a57a1b..b506428 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -85,7 +85,7 @@
 		return -ENOMEM;
 	}
 
-	ret = sock_create_kern(AF_RXRPC, SOCK_DGRAM, PF_INET, &socket);
+	ret = sock_create_kern(&init_net, AF_RXRPC, SOCK_DGRAM, PF_INET, &socket);
 	if (ret < 0) {
 		destroy_workqueue(afs_async_calls);
 		_leave(" = %d [socket]", ret);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index d08e079..754fd6c 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -921,8 +921,8 @@
 	mutex_unlock(&connections_lock);
 
 	memset(&peeraddr, 0, sizeof(peeraddr));
-	result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
-				  IPPROTO_TCP, &newsock);
+	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
+				  SOCK_STREAM, IPPROTO_TCP, &newsock);
 	if (result < 0)
 		return -ENOMEM;
 
@@ -1173,8 +1173,8 @@
 		goto out;
 
 	/* Create a socket to communicate with */
-	result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
-				  IPPROTO_TCP, &sock);
+	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
+				  SOCK_STREAM, IPPROTO_TCP, &sock);
 	if (result < 0)
 		goto out_err;
 
@@ -1258,8 +1258,8 @@
 		addr_len = sizeof(struct sockaddr_in6);
 
 	/* Create a socket to communicate with */
-	result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
-				  IPPROTO_TCP, &sock);
+	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
+				  SOCK_STREAM, IPPROTO_TCP, &sock);
 	if (result < 0) {
 		log_print("Can't create listening comms socket");
 		goto create_out;
@@ -1365,8 +1365,8 @@
 
 	log_print("Using SCTP for communications");
 
-	result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_SEQPACKET,
-				  IPPROTO_SCTP, &sock);
+	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
+				  SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
 	if (result < 0) {
 		log_print("Can't create comms socket, check SCTP is loaded");
 		goto out;
diff --git a/fs/splice.c b/fs/splice.c
index bfe62ae..4f355a1 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -261,6 +261,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(splice_to_pipe);
 
 void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
 {
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq806x.h b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
index ebd63fd..dc4254b 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
@@ -289,5 +289,7 @@
 #define UBI32_CORE1_CLK				279
 #define UBI32_CORE2_CLK				280
 #define EBI2_AON_CLK				281
+#define NSSTCM_CLK_SRC				282
+#define NSSTCM_CLK				283
 
 #endif
diff --git a/include/dt-bindings/net/ti-dp83867.h b/include/dt-bindings/net/ti-dp83867.h
new file mode 100644
index 0000000..172744a
--- /dev/null
+++ b/include/dt-bindings/net/ti-dp83867.h
@@ -0,0 +1,45 @@
+/*
+ * Device Tree constants for the Texas Instruments DP83867 PHY
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ *
+ * Copyright:   (C) 2015 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_TI_DP83867_H
+#define _DT_BINDINGS_TI_DP83867_H
+
+/* PHY CTRL bits */
+#define DP83867_PHYCR_FIFO_DEPTH_3_B_NIB	0x00
+#define DP83867_PHYCR_FIFO_DEPTH_4_B_NIB	0x01
+#define DP83867_PHYCR_FIFO_DEPTH_6_B_NIB	0x02
+#define DP83867_PHYCR_FIFO_DEPTH_8_B_NIB	0x03
+
+/* RGMIIDCTL internal delay for rx and tx */
+#define	DP83867_RGMIIDCTL_250_PS	0x0
+#define	DP83867_RGMIIDCTL_500_PS	0x1
+#define	DP83867_RGMIIDCTL_750_PS	0x2
+#define	DP83867_RGMIIDCTL_1_NS		0x3
+#define	DP83867_RGMIIDCTL_1_25_NS	0x4
+#define	DP83867_RGMIIDCTL_1_50_NS	0x5
+#define	DP83867_RGMIIDCTL_1_75_NS	0x6
+#define	DP83867_RGMIIDCTL_2_00_NS	0x7
+#define	DP83867_RGMIIDCTL_2_25_NS	0x8
+#define	DP83867_RGMIIDCTL_2_50_NS	0x9
+#define	DP83867_RGMIIDCTL_2_75_NS	0xa
+#define	DP83867_RGMIIDCTL_3_00_NS	0xb
+#define	DP83867_RGMIIDCTL_3_25_NS	0xc
+#define	DP83867_RGMIIDCTL_3_50_NS	0xd
+#define	DP83867_RGMIIDCTL_3_75_NS	0xe
+#define	DP83867_RGMIIDCTL_4_00_NS	0xf
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,gcc-ipq806x.h b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
index 0ad5ef9..de9c814 100644
--- a/include/dt-bindings/reset/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
@@ -129,4 +129,47 @@
 #define USB30_1_PHY_RESET				112
 #define NSSFB0_RESET					113
 #define NSSFB1_RESET					114
+#define UBI32_CORE1_CLKRST_CLAMP_RESET			115
+#define UBI32_CORE1_CLAMP_RESET				116
+#define UBI32_CORE1_AHB_RESET				117
+#define UBI32_CORE1_AXI_RESET				118
+#define UBI32_CORE2_CLKRST_CLAMP_RESET			119
+#define UBI32_CORE2_CLAMP_RESET				120
+#define UBI32_CORE2_AHB_RESET				121
+#define UBI32_CORE2_AXI_RESET				122
+#define GMAC_CORE1_RESET				123
+#define GMAC_CORE2_RESET				124
+#define GMAC_CORE3_RESET				125
+#define GMAC_CORE4_RESET				126
+#define GMAC_AHB_RESET					127
+#define NSS_CH0_RST_RX_CLK_N_RESET			128
+#define NSS_CH0_RST_TX_CLK_N_RESET			129
+#define NSS_CH0_RST_RX_125M_N_RESET			130
+#define NSS_CH0_HW_RST_RX_125M_N_RESET			131
+#define NSS_CH0_RST_TX_125M_N_RESET			132
+#define NSS_CH1_RST_RX_CLK_N_RESET			133
+#define NSS_CH1_RST_TX_CLK_N_RESET			134
+#define NSS_CH1_RST_RX_125M_N_RESET			135
+#define NSS_CH1_HW_RST_RX_125M_N_RESET			136
+#define NSS_CH1_RST_TX_125M_N_RESET			137
+#define NSS_CH2_RST_RX_CLK_N_RESET			138
+#define NSS_CH2_RST_TX_CLK_N_RESET			139
+#define NSS_CH2_RST_RX_125M_N_RESET			140
+#define NSS_CH2_HW_RST_RX_125M_N_RESET			141
+#define NSS_CH2_RST_TX_125M_N_RESET			142
+#define NSS_CH3_RST_RX_CLK_N_RESET			143
+#define NSS_CH3_RST_TX_CLK_N_RESET			144
+#define NSS_CH3_RST_RX_125M_N_RESET			145
+#define NSS_CH3_HW_RST_RX_125M_N_RESET			146
+#define NSS_CH3_RST_TX_125M_N_RESET			147
+#define NSS_RST_RX_250M_125M_N_RESET			148
+#define NSS_RST_TX_250M_125M_N_RESET			149
+#define NSS_QSGMII_TXPI_RST_N_RESET			150
+#define NSS_QSGMII_CDR_RST_N_RESET			151
+#define NSS_SGMII2_CDR_RST_N_RESET			152
+#define NSS_SGMII3_CDR_RST_N_RESET			153
+#define NSS_CAL_PRBS_RST_N_RESET			154
+#define NSS_LCKDT_RST_N_RESET				155
+#define NSS_SRDS_N_RESET				156
+
 #endif
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index e34f906..2ff4a99 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -305,6 +305,15 @@
 
 extern void bcma_driver_unregister(struct bcma_driver *drv);
 
+/* module_bcma_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit.  This eliminates a lot of
+ * boilerplate.  Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit().
+ */
+#define module_bcma_driver(__bcma_driver) \
+	module_driver(__bcma_driver, bcma_driver_register, \
+			bcma_driver_unregister)
+
 /* Set a fallback SPROM.
  * See kdoc at the function definition for complete documentation. */
 extern int bcma_arch_register_fallback_sprom(
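
A hypothetical user of the new helper; everything named example_* below is
illustrative, but the shape matches the other module_*_driver() wrappers:

    static struct bcma_driver example_driver = {
            .name           = KBUILD_MODNAME,
            .id_table       = example_bcma_tbl,
            .probe          = example_probe,
            .remove         = example_remove,
    };
    module_bcma_driver(example_driver);
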
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 5ba6918..9657f11 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -246,7 +246,18 @@
 }
 #endif
 
+#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
 extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
 extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
+#else
+static inline int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
+{
+	return -ENOTSUPP;
+}
+static inline int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
+{
+	return -ENOTSUPP;
+}
+#endif
 
 #endif /* LINUX_BCMA_DRIVER_PCI_H_ */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index d5cda06..4383476 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -105,7 +105,8 @@
 	 */
 	bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
 
-	u32 (*convert_ctx_access)(int dst_reg, int src_reg, int ctx_off,
+	u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
+				  int src_reg, int ctx_off,
 				  struct bpf_insn *insn);
 };
 
@@ -123,15 +124,41 @@
 	const struct bpf_verifier_ops *ops;
 	struct bpf_map **used_maps;
 	struct bpf_prog *prog;
-	struct work_struct work;
+	union {
+		struct work_struct work;
+		struct rcu_head	rcu;
+	};
 };
 
+struct bpf_array {
+	struct bpf_map map;
+	u32 elem_size;
+	/* 'ownership' of prog_array is claimed by the first program that
+	 * is going to use this map or by the first program whose FD is
+	 * stored in the map, to make sure that all callers and callees
+	 * have the same prog_type and JITed flag.
+	 */
+	enum bpf_prog_type owner_prog_type;
+	bool owner_jited;
+	union {
+		char value[0] __aligned(8);
+		struct bpf_prog *prog[0] __aligned(8);
+	};
+};
+#define MAX_TAIL_CALL_CNT 32
+
+u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
+void bpf_prog_array_map_clear(struct bpf_map *map);
+bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
+
 #ifdef CONFIG_BPF_SYSCALL
 void bpf_register_prog_type(struct bpf_prog_type_list *tl);
 void bpf_register_map_type(struct bpf_map_type_list *tl);
 
 struct bpf_prog *bpf_prog_get(u32 ufd);
 void bpf_prog_put(struct bpf_prog *prog);
+void bpf_prog_put_rcu(struct bpf_prog *prog);
 
 struct bpf_map *bpf_map_get(struct fd f);
 void bpf_map_put(struct bpf_map *map);
@@ -160,5 +187,10 @@
 
 extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
+extern const struct bpf_func_proto bpf_tail_call_proto;
+extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
+extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
+extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
+extern const struct bpf_func_proto bpf_get_current_comm_proto;
 
 #endif /* _LINUX_BPF_H */
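
From a program's point of view, the bpf_tail_call() helper jumps through a
BPF_MAP_TYPE_PROG_ARRAY slot without returning, and MAX_TAIL_CALL_CNT (32)
bounds the chain depth so programs still terminate. A hedged fragment in the
samples/bpf style of this series; the SEC() and struct bpf_map_def conventions
are assumptions about the sample infrastructure:

    struct bpf_map_def SEC("maps") jmp_table = {
            .type = BPF_MAP_TYPE_PROG_ARRAY,
            .key_size = sizeof(u32),
            .value_size = sizeof(u32),
            .max_entries = 8,
    };

    SEC("socket")
    int entry_prog(struct __sk_buff *skb)
    {
            /* Does not return on success; falls through if slot 2 is
             * empty or the 32-deep tail-call budget is exhausted. */
            bpf_tail_call(skb, &jmp_table, 2);
            return 0;
    }
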
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 656da2a..697ca77 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -1,6 +1,13 @@
 #ifndef _LINUX_BRCMPHY_H
 #define _LINUX_BRCMPHY_H
 
+#include <linux/phy.h>
+
+/* All Broadcom Ethernet switches have a pseudo-PHY at address 30 which is used
+ * to configure the switch internal registers via MDIO accesses.
+ */
+#define BRCM_PSEUDO_PHY_ADDR           30
+
 #define PHY_ID_BCM50610			0x0143bd60
 #define PHY_ID_BCM50610M		0x0143bd70
 #define PHY_ID_BCM5241			0x0143bc30
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 606563e..9012f87 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -110,7 +110,29 @@
  */
 static inline bool is_multicast_ether_addr(const u8 *addr)
 {
-	return 0x01 & addr[0];
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+	u32 a = *(const u32 *)addr;
+#else
+	u16 a = *(const u16 *)addr;
+#endif
+#ifdef __BIG_ENDIAN
+	return 0x01 & (a >> ((sizeof(a) * 8) - 8));
+#else
+	return 0x01 & a;
+#endif
+}
+
+static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+#ifdef __BIG_ENDIAN
+	return 0x01 & ((*(const u64 *)addr) >> 56);
+#else
+	return 0x01 & (*(const u64 *)addr);
+#endif
+#else
+	return is_multicast_ether_addr(addr);
+#endif
 }
 
 /**
@@ -169,6 +191,24 @@
 }
 
 /**
+ * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol
+ * @proto: Ethertype/length value to be tested
+ *
+ * Check that the value from the Ethertype/length field is a valid Ethertype.
+ *
+ * Return true if the value is an 802.3 supported Ethertype.
+ */
+static inline bool eth_proto_is_802_3(__be16 proto)
+{
+#ifndef __BIG_ENDIAN
+	/* if CPU is little endian mask off bits representing LSB */
+	proto &= htons(0xFF00);
+#endif
+	/* cast both to u16 and compare since LSB can be ignored */
+	return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN);
+}
+
+/**
  * eth_random_addr - Generate software assigned random Ethernet address
  * @addr: Pointer to a six-byte array containing the Ethernet address
  *
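
Both helpers test in wire byte order so the fast path needs no byte swap. A
simplified userspace restatement of the logic, minus the kernel's
unaligned-access micro-optimizations, for sanity checking:

    #include <arpa/inet.h>
    #include <assert.h>
    #include <stdint.h>

    #define ETH_P_802_3_MIN 0x0600

    static int proto_is_802_3(uint16_t proto_be)
    {
            return ntohs(proto_be) >= ETH_P_802_3_MIN;
    }

    static int is_multicast(const uint8_t *addr)
    {
            return addr[0] & 0x01;  /* only bit 0 of byte 0 matters */
    }

    int main(void)
    {
            const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
            const uint8_t ucast[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

            assert(proto_is_802_3(htons(0x0800)));   /* IPv4 Ethertype */
            assert(!proto_is_802_3(htons(0x0101)));  /* 802.3 length   */
            assert(is_multicast(bcast) && !is_multicast(ucast));
            return 0;
    }
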
diff --git a/include/linux/filter.h b/include/linux/filter.h
index fa11b3a..17724f6 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -207,6 +207,16 @@
 		.off   = OFF,					\
 		.imm   = 0 })
 
+/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF)			\
+	((struct bpf_insn) {					\
+		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,	\
+		.dst_reg = DST,					\
+		.src_reg = SRC,					\
+		.off   = OFF,					\
+		.imm   = 0 })
+
 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 
 #define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
@@ -267,6 +277,14 @@
 		.off   = 0,					\
 		.imm   = 0 })
 
+/* Internal classic blocks for direct assignment */
+
+#define __BPF_STMT(CODE, K)					\
+	((struct sock_filter) BPF_STMT(CODE, K))
+
+#define __BPF_JUMP(CODE, K, JT, JF)				\
+	((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))
+
 #define bytes_to_bpf_size(bytes)				\
 ({								\
 	int bpf_size = -EINVAL;					\
@@ -360,12 +378,9 @@
 
 int sk_filter(struct sock *sk, struct sk_buff *skb);
 
-void bpf_prog_select_runtime(struct bpf_prog *fp);
+int bpf_prog_select_runtime(struct bpf_prog *fp);
 void bpf_prog_free(struct bpf_prog *fp);
 
-int bpf_convert_filter(struct sock_filter *prog, int len,
-		       struct bpf_insn *new_prog, int *new_len);
-
 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 				  gfp_t gfp_extra_flags);
@@ -377,14 +392,17 @@
 	__bpf_prog_free(fp);
 }
 
+typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
+				       unsigned int flen);
+
 int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
+int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
+			      bpf_aux_classic_check_t trans);
 void bpf_prog_destroy(struct bpf_prog *fp);
 
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 int sk_attach_bpf(u32 ufd, struct sock *sk);
 int sk_detach_filter(struct sock *sk);
-
-int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
 int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
 		  unsigned int len);
 
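
BPF_STX_XADD emits an atomic add of src_reg into memory at dst_reg + off,
which is how hand-assembled programs bump shared counters. A hedged fragment;
map setup and the exit instruction are elided, and R1 is assumed to hold a
counter pointer from an earlier map lookup, per the register conventions of
the other macros in this header:

    struct bpf_insn bump_counter[] = {
            BPF_MOV64_IMM(BPF_REG_0, 1),
            /* *(u32 *)(R1 + 0) += R0, atomically */
            BPF_STX_XADD(BPF_W, BPF_REG_1, BPF_REG_0, 0),
    };
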
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 15928f0..6ba7cf2 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -368,6 +368,11 @@
 extern void free_hot_cold_page(struct page *page, bool cold);
 extern void free_hot_cold_page_list(struct list_head *list, bool cold);
 
+struct page_frag_cache;
+extern void *__alloc_page_frag(struct page_frag_cache *nc,
+			       unsigned int fragsz, gfp_t gfp_mask);
+extern void __free_page_frag(void *addr);
+
 extern void __free_kmem_pages(struct page *page, unsigned int order);
 extern void free_kmem_pages(unsigned long addr, unsigned int order);
 
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index 8872ca1..1dc1f4e 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -225,15 +225,13 @@
  * ieee802154_is_valid_extended_unicast_addr - check if extended addr is valid
  * @addr: extended addr to check
  */
-static inline bool ieee802154_is_valid_extended_addr(const __le64 addr)
+static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr)
 {
-	/* These EUI-64 addresses are reserved by IEEE. 0xffffffffffffffff
-	 * is used internally as extended to short address broadcast mapping.
-	 * This is currently a workaround because neighbor discovery can't
-	 * deal with short addresses types right now.
+	/* Bail out if the address is all zero, or if the group
+	 * address bit is set.
 	 */
 	return ((addr != cpu_to_le64(0x0000000000000000ULL)) &&
-		(addr != cpu_to_le64(0xffffffffffffffffULL)));
+		!(addr & cpu_to_le64(0x0100000000000000ULL)));
 }
 
 /**
@@ -244,9 +242,9 @@
 {
 	get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN);
 
-	/* toggle some bit if we hit an invalid extended addr */
-	if (!ieee802154_is_valid_extended_addr(*addr))
-		((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01;
+	/* clear the group bit, and set the locally administered bit */
+	((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] &= ~0x01;
+	((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] |= 0x02;
 }
 
 #endif /* LINUX_IEEE802154_H */
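
Because the extended address is stored as a little-endian __le64, the EUI-64's
first octet, the one carrying the group (0x01) and locally-administered (0x02)
bits, sits in the last byte of the array; that is why the hunk above touches
index IEEE802154_EXTENDED_ADDR_LEN - 1. A sketch making the bit manipulation
explicit (the function name is illustrative):

    static void example_make_local_unicast(u8 *addr)
    {
            addr[IEEE802154_EXTENDED_ADDR_LEN - 1] &= ~0x01; /* unicast */
            addr[IEEE802154_EXTENDED_ADDR_LEN - 1] |= 0x02;  /* local   */
    }
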
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index da49299..ae5d0d2 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -5,6 +5,15 @@
 
 
 /* We don't want this structure exposed to user space */
+struct ifla_vf_stats {
+	__u64 rx_packets;
+	__u64 tx_packets;
+	__u64 rx_bytes;
+	__u64 tx_bytes;
+	__u64 broadcast;
+	__u64 multicast;
+};
+
 struct ifla_vf_info {
 	__u32 vf;
 	__u8 mac[32];
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 6f6929e..a4ccc31 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -29,7 +29,7 @@
  * Maximum times a macvtap device can be opened. This can be used to
  * configure the number of receive queues, e.g. for multiqueue virtio.
  */
-#define MAX_MACVTAP_QUEUES	16
+#define MAX_MACVTAP_QUEUES	256
 
 #define MACVLAN_MC_FILTER_BITS	8
 #define MACVLAN_MC_FILTER_SZ	(1 << MACVLAN_MC_FILTER_BITS)
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 66a7d76..b49cf92 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -74,7 +74,7 @@
 struct module;
 
 struct pppox_proto {
-	int		(*create)(struct net *net, struct socket *sock);
+	int		(*create)(struct net *net, struct socket *sock, int kern);
 	int		(*ioctl)(struct socket *sock, unsigned int cmd,
 				 unsigned long arg);
 	struct module	*owner;
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 920e445..67ce5bd 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -416,7 +416,7 @@
 /**
  * __vlan_get_tag - get the VLAN ID that is part of the payload
  * @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
  *
  * Returns error if the skb is not of VLAN type
  */
@@ -435,7 +435,7 @@
 /**
  * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[]
  * @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
  *
  * Returns error if @skb->vlan_tci is not set correctly
  */
@@ -456,7 +456,7 @@
 /**
  * vlan_get_tag - get the VLAN ID from the skb
  * @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
  *
  * Returns error if the skb is not VLAN tagged
  */
@@ -539,7 +539,7 @@
 	 */
 
 	proto = vhdr->h_vlan_encapsulated_proto;
-	if (ntohs(proto) >= ETH_P_802_3_MIN) {
+	if (eth_proto_is_802_3(proto)) {
 		skb->protocol = proto;
 		return;
 	}
@@ -628,4 +628,24 @@
 	return features;
 }
 
+/**
+ * compare_vlan_header - Compare two vlan headers
+ * @h1: Pointer to vlan header
+ * @h2: Pointer to vlan header
+ *
+ * Compare two vlan headers, returns 0 if equal.
+ *
+ * Please note that the alignment of h1 & h2 is only guaranteed to be 16 bits.
+ */
+static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
+						const struct vlan_hdr *h2)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+	return *(u32 *)h1 ^ *(u32 *)h2;
+#else
+	return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
+	       ((__force u32)h1->h_vlan_encapsulated_proto ^
+		(__force u32)h2->h_vlan_encapsulated_proto);
+#endif
+}
 #endif /* !(_LINUX_IF_VLAN_H_) */
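
Like memcmp(), compare_vlan_header() returns zero on a match, so callers test
for equality by negating the result; a short hedged usage fragment:

    static bool example_vlan_hdrs_equal(const struct vlan_hdr *a,
                                        const struct vlan_hdr *b)
    {
            return compare_vlan_header(a, b) == 0;
    }
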
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 2c677af..193ad48 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -130,5 +130,6 @@
 extern void ip_mc_remap(struct in_device *);
 extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
 extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
+int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed);
 
 #endif
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index ac48b10..0e707f0 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -24,6 +24,7 @@
 					  struct inet_diag_msg *r,
 					  void *info);
 	__u16		idiag_type;
+	__u16		idiag_info_size;
 };
 
 struct inet_connection_sock;
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 0a21fbe..a4328ce 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -120,6 +120,9 @@
 	 || (!IN_DEV_FORWARD(in_dev) && \
 	  IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS)))
 
+#define IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) \
+	IN_DEV_CONF_GET((in_dev), IGNORE_ROUTES_WITH_LINKDOWN)
+
 #define IN_DEV_ARPFILTER(in_dev)	IN_DEV_ORCONF((in_dev), ARPFILTER)
 #define IN_DEV_ARP_ACCEPT(in_dev)	IN_DEV_ORCONF((in_dev), ARP_ACCEPT)
 #define IN_DEV_ARP_ANNOUNCE(in_dev)	IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h
index 66c30a7..11f00cd 100644
--- a/include/linux/mdio-gpio.h
+++ b/include/linux/mdio-gpio.h
@@ -23,7 +23,8 @@
 	bool mdio_active_low;
 	bool mdo_active_low;
 
-	unsigned int phy_mask;
+	u32 phy_mask;
+	u32 phy_ignore_ta_mask;
 	int irqs[PHY_MAX_ADDR];
 	/* reset callback */
 	int (*reset)(struct mii_bus *bus);
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index f62e7cf..58391f2 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -35,6 +35,8 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/if_link.h>
+#include <linux/mlx4/device.h>
+#include <linux/netdevice.h>
 
 enum {
 	/* initialization and general commands */
@@ -300,6 +302,10 @@
 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev);
 void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox);
 
+int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
+			   struct mlx4_counter *counter_stats, int reset);
+int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
+		      struct ifla_vf_stats *vf_stats);
 u32 mlx4_comm_get_version(void);
 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
 int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index f94984f..fd13c1c 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -46,8 +46,9 @@
 
 #define MAX_MSIX_P_PORT		17
 #define MAX_MSIX		64
-#define MSIX_LEGACY_SZ		4
 #define MIN_MSIX_P_PORT		5
+#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
+					 (dev_cap).num_ports * MIN_MSIX_P_PORT)
 
 #define MLX4_MAX_100M_UNITS_VAL		255	/*
 						 * work around: can't set values
@@ -528,7 +529,6 @@
 	int			num_eqs;
 	int			reserved_eqs;
 	int			num_comp_vectors;
-	int			comp_pool;
 	int			num_mpts;
 	int			max_fmr_maps;
 	int			num_mtts;
@@ -771,6 +771,14 @@
 	struct mlx4_eth_av	eth;
 };
 
+/* Counters should saturate once they reach their maximum value */
+#define ASSIGN_32BIT_COUNTER(counter, value) do {	\
+	if ((value) > U32_MAX)				\
+		counter = cpu_to_be32(U32_MAX);		\
+	else						\
+		counter = cpu_to_be32(value);		\
+} while (0)
+
 struct mlx4_counter {
 	u8	reserved1[3];
 	u8	counter_mode;
@@ -963,6 +971,7 @@
 			((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
 
 #define MLX4_INVALID_SLAVE_ID	0xFF
+#define MLX4_SINK_COUNTER_INDEX(dev)	(dev->caps.max_counters - 1)
 
 void handle_port_mgmt_change_event(struct work_struct *work);
 
@@ -1338,10 +1347,13 @@
 int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
 int mlx4_SYNC_TPT(struct mlx4_dev *dev);
 int mlx4_test_interrupts(struct mlx4_dev *dev);
-int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
-		   int *vector);
+u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port);
+bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector);
+struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port);
+int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector);
 void mlx4_release_eq(struct mlx4_dev *dev, int vec);
 
+int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector);
 int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
 
 int mlx4_get_phys_port_id(struct mlx4_dev *dev);
@@ -1350,6 +1362,7 @@
 
 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
+int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port);
 
 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry,
 			 int port);
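
ASSIGN_32BIT_COUNTER clamps a 64-bit running total into a big-endian 32-bit
field at U32_MAX instead of letting it wrap. A hedged usage fragment; the
destination field is illustrative:

    static void example_fill_counter(__be32 *dst, u64 total)
    {
            ASSIGN_32BIT_COUNTER(*dst, total);  /* saturates at U32_MAX */
    }
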
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 2695ced..abc4767 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -169,6 +169,9 @@
 		       struct mlx5_query_cq_mbox_out *out);
 int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 			struct mlx5_modify_cq_mbox_in *in, int in_sz);
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+				   struct mlx5_core_cq *cq, u16 cq_period,
+				   u16 cq_max_count);
 int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
 void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
 
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index abf65c7..b943cd9 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -35,6 +35,7 @@
 
 #include <linux/types.h>
 #include <rdma/ib_verbs.h>
+#include <linux/mlx5/mlx5_ifc.h>
 
 #if defined(__LITTLE_ENDIAN)
 #define MLX5_SET_HOST_ENDIANNESS	0
@@ -58,6 +59,8 @@
 #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
 #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
 #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
 #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
 #define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
 
@@ -70,6 +73,14 @@
 		     << __mlx5_dw_bit_off(typ, fld))); \
 } while (0)
 
+#define MLX5_SET_TO_ONES(typ, p, fld) do { \
+	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
+	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
+		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
+		     << __mlx5_dw_bit_off(typ, fld))); \
+} while (0)
+
 #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
 __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
 __mlx5_mask(typ, fld))
@@ -88,6 +99,12 @@
 
 #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
 
+#define MLX5_GET64_PR(typ, p, fld) ({ \
+	u64 ___t = MLX5_GET64(typ, p, fld); \
+	pr_debug(#fld " = 0x%llx\n", ___t); \
+	___t; \
+})
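These accessors all address the auto-generated mlx5_ifc bit layouts by field name. A minimal round-trip sketch against the cmd_hca_cap layout (the value 17 is arbitrary):

	u32 caps[MLX5_ST_SZ_DW(cmd_hca_cap)] = {0};

	MLX5_SET(cmd_hca_cap, caps, log_max_qp, 17);
	WARN_ON(MLX5_GET(cmd_hca_cap, caps, log_max_qp) != 17);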
+
 enum {
 	MLX5_MAX_COMMANDS		= 32,
 	MLX5_CMD_DATA_BLOCK_SIZE	= 512,
@@ -115,6 +132,10 @@
 };
 
 enum {
+	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
+};
+
+enum {
 	MLX5_MIN_PKEY_TABLE_SIZE = 128,
 	MLX5_MAX_LOG_PKEY_TABLE  = 5,
 };
@@ -264,6 +285,7 @@
 	MLX5_OPCODE_RDMA_WRITE_IMM	= 0x09,
 	MLX5_OPCODE_SEND		= 0x0a,
 	MLX5_OPCODE_SEND_IMM		= 0x0b,
+	MLX5_OPCODE_LSO			= 0x0e,
 	MLX5_OPCODE_RDMA_READ		= 0x10,
 	MLX5_OPCODE_ATOMIC_CS		= 0x11,
 	MLX5_OPCODE_ATOMIC_FA		= 0x12,
@@ -312,13 +334,6 @@
 	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
 };
 
-enum {
-	HCA_CAP_OPMOD_GET_MAX	= 0,
-	HCA_CAP_OPMOD_GET_CUR	= 1,
-	HCA_CAP_OPMOD_GET_ODP_MAX = 4,
-	HCA_CAP_OPMOD_GET_ODP_CUR = 5
-};
-
 struct mlx5_inbox_hdr {
 	__be16		opcode;
 	u8		rsvd[4];
@@ -541,6 +556,10 @@
 	u8		sig;
 };
 
+enum {
+	MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
+};
+
 struct mlx5_err_cqe {
 	u8	rsvd0[32];
 	__be32	srqn;
@@ -554,13 +573,22 @@
 };
 
 struct mlx5_cqe64 {
-	u8		rsvd0[17];
+	u8		rsvd0[4];
+	u8		lro_tcppsh_abort_dupack;
+	u8		lro_min_ttl;
+	__be16		lro_tcp_win;
+	__be32		lro_ack_seq_num;
+	__be32		rss_hash_result;
+	u8		rss_hash_type;
 	u8		ml_path;
-	u8		rsvd20[4];
+	u8		rsvd20[2];
+	__be16		check_sum;
 	__be16		slid;
 	__be32		flags_rqpn;
-	u8		rsvd28[4];
-	__be32		srqn;
+	u8		hds_ip_ext;
+	u8		l4_hdr_type_etc;
+	__be16		vlan_info;
+	__be32		srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
 	__be32		imm_inval_pkey;
 	u8		rsvd40[4];
 	__be32		byte_cnt;
@@ -571,6 +599,40 @@
 	u8		op_own;
 };
 
+static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
+{
+	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
+}
+
+static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
+{
+	return (cqe->l4_hdr_type_etc >> 4) & 0x7;
+}
+
+static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
+{
+	return !!(cqe->l4_hdr_type_etc & 0x1);
+}
+
+enum {
+	CQE_L4_HDR_TYPE_NONE			= 0x0,
+	CQE_L4_HDR_TYPE_TCP_NO_ACK		= 0x1,
+	CQE_L4_HDR_TYPE_UDP			= 0x2,
+	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA		= 0x3,
+	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA	= 0x4,
+};
+
+enum {
+	CQE_RSS_HTYPE_IP	= 0x3 << 6,
+	CQE_RSS_HTYPE_L4	= 0x3 << 2,
+};
+
+enum {
+	CQE_L2_OK	= 1 << 0,
+	CQE_L3_OK	= 1 << 1,
+	CQE_L4_OK	= 1 << 2,
+};
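A hedged sketch of how an RX path might combine the CQE helpers and L4 type codes above; cqe_is_tcp() is an illustrative name, not part of this patch:

	static bool cqe_is_tcp(struct mlx5_cqe64 *cqe)
	{
		u8 l4 = get_cqe_l4_hdr_type(cqe);

		return l4 == CQE_L4_HDR_TYPE_TCP_NO_ACK ||
		       l4 == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA ||
		       l4 == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA;
	}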
+
 struct mlx5_sig_err_cqe {
 	u8		rsvd0[16];
 	__be32		expected_trans_sig;
@@ -996,4 +1058,135 @@
 	u8                      rsvd[8];
 };
 
+#define MLX5_CMD_OP_MAX 0x920
+
+enum {
+	VPORT_STATE_DOWN		= 0x0,
+	VPORT_STATE_UP			= 0x1,
+};
+
+enum {
+	MLX5_L3_PROT_TYPE_IPV4		= 0,
+	MLX5_L3_PROT_TYPE_IPV6		= 1,
+};
+
+enum {
+	MLX5_L4_PROT_TYPE_TCP		= 0,
+	MLX5_L4_PROT_TYPE_UDP		= 1,
+};
+
+enum {
+	MLX5_HASH_FIELD_SEL_SRC_IP	= 1 << 0,
+	MLX5_HASH_FIELD_SEL_DST_IP	= 1 << 1,
+	MLX5_HASH_FIELD_SEL_L4_SPORT	= 1 << 2,
+	MLX5_HASH_FIELD_SEL_L4_DPORT	= 1 << 3,
+	MLX5_HASH_FIELD_SEL_IPSEC_SPI	= 1 << 4,
+};
+
+enum {
+	MLX5_MATCH_OUTER_HEADERS	= 1 << 0,
+	MLX5_MATCH_MISC_PARAMETERS	= 1 << 1,
+	MLX5_MATCH_INNER_HEADERS	= 1 << 2,
+};
+
+enum {
+	MLX5_FLOW_TABLE_TYPE_NIC_RCV	= 0,
+	MLX5_FLOW_TABLE_TYPE_ESWITCH	= 4,
+};
+
+enum {
+	MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT	= 0,
+	MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE	= 1,
+	MLX5_FLOW_CONTEXT_DEST_TYPE_TIR		= 2,
+};
+
+enum {
+	MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
+	MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM    = 0x1,
+};
+
+/* MLX5 DEV CAPs */
+
+/* TODO: EAT.ME */
+enum mlx5_cap_mode {
+	HCA_CAP_OPMOD_GET_MAX	= 0,
+	HCA_CAP_OPMOD_GET_CUR	= 1,
+};
+
+enum mlx5_cap_type {
+	MLX5_CAP_GENERAL = 0,
+	MLX5_CAP_ETHERNET_OFFLOADS,
+	MLX5_CAP_ODP,
+	MLX5_CAP_ATOMIC,
+	MLX5_CAP_ROCE,
+	MLX5_CAP_IPOIB_OFFLOADS,
+	MLX5_CAP_EOIB_OFFLOADS,
+	MLX5_CAP_FLOW_TABLE,
+	/* NUM OF CAP Types */
+	MLX5_CAP_NUM
+};
+
+/* GET Dev Caps macros */
+#define MLX5_CAP_GEN(mdev, cap) \
+	MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_GEN_MAX(mdev, cap) \
+	MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_ETH(mdev, cap) \
+	MLX5_GET(per_protocol_networking_offload_caps,\
+		 mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ETH_MAX(mdev, cap) \
+	MLX5_GET(per_protocol_networking_offload_caps,\
+		 mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ROCE(mdev, cap) \
+	MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ROCE_MAX(mdev, cap) \
+	MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ATOMIC(mdev, cap) \
+	MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
+	MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_FLOWTABLE(mdev, cap) \
+	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
+	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ODP(mdev, cap)\
+	MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
+
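The *_MAX variants read the device ceiling while the plain forms read the currently enabled value. A short usage sketch; the LRO feature test and `netdev` are illustrative assumptions:

	int max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);

	if (!MLX5_CAP_ETH(mdev, lro_cap))
		netdev->hw_features &= ~NETIF_F_LRO;	/* assumed netdev */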
+enum {
+	MLX5_CMD_STAT_OK			= 0x0,
+	MLX5_CMD_STAT_INT_ERR			= 0x1,
+	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
+	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
+	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
+	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
+	MLX5_CMD_STAT_RES_BUSY			= 0x6,
+	MLX5_CMD_STAT_LIM_ERR			= 0x8,
+	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
+	MLX5_CMD_STAT_IX_ERR			= 0xa,
+	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
+	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
+	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
+	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
+	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
+	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
+};
+
+static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
+{
+	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
+		return 0;
+	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
+}
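For reference, mlx5_to_sw_pkey_sz() expands the device encoding as 128 << pkey_sz, i.e. 0 -> 128, 1 -> 256, 2 -> 512, 3 -> 1K, 4 -> 2K and 5 -> 4K entries, matching the MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_* codes added to mlx5_ifc.h below; encodings above MLX5_MAX_LOG_PKEY_TABLE (5) yield 0.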
+
 #endif /* MLX5_DEVICE_H */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 9ec7c93..5722d88 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -44,7 +44,6 @@
 
 #include <linux/mlx5/device.h>
 #include <linux/mlx5/doorbell.h>
-#include <linux/mlx5/mlx5_ifc.h>
 
 enum {
 	MLX5_BOARD_ID_LEN = 64,
@@ -85,7 +84,7 @@
 };
 
 enum {
-	MLX5_MAX_EQ_NAME	= 32
+	MLX5_MAX_IRQ_NAME	= 32
 };
 
 enum {
@@ -108,6 +107,7 @@
 	MLX5_REG_PUDE		 = 0x5009,
 	MLX5_REG_PMPE		 = 0x5010,
 	MLX5_REG_PELC		 = 0x500e,
+	MLX5_REG_PVLC		 = 0x500f,
 	MLX5_REG_PMLP		 = 0, /* TBD */
 	MLX5_REG_NODE_DESC	 = 0x6001,
 	MLX5_REG_HOST_ENDIANNESS = 0x7004,
@@ -150,6 +150,11 @@
 	MLX5_DEV_EVENT_CLIENT_REREG,
 };
 
+enum mlx5_port_status {
+	MLX5_PORT_UP        = 1 << 1,
+	MLX5_PORT_DOWN      = 1 << 2,
+};
+
 struct mlx5_uuar_info {
 	struct mlx5_uar	       *uars;
 	int			num_uars;
@@ -269,56 +274,7 @@
 struct mlx5_port_caps {
 	int	gid_table_len;
 	int	pkey_table_len;
-};
-
-struct mlx5_general_caps {
-	u8	log_max_eq;
-	u8	log_max_cq;
-	u8	log_max_qp;
-	u8	log_max_mkey;
-	u8	log_max_pd;
-	u8	log_max_srq;
-	u8	log_max_strq;
-	u8	log_max_mrw_sz;
-	u8	log_max_bsf_list_size;
-	u8	log_max_klm_list_size;
-	u32	max_cqes;
-	int	max_wqes;
-	u32	max_eqes;
-	u32	max_indirection;
-	int	max_sq_desc_sz;
-	int	max_rq_desc_sz;
-	int	max_dc_sq_desc_sz;
-	u64	flags;
-	u16	stat_rate_support;
-	int	log_max_msg;
-	int	num_ports;
-	u8	log_max_ra_res_qp;
-	u8	log_max_ra_req_qp;
-	int	max_srq_wqes;
-	int	bf_reg_size;
-	int	bf_regs_per_page;
-	struct mlx5_port_caps	port[MLX5_MAX_PORTS];
-	u8			ext_port_cap[MLX5_MAX_PORTS];
-	int	max_vf;
-	u32	reserved_lkey;
-	u8	local_ca_ack_delay;
-	u8	log_max_mcg;
-	u32	max_qp_mcg;
-	int	min_page_sz;
-	int	pd_cap;
-	u32	max_qp_counters;
-	u32	pkey_table_size;
-	u8	log_max_ra_req_dc;
-	u8	log_max_ra_res_dc;
-	u32	uar_sz;
-	u8	min_log_pg_sz;
-	u8	log_max_xrcd;
-	u16	log_uar_page_sz;
-};
-
-struct mlx5_caps {
-	struct mlx5_general_caps gen;
+	u8	ext_port_cap;
 };
 
 struct mlx5_cmd_mailbox {
@@ -334,8 +290,6 @@
 
 struct mlx5_buf {
 	struct mlx5_buf_list	direct;
-	struct mlx5_buf_list   *page_list;
-	int			nbufs;
 	int			npages;
 	int			size;
 	u8			page_shift;
@@ -351,7 +305,6 @@
 	u8			eqn;
 	int			nent;
 	u64			mask;
-	char			name[MLX5_MAX_EQ_NAME];
 	struct list_head	list;
 	int			index;
 	struct mlx5_rsc_debug	*dbg;
@@ -387,6 +340,8 @@
 
 enum mlx5_res_type {
 	MLX5_RES_QP,
+	MLX5_RES_SRQ,
+	MLX5_RES_XSRQ,
 };
 
 struct mlx5_core_rsc_common {
@@ -396,6 +351,7 @@
 };
 
 struct mlx5_core_srq {
+	struct mlx5_core_rsc_common	common; /* must be first */
 	u32		srqn;
 	int		max;
 	int		max_gs;
@@ -414,7 +370,6 @@
 	struct mlx5_eq		pages_eq;
 	struct mlx5_eq		async_eq;
 	struct mlx5_eq		cmd_eq;
-	struct msix_entry	*msix_arr;
 	int			num_comp_vectors;
 	/* protect EQs list
 	 */
@@ -467,9 +422,16 @@
 	struct radix_tree_root	tree;
 };
 
+struct mlx5_irq_info {
+	cpumask_var_t mask;
+	char name[MLX5_MAX_IRQ_NAME];
+};
+
 struct mlx5_priv {
 	char			name[MLX5_MAX_NAME_LEN];
 	struct mlx5_eq_table	eq_table;
+	struct msix_entry	*msix_arr;
+	struct mlx5_irq_info	*irq_info;
 	struct mlx5_uuar_info	uuari;
 	MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
 
@@ -520,7 +482,9 @@
 	u8			rev_id;
 	char			board_id[MLX5_BOARD_ID_LEN];
 	struct mlx5_cmd		cmd;
-	struct mlx5_caps	caps;
+	struct mlx5_port_caps	port_caps[MLX5_MAX_PORTS];
+	u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+	u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
 	phys_addr_t		iseg_base;
 	struct mlx5_init_seg __iomem *iseg;
 	void			(*event) (struct mlx5_core_dev *dev,
@@ -529,6 +493,7 @@
 	struct mlx5_priv	priv;
 	struct mlx5_profile	*profile;
 	atomic_t		num_qps;
+	u32			issi;
 };
 
 struct mlx5_db {
@@ -549,6 +514,11 @@
 	MLX5_COMP_EQ_SIZE = 1024,
 };
 
+enum {
+	MLX5_PTYS_IB = 1 << 0,
+	MLX5_PTYS_EN = 1 << 2,
+};
+
 struct mlx5_db_pgdir {
 	struct list_head	list;
 	DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
@@ -584,13 +554,44 @@
 	u8	log_sz;
 };
 
+enum port_state_policy {
+	MLX5_AAA_000
+};
+
+enum phy_port_state {
+	MLX5_AAA_111
+};
+
+struct mlx5_hca_vport_context {
+	u32			field_select;
+	bool			sm_virt_aware;
+	bool			has_smi;
+	bool			has_raw;
+	enum port_state_policy	policy;
+	enum phy_port_state	phys_state;
+	enum ib_port_state	vport_state;
+	u8			port_physical_state;
+	u64			sys_image_guid;
+	u64			port_guid;
+	u64			node_guid;
+	u32			cap_mask1;
+	u32			cap_mask1_perm;
+	u32			cap_mask2;
+	u32			cap_mask2_perm;
+	u16			lid;
+	u8			init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
+	u8			lmc;
+	u8			subnet_timeout;
+	u16			sm_lid;
+	u8			sm_sl;
+	u16			qkey_violation_counter;
+	u16			pkey_violation_counter;
+	bool			grh_required;
+};
+
 static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
 {
-	if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
 		return buf->direct.buf + offset;
-	else
-		return buf->page_list[offset >> PAGE_SHIFT].buf +
-			(offset & (PAGE_SIZE - 1));
 }
 
 extern struct workqueue_struct *mlx5_core_wq;
@@ -654,8 +655,8 @@
 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
 int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
 int mlx5_cmd_status_to_err_v2(void *ptr);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
-		       u16 opmod);
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+		       enum mlx5_cap_mode cap_mode);
 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 		  int out_size);
 int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
@@ -665,19 +666,21 @@
 int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
 int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
 int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
 void mlx5_health_cleanup(void);
 void  __init mlx5_health_init(void);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
-		   struct mlx5_buf *buf);
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
 struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
 						      gfp_t flags, int npages);
 void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
 				 struct mlx5_cmd_mailbox *head);
 int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-			 struct mlx5_create_srq_mbox_in *in, int inlen);
+			 struct mlx5_create_srq_mbox_in *in, int inlen,
+			 int is_xrc);
 int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
 int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
 			struct mlx5_query_srq_mbox_out *out);
@@ -734,7 +737,32 @@
 int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
 			 int size_in, void *data_out, int size_out,
 			 u16 reg_num, int arg, int write);
+
 int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+			 int ptys_size, int proto_mask, u8 local_port);
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+			      u32 *proto_cap, int proto_mask);
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+				u32 *proto_admin, int proto_mask);
+int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
+				    u8 *link_width_oper, u8 local_port);
+int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
+			       u8 *proto_oper, int proto_mask,
+			       u8 local_port);
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+			int proto_mask);
+int mlx5_set_port_status(struct mlx5_core_dev *dev,
+			 enum mlx5_port_status status);
+int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+			      u8 port);
+
+int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+			      u8 *vl_hw_cap, u8 local_port);
 
 int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
@@ -799,6 +827,7 @@
 void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
 int mlx5_register_interface(struct mlx5_interface *intf);
 void mlx5_unregister_interface(struct mlx5_interface *intf);
+int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
 
 struct mlx5_profile {
 	u64	mask;
@@ -809,4 +838,14 @@
 	} mr_cache[MAX_MR_CACHE_ENTRIES];
 };
 
+static inline int mlx5_get_gid_table_len(u16 param)
+{
+	if (param > 4) {
+		pr_warn("invalid gid table length encoding %u, treating as zero\n", param);
+		return 0;
+	}
+
+	return 8 * (1 << param);
+}
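Likewise, mlx5_get_gid_table_len() expands the encoding as 8 << param, giving 8, 16, 32, 64 or 128 GID entries for params 0-4 (the MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_* codes below); anything larger is warned about and treated as a zero-length table.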
+
 #endif /* MLX5_DRIVER_H */
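A sketch of bringing a port up with the new port helpers above; the jumbo MTU value and the error handling are illustrative:

	static int bring_port_up(struct mlx5_core_dev *mdev, u8 port)
	{
		int max_mtu;
		int err;

		mlx5_query_port_max_mtu(mdev, &max_mtu, port);
		err = mlx5_set_port_mtu(mdev, min(9000, max_mtu), port);
		if (err)
			return err;

		return mlx5_set_port_status(mdev, MLX5_PORT_UP);
	}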
diff --git a/include/linux/mlx5/flow_table.h b/include/linux/mlx5/flow_table.h
new file mode 100644
index 0000000..5f922c6
--- /dev/null
+++ b/include/linux/mlx5/flow_table.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_FLOW_TABLE_H
+#define MLX5_FLOW_TABLE_H
+
+#include <linux/mlx5/driver.h>
+
+struct mlx5_flow_table_group {
+	u8	log_sz;
+	u8	match_criteria_enable;
+	u32	match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
+};
+
+void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
+			     u16 num_groups,
+			     struct mlx5_flow_table_group *group);
+void mlx5_destroy_flow_table(void *flow_table);
+int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
+			      void *match_criteria, void *flow_context,
+			      u32 *flow_index);
+void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
+u32 mlx5_get_flow_table_id(void *flow_table);
+
+#endif /* MLX5_FLOW_TABLE_H */
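A hedged end-to-end sketch of the handle-based API above; the single group, its sizing, and the caller-provided mlx5_ifc-formatted buffers are illustrative assumptions:

	static int steer_one_flow(struct mlx5_core_dev *dev,
				  void *match_criteria, void *flow_context)
	{
		struct mlx5_flow_table_group group = {
			.log_sz			= 10,	/* 1K entries, arbitrary */
			.match_criteria_enable	= MLX5_MATCH_OUTER_HEADERS,
		};
		u32 flow_index;
		void *ft;
		int err;

		ft = mlx5_create_flow_table(dev, 0, MLX5_FLOW_TABLE_TYPE_NIC_RCV,
					    1, &group);
		if (!ft)
			return -ENOMEM;

		err = mlx5_add_flow_table_entry(ft, MLX5_MATCH_OUTER_HEADERS,
						match_criteria, flow_context,
						&flow_index);
		if (err)
			mlx5_destroy_flow_table(ft);

		return err;
	}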
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index cb3ad17..6d2f6fe 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -28,12 +28,45 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- */
-
+*/
 #ifndef MLX5_IFC_H
 #define MLX5_IFC_H
 
 enum {
+	MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS                   = 0x0,
+	MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED             = 0x1,
+	MLX5_EVENT_TYPE_CODING_COMMUNICATION_ESTABLISHED           = 0x2,
+	MLX5_EVENT_TYPE_CODING_SEND_QUEUE_DRAINED                  = 0x3,
+	MLX5_EVENT_TYPE_CODING_LAST_WQE_REACHED                    = 0x13,
+	MLX5_EVENT_TYPE_CODING_SRQ_LIMIT                           = 0x14,
+	MLX5_EVENT_TYPE_CODING_DCT_ALL_CONNECTIONS_CLOSED          = 0x1c,
+	MLX5_EVENT_TYPE_CODING_DCT_ACCESS_KEY_VIOLATION            = 0x1d,
+	MLX5_EVENT_TYPE_CODING_CQ_ERROR                            = 0x4,
+	MLX5_EVENT_TYPE_CODING_LOCAL_WQ_CATASTROPHIC_ERROR         = 0x5,
+	MLX5_EVENT_TYPE_CODING_PATH_MIGRATION_FAILED               = 0x7,
+	MLX5_EVENT_TYPE_CODING_PAGE_FAULT_EVENT                    = 0xc,
+	MLX5_EVENT_TYPE_CODING_INVALID_REQUEST_LOCAL_WQ_ERROR      = 0x10,
+	MLX5_EVENT_TYPE_CODING_LOCAL_ACCESS_VIOLATION_WQ_ERROR     = 0x11,
+	MLX5_EVENT_TYPE_CODING_LOCAL_SRQ_CATASTROPHIC_ERROR        = 0x12,
+	MLX5_EVENT_TYPE_CODING_INTERNAL_ERROR                      = 0x8,
+	MLX5_EVENT_TYPE_CODING_PORT_STATE_CHANGE                   = 0x9,
+	MLX5_EVENT_TYPE_CODING_GPIO_EVENT                          = 0x15,
+	MLX5_EVENT_TYPE_CODING_REMOTE_CONFIGURATION_PROTOCOL_EVENT = 0x19,
+	MLX5_EVENT_TYPE_CODING_DOORBELL_BLUEFLAME_CONGESTION_EVENT = 0x1a,
+	MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT                      = 0x1b,
+	MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT         = 0x1f,
+	MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION        = 0xa,
+	MLX5_EVENT_TYPE_CODING_PAGE_REQUEST                        = 0xb
+};
+
+enum {
+	MLX5_MODIFY_TIR_BITMASK_LRO                   = 0x0,
+	MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE        = 0x1,
+	MLX5_MODIFY_TIR_BITMASK_HASH                  = 0x2,
+	MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN   = 0x3
+};
+
+enum {
 	MLX5_CMD_OP_QUERY_HCA_CAP                 = 0x100,
 	MLX5_CMD_OP_QUERY_ADAPTER                 = 0x101,
 	MLX5_CMD_OP_INIT_HCA                      = 0x102,
@@ -43,6 +76,8 @@
 	MLX5_CMD_OP_QUERY_PAGES                   = 0x107,
 	MLX5_CMD_OP_MANAGE_PAGES                  = 0x108,
 	MLX5_CMD_OP_SET_HCA_CAP                   = 0x109,
+	MLX5_CMD_OP_QUERY_ISSI                    = 0x10a,
+	MLX5_CMD_OP_SET_ISSI                      = 0x10b,
 	MLX5_CMD_OP_CREATE_MKEY                   = 0x200,
 	MLX5_CMD_OP_QUERY_MKEY                    = 0x201,
 	MLX5_CMD_OP_DESTROY_MKEY                  = 0x202,
@@ -66,6 +101,7 @@
 	MLX5_CMD_OP_2ERR_QP                       = 0x507,
 	MLX5_CMD_OP_2RST_QP                       = 0x50a,
 	MLX5_CMD_OP_QUERY_QP                      = 0x50b,
+	MLX5_CMD_OP_SQD_RTS_QP                    = 0x50c,
 	MLX5_CMD_OP_INIT2INIT_QP                  = 0x50e,
 	MLX5_CMD_OP_CREATE_PSV                    = 0x600,
 	MLX5_CMD_OP_DESTROY_PSV                   = 0x601,
@@ -73,7 +109,10 @@
 	MLX5_CMD_OP_DESTROY_SRQ                   = 0x701,
 	MLX5_CMD_OP_QUERY_SRQ                     = 0x702,
 	MLX5_CMD_OP_ARM_RQ                        = 0x703,
-	MLX5_CMD_OP_RESIZE_SRQ                    = 0x704,
+	MLX5_CMD_OP_CREATE_XRC_SRQ                = 0x705,
+	MLX5_CMD_OP_DESTROY_XRC_SRQ               = 0x706,
+	MLX5_CMD_OP_QUERY_XRC_SRQ                 = 0x707,
+	MLX5_CMD_OP_ARM_XRC_SRQ                   = 0x708,
 	MLX5_CMD_OP_CREATE_DCT                    = 0x710,
 	MLX5_CMD_OP_DESTROY_DCT                   = 0x711,
 	MLX5_CMD_OP_DRAIN_DCT                     = 0x712,
@@ -85,8 +124,12 @@
 	MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT      = 0x753,
 	MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT       = 0x754,
 	MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT      = 0x755,
-	MLX5_CMD_OP_QUERY_RCOE_ADDRESS            = 0x760,
+	MLX5_CMD_OP_QUERY_ROCE_ADDRESS            = 0x760,
 	MLX5_CMD_OP_SET_ROCE_ADDRESS              = 0x761,
+	MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT       = 0x762,
+	MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT      = 0x763,
+	MLX5_CMD_OP_QUERY_HCA_VPORT_GID           = 0x764,
+	MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY          = 0x765,
 	MLX5_CMD_OP_QUERY_VPORT_COUNTER           = 0x770,
 	MLX5_CMD_OP_ALLOC_Q_COUNTER               = 0x771,
 	MLX5_CMD_OP_DEALLOC_Q_COUNTER             = 0x772,
@@ -98,7 +141,7 @@
 	MLX5_CMD_OP_CONFIG_INT_MODERATION         = 0x804,
 	MLX5_CMD_OP_ACCESS_REG                    = 0x805,
 	MLX5_CMD_OP_ATTACH_TO_MCG                 = 0x806,
-	MLX5_CMD_OP_DETACH_FROM_MCG               = 0x807,
+	MLX5_CMD_OP_DETTACH_FROM_MCG              = 0x807,
 	MLX5_CMD_OP_GET_DROPPED_PACKET_LOG        = 0x80a,
 	MLX5_CMD_OP_MAD_IFC                       = 0x50d,
 	MLX5_CMD_OP_QUERY_MAD_DEMUX               = 0x80b,
@@ -106,23 +149,22 @@
 	MLX5_CMD_OP_NOP                           = 0x80d,
 	MLX5_CMD_OP_ALLOC_XRCD                    = 0x80e,
 	MLX5_CMD_OP_DEALLOC_XRCD                  = 0x80f,
-	MLX5_CMD_OP_SET_BURST_SIZE                = 0x812,
-	MLX5_CMD_OP_QUERY_BURST_SZIE              = 0x813,
-	MLX5_CMD_OP_ACTIVATE_TRACER               = 0x814,
-	MLX5_CMD_OP_DEACTIVATE_TRACER             = 0x815,
-	MLX5_CMD_OP_CREATE_SNIFFER_RULE           = 0x820,
-	MLX5_CMD_OP_DESTROY_SNIFFER_RULE          = 0x821,
-	MLX5_CMD_OP_QUERY_CONG_PARAMS             = 0x822,
-	MLX5_CMD_OP_MODIFY_CONG_PARAMS            = 0x823,
-	MLX5_CMD_OP_QUERY_CONG_STATISTICS         = 0x824,
+	MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN        = 0x816,
+	MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN      = 0x817,
+	MLX5_CMD_OP_QUERY_CONG_STATUS             = 0x822,
+	MLX5_CMD_OP_MODIFY_CONG_STATUS            = 0x823,
+	MLX5_CMD_OP_QUERY_CONG_PARAMS             = 0x824,
+	MLX5_CMD_OP_MODIFY_CONG_PARAMS            = 0x825,
+	MLX5_CMD_OP_QUERY_CONG_STATISTICS         = 0x826,
+	MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT           = 0x827,
+	MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT        = 0x828,
+	MLX5_CMD_OP_SET_L2_TABLE_ENTRY            = 0x829,
+	MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY          = 0x82a,
+	MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY         = 0x82b,
 	MLX5_CMD_OP_CREATE_TIR                    = 0x900,
 	MLX5_CMD_OP_MODIFY_TIR                    = 0x901,
 	MLX5_CMD_OP_DESTROY_TIR                   = 0x902,
 	MLX5_CMD_OP_QUERY_TIR                     = 0x903,
-	MLX5_CMD_OP_CREATE_TIS                    = 0x912,
-	MLX5_CMD_OP_MODIFY_TIS                    = 0x913,
-	MLX5_CMD_OP_DESTROY_TIS                   = 0x914,
-	MLX5_CMD_OP_QUERY_TIS                     = 0x915,
 	MLX5_CMD_OP_CREATE_SQ                     = 0x904,
 	MLX5_CMD_OP_MODIFY_SQ                     = 0x905,
 	MLX5_CMD_OP_DESTROY_SQ                    = 0x906,
@@ -135,9 +177,430 @@
 	MLX5_CMD_OP_MODIFY_RMP                    = 0x90d,
 	MLX5_CMD_OP_DESTROY_RMP                   = 0x90e,
 	MLX5_CMD_OP_QUERY_RMP                     = 0x90f,
-	MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY          = 0x910,
-	MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY        = 0x911,
-	MLX5_CMD_OP_MAX				  = 0x911
+	MLX5_CMD_OP_CREATE_TIS                    = 0x912,
+	MLX5_CMD_OP_MODIFY_TIS                    = 0x913,
+	MLX5_CMD_OP_DESTROY_TIS                   = 0x914,
+	MLX5_CMD_OP_QUERY_TIS                     = 0x915,
+	MLX5_CMD_OP_CREATE_RQT                    = 0x916,
+	MLX5_CMD_OP_MODIFY_RQT                    = 0x917,
+	MLX5_CMD_OP_DESTROY_RQT                   = 0x918,
+	MLX5_CMD_OP_QUERY_RQT                     = 0x919,
+	MLX5_CMD_OP_CREATE_FLOW_TABLE             = 0x930,
+	MLX5_CMD_OP_DESTROY_FLOW_TABLE            = 0x931,
+	MLX5_CMD_OP_QUERY_FLOW_TABLE              = 0x932,
+	MLX5_CMD_OP_CREATE_FLOW_GROUP             = 0x933,
+	MLX5_CMD_OP_DESTROY_FLOW_GROUP            = 0x934,
+	MLX5_CMD_OP_QUERY_FLOW_GROUP              = 0x935,
+	MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY          = 0x936,
+	MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY        = 0x937,
+	MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY       = 0x938
+};
+
+struct mlx5_ifc_flow_table_fields_supported_bits {
+	u8         outer_dmac[0x1];
+	u8         outer_smac[0x1];
+	u8         outer_ether_type[0x1];
+	u8         reserved_0[0x1];
+	u8         outer_first_prio[0x1];
+	u8         outer_first_cfi[0x1];
+	u8         outer_first_vid[0x1];
+	u8         reserved_1[0x1];
+	u8         outer_second_prio[0x1];
+	u8         outer_second_cfi[0x1];
+	u8         outer_second_vid[0x1];
+	u8         reserved_2[0x1];
+	u8         outer_sip[0x1];
+	u8         outer_dip[0x1];
+	u8         outer_frag[0x1];
+	u8         outer_ip_protocol[0x1];
+	u8         outer_ip_ecn[0x1];
+	u8         outer_ip_dscp[0x1];
+	u8         outer_udp_sport[0x1];
+	u8         outer_udp_dport[0x1];
+	u8         outer_tcp_sport[0x1];
+	u8         outer_tcp_dport[0x1];
+	u8         outer_tcp_flags[0x1];
+	u8         outer_gre_protocol[0x1];
+	u8         outer_gre_key[0x1];
+	u8         outer_vxlan_vni[0x1];
+	u8         reserved_3[0x5];
+	u8         source_eswitch_port[0x1];
+
+	u8         inner_dmac[0x1];
+	u8         inner_smac[0x1];
+	u8         inner_ether_type[0x1];
+	u8         reserved_4[0x1];
+	u8         inner_first_prio[0x1];
+	u8         inner_first_cfi[0x1];
+	u8         inner_first_vid[0x1];
+	u8         reserved_5[0x1];
+	u8         inner_second_prio[0x1];
+	u8         inner_second_cfi[0x1];
+	u8         inner_second_vid[0x1];
+	u8         reserved_6[0x1];
+	u8         inner_sip[0x1];
+	u8         inner_dip[0x1];
+	u8         inner_frag[0x1];
+	u8         inner_ip_protocol[0x1];
+	u8         inner_ip_ecn[0x1];
+	u8         inner_ip_dscp[0x1];
+	u8         inner_udp_sport[0x1];
+	u8         inner_udp_dport[0x1];
+	u8         inner_tcp_sport[0x1];
+	u8         inner_tcp_dport[0x1];
+	u8         inner_tcp_flags[0x1];
+	u8         reserved_7[0x9];
+
+	u8         reserved_8[0x40];
+};
+
+struct mlx5_ifc_flow_table_prop_layout_bits {
+	u8         ft_support[0x1];
+	u8         reserved_0[0x1f];
+
+	u8         reserved_1[0x2];
+	u8         log_max_ft_size[0x6];
+	u8         reserved_2[0x10];
+	u8         max_ft_level[0x8];
+
+	u8         reserved_3[0x20];
+
+	u8         reserved_4[0x18];
+	u8         log_max_ft_num[0x8];
+
+	u8         reserved_5[0x18];
+	u8         log_max_destination[0x8];
+
+	u8         reserved_6[0x18];
+	u8         log_max_flow[0x8];
+
+	u8         reserved_7[0x40];
+
+	struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
+
+	struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support;
+};
+
+struct mlx5_ifc_odp_per_transport_service_cap_bits {
+	u8         send[0x1];
+	u8         receive[0x1];
+	u8         write[0x1];
+	u8         read[0x1];
+	u8         reserved_0[0x1];
+	u8         srq_receive[0x1];
+	u8         reserved_1[0x1a];
+};
+
+struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
+	u8         smac_47_16[0x20];
+
+	u8         smac_15_0[0x10];
+	u8         ethertype[0x10];
+
+	u8         dmac_47_16[0x20];
+
+	u8         dmac_15_0[0x10];
+	u8         first_prio[0x3];
+	u8         first_cfi[0x1];
+	u8         first_vid[0xc];
+
+	u8         ip_protocol[0x8];
+	u8         ip_dscp[0x6];
+	u8         ip_ecn[0x2];
+	u8         vlan_tag[0x1];
+	u8         reserved_0[0x1];
+	u8         frag[0x1];
+	u8         reserved_1[0x4];
+	u8         tcp_flags[0x9];
+
+	u8         tcp_sport[0x10];
+	u8         tcp_dport[0x10];
+
+	u8         reserved_2[0x20];
+
+	u8         udp_sport[0x10];
+	u8         udp_dport[0x10];
+
+	u8         src_ip[4][0x20];
+
+	u8         dst_ip[4][0x20];
+};
+
+struct mlx5_ifc_fte_match_set_misc_bits {
+	u8         reserved_0[0x20];
+
+	u8         reserved_1[0x10];
+	u8         source_port[0x10];
+
+	u8         outer_second_prio[0x3];
+	u8         outer_second_cfi[0x1];
+	u8         outer_second_vid[0xc];
+	u8         inner_second_prio[0x3];
+	u8         inner_second_cfi[0x1];
+	u8         inner_second_vid[0xc];
+
+	u8         outer_second_vlan_tag[0x1];
+	u8         inner_second_vlan_tag[0x1];
+	u8         reserved_2[0xe];
+	u8         gre_protocol[0x10];
+
+	u8         gre_key_h[0x18];
+	u8         gre_key_l[0x8];
+
+	u8         vxlan_vni[0x18];
+	u8         reserved_3[0x8];
+
+	u8         reserved_4[0x20];
+
+	u8         reserved_5[0xc];
+	u8         outer_ipv6_flow_label[0x14];
+
+	u8         reserved_6[0xc];
+	u8         inner_ipv6_flow_label[0x14];
+
+	u8         reserved_7[0xe0];
+};
+
+struct mlx5_ifc_cmd_pas_bits {
+	u8         pa_h[0x20];
+
+	u8         pa_l[0x14];
+	u8         reserved_0[0xc];
+};
+
+struct mlx5_ifc_uint64_bits {
+	u8         hi[0x20];
+
+	u8         lo[0x20];
+};
+
+enum {
+	MLX5_ADS_STAT_RATE_NO_LIMIT  = 0x0,
+	MLX5_ADS_STAT_RATE_2_5GBPS   = 0x7,
+	MLX5_ADS_STAT_RATE_10GBPS    = 0x8,
+	MLX5_ADS_STAT_RATE_30GBPS    = 0x9,
+	MLX5_ADS_STAT_RATE_5GBPS     = 0xa,
+	MLX5_ADS_STAT_RATE_20GBPS    = 0xb,
+	MLX5_ADS_STAT_RATE_40GBPS    = 0xc,
+	MLX5_ADS_STAT_RATE_60GBPS    = 0xd,
+	MLX5_ADS_STAT_RATE_80GBPS    = 0xe,
+	MLX5_ADS_STAT_RATE_120GBPS   = 0xf,
+};
+
+struct mlx5_ifc_ads_bits {
+	u8         fl[0x1];
+	u8         free_ar[0x1];
+	u8         reserved_0[0xe];
+	u8         pkey_index[0x10];
+
+	u8         reserved_1[0x8];
+	u8         grh[0x1];
+	u8         mlid[0x7];
+	u8         rlid[0x10];
+
+	u8         ack_timeout[0x5];
+	u8         reserved_2[0x3];
+	u8         src_addr_index[0x8];
+	u8         reserved_3[0x4];
+	u8         stat_rate[0x4];
+	u8         hop_limit[0x8];
+
+	u8         reserved_4[0x4];
+	u8         tclass[0x8];
+	u8         flow_label[0x14];
+
+	u8         rgid_rip[16][0x8];
+
+	u8         reserved_5[0x4];
+	u8         f_dscp[0x1];
+	u8         f_ecn[0x1];
+	u8         reserved_6[0x1];
+	u8         f_eth_prio[0x1];
+	u8         ecn[0x2];
+	u8         dscp[0x6];
+	u8         udp_sport[0x10];
+
+	u8         dei_cfi[0x1];
+	u8         eth_prio[0x3];
+	u8         sl[0x4];
+	u8         port[0x8];
+	u8         rmac_47_32[0x10];
+
+	u8         rmac_31_0[0x20];
+};
+
+struct mlx5_ifc_flow_table_nic_cap_bits {
+	u8         reserved_0[0x200];
+
+	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
+
+	u8         reserved_1[0x200];
+
+	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
+
+	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
+
+	u8         reserved_2[0x200];
+
+	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
+
+	u8         reserved_3[0x7200];
+};
+
+struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
+	u8         csum_cap[0x1];
+	u8         vlan_cap[0x1];
+	u8         lro_cap[0x1];
+	u8         lro_psh_flag[0x1];
+	u8         lro_time_stamp[0x1];
+	u8         reserved_0[0x6];
+	u8         max_lso_cap[0x5];
+	u8         reserved_1[0x4];
+	u8         rss_ind_tbl_cap[0x4];
+	u8         reserved_2[0x3];
+	u8         tunnel_lso_const_out_ip_id[0x1];
+	u8         reserved_3[0x2];
+	u8         tunnel_statless_gre[0x1];
+	u8         tunnel_stateless_vxlan[0x1];
+
+	u8         reserved_4[0x20];
+
+	u8         reserved_5[0x10];
+	u8         lro_min_mss_size[0x10];
+
+	u8         reserved_6[0x120];
+
+	u8         lro_timer_supported_periods[4][0x20];
+
+	u8         reserved_7[0x600];
+};
+
+struct mlx5_ifc_roce_cap_bits {
+	u8         roce_apm[0x1];
+	u8         reserved_0[0x1f];
+
+	u8         reserved_1[0x60];
+
+	u8         reserved_2[0xc];
+	u8         l3_type[0x4];
+	u8         reserved_3[0x8];
+	u8         roce_version[0x8];
+
+	u8         reserved_4[0x10];
+	u8         r_roce_dest_udp_port[0x10];
+
+	u8         r_roce_max_src_udp_port[0x10];
+	u8         r_roce_min_src_udp_port[0x10];
+
+	u8         reserved_5[0x10];
+	u8         roce_address_table_size[0x10];
+
+	u8         reserved_6[0x700];
+};
+
+enum {
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE     = 0x0,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES    = 0x2,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES    = 0x4,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES    = 0x8,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES   = 0x10,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES   = 0x20,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES   = 0x40,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES  = 0x80,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES  = 0x100,
+};
+
+enum {
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE     = 0x1,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES    = 0x2,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES    = 0x4,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES    = 0x8,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES   = 0x10,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES   = 0x20,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES   = 0x40,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES  = 0x80,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES  = 0x100,
+};
+
+struct mlx5_ifc_atomic_caps_bits {
+	u8         reserved_0[0x40];
+
+	u8         atomic_req_endianness[0x1];
+	u8         reserved_1[0x1f];
+
+	u8         reserved_2[0x20];
+
+	u8         reserved_3[0x10];
+	u8         atomic_operations[0x10];
+
+	u8         reserved_4[0x10];
+	u8         atomic_size_qp[0x10];
+
+	u8         reserved_5[0x10];
+	u8         atomic_size_dc[0x10];
+
+	u8         reserved_6[0x720];
+};
+
+struct mlx5_ifc_odp_cap_bits {
+	u8         reserved_0[0x40];
+
+	u8         sig[0x1];
+	u8         reserved_1[0x1f];
+
+	u8         reserved_2[0x20];
+
+	struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps;
+
+	struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps;
+
+	struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
+
+	u8         reserved_3[0x720];
+};
+
+enum {
+	MLX5_WQ_TYPE_LINKED_LIST  = 0x0,
+	MLX5_WQ_TYPE_CYCLIC       = 0x1,
+	MLX5_WQ_TYPE_STRQ         = 0x2,
+};
+
+enum {
+	MLX5_WQ_END_PAD_MODE_NONE   = 0x0,
+	MLX5_WQ_END_PAD_MODE_ALIGN  = 0x1,
+};
+
+enum {
+	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES    = 0x0,
+	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES   = 0x1,
+	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES   = 0x2,
+	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES   = 0x3,
+	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES  = 0x4,
+};
+
+enum {
+	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES  = 0x0,
+	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES  = 0x1,
+	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES  = 0x2,
+	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES   = 0x3,
+	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES   = 0x4,
+	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES   = 0x5,
+};
+
+enum {
+	MLX5_CMD_HCA_CAP_PORT_TYPE_IB        = 0x0,
+	MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET  = 0x1,
+};
+
+enum {
+	MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED       = 0x0,
+	MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE  = 0x1,
+	MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED        = 0x3,
+};
+
+enum {
+	MLX5_CAP_PORT_TYPE_IB  = 0x0,
+	MLX5_CAP_PORT_TYPE_ETH = 0x1,
 };
 
 struct mlx5_ifc_cmd_hca_cap_bits {
@@ -148,9 +611,8 @@
 	u8         reserved_1[0xb];
 	u8         log_max_qp[0x5];
 
-	u8         log_max_strq_sz[0x8];
-	u8         reserved_2[0x3];
-	u8         log_max_srqs[0x5];
+	u8         reserved_2[0xb];
+	u8         log_max_srq[0x5];
 	u8         reserved_3[0x10];
 
 	u8         reserved_4[0x8];
@@ -185,123 +647,2112 @@
 	u8         pad_cap[0x1];
 	u8         cc_query_allowed[0x1];
 	u8         cc_modify_allowed[0x1];
-	u8         reserved_15[0x1d];
+	u8         reserved_15[0xd];
+	u8         gid_table_size[0x10];
 
-	u8         reserved_16[0x6];
+	u8         out_of_seq_cnt[0x1];
+	u8         vport_counters[0x1];
+	u8         reserved_16[0x4];
 	u8         max_qp_cnt[0xa];
 	u8         pkey_table_size[0x10];
 
-	u8         eswitch_owner[0x1];
-	u8         reserved_17[0xa];
+	u8         vport_group_manager[0x1];
+	u8         vhca_group_manager[0x1];
+	u8         ib_virt[0x1];
+	u8         eth_virt[0x1];
+	u8         reserved_17[0x1];
+	u8         ets[0x1];
+	u8         nic_flow_table[0x1];
+	u8         reserved_18[0x4];
 	u8         local_ca_ack_delay[0x5];
-	u8         reserved_18[0x8];
+	u8         reserved_19[0x6];
+	u8         port_type[0x2];
 	u8         num_ports[0x8];
 
-	u8         reserved_19[0x3];
+	u8         reserved_20[0x3];
 	u8         log_max_msg[0x5];
-	u8         reserved_20[0x18];
+	u8         reserved_21[0x18];
 
 	u8         stat_rate_support[0x10];
-	u8         reserved_21[0x10];
+	u8         reserved_22[0xc];
+	u8         cqe_version[0x4];
 
-	u8         reserved_22[0x10];
+	u8         compact_address_vector[0x1];
+	u8         reserved_23[0xe];
+	u8         drain_sigerr[0x1];
 	u8         cmdif_checksum[0x2];
 	u8         sigerr_cqe[0x1];
-	u8         reserved_23[0x1];
+	u8         reserved_24[0x1];
 	u8         wq_signature[0x1];
 	u8         sctr_data_cqe[0x1];
-	u8         reserved_24[0x1];
+	u8         reserved_25[0x1];
 	u8         sho[0x1];
 	u8         tph[0x1];
 	u8         rf[0x1];
-	u8         dc[0x1];
-	u8         reserved_25[0x2];
+	u8         dct[0x1];
+	u8         reserved_26[0x1];
+	u8         eth_net_offloads[0x1];
 	u8         roce[0x1];
 	u8         atomic[0x1];
-	u8         rsz_srq[0x1];
+	u8         reserved_27[0x1];
 
 	u8         cq_oi[0x1];
 	u8         cq_resize[0x1];
 	u8         cq_moderation[0x1];
-	u8         sniffer_rule_flow[0x1];
-	u8         sniffer_rule_vport[0x1];
-	u8         sniffer_rule_phy[0x1];
-	u8         reserved_26[0x1];
+	u8         reserved_28[0x3];
+	u8         cq_eq_remap[0x1];
 	u8         pg[0x1];
 	u8         block_lb_mc[0x1];
-	u8         reserved_27[0x3];
+	u8         reserved_29[0x1];
+	u8         scqe_break_moderation[0x1];
+	u8         reserved_30[0x1];
 	u8         cd[0x1];
-	u8         reserved_28[0x1];
+	u8         reserved_31[0x1];
 	u8         apm[0x1];
-	u8         reserved_29[0x7];
+	u8         reserved_32[0x7];
 	u8         qkv[0x1];
 	u8         pkv[0x1];
-	u8         reserved_30[0x4];
+	u8         reserved_33[0x4];
 	u8         xrc[0x1];
 	u8         ud[0x1];
 	u8         uc[0x1];
 	u8         rc[0x1];
 
-	u8         reserved_31[0xa];
+	u8         reserved_34[0xa];
 	u8         uar_sz[0x6];
-	u8         reserved_32[0x8];
+	u8         reserved_35[0x8];
 	u8         log_pg_sz[0x8];
 
 	u8         bf[0x1];
-	u8         reserved_33[0xa];
+	u8         reserved_36[0x1];
+	u8         pad_tx_eth_packet[0x1];
+	u8         reserved_37[0x8];
 	u8         log_bf_reg_size[0x5];
-	u8         reserved_34[0x10];
+	u8         reserved_38[0x10];
 
-	u8         reserved_35[0x10];
+	u8         reserved_39[0x10];
 	u8         max_wqe_sz_sq[0x10];
 
-	u8         reserved_36[0x10];
+	u8         reserved_40[0x10];
 	u8         max_wqe_sz_rq[0x10];
 
-	u8         reserved_37[0x10];
+	u8         reserved_41[0x10];
 	u8         max_wqe_sz_sq_dc[0x10];
 
-	u8         reserved_38[0x7];
+	u8         reserved_42[0x7];
 	u8         max_qp_mcg[0x19];
 
-	u8         reserved_39[0x18];
+	u8         reserved_43[0x18];
 	u8         log_max_mcg[0x8];
 
-	u8         reserved_40[0xb];
+	u8         reserved_44[0x3];
+	u8         log_max_transport_domain[0x5];
+	u8         reserved_45[0x3];
 	u8         log_max_pd[0x5];
-	u8         reserved_41[0xb];
+	u8         reserved_46[0xb];
 	u8         log_max_xrcd[0x5];
 
-	u8         reserved_42[0x20];
+	u8         reserved_47[0x20];
 
-	u8         reserved_43[0x3];
+	u8         reserved_48[0x3];
 	u8         log_max_rq[0x5];
-	u8         reserved_44[0x3];
+	u8         reserved_49[0x3];
 	u8         log_max_sq[0x5];
-	u8         reserved_45[0x3];
+	u8         reserved_50[0x3];
 	u8         log_max_tir[0x5];
-	u8         reserved_46[0x3];
+	u8         reserved_51[0x3];
 	u8         log_max_tis[0x5];
 
-	u8         reserved_47[0x13];
-	u8         log_max_rq_per_tir[0x5];
-	u8         reserved_48[0x3];
+	u8         basic_cyclic_rcv_wqe[0x1];
+	u8         reserved_52[0x2];
+	u8         log_max_rmp[0x5];
+	u8         reserved_53[0x3];
+	u8         log_max_rqt[0x5];
+	u8         reserved_54[0x3];
+	u8         log_max_rqt_size[0x5];
+	u8         reserved_55[0x3];
 	u8         log_max_tis_per_sq[0x5];
 
-	u8         reserved_49[0xe0];
+	u8         reserved_56[0x3];
+	u8         log_max_stride_sz_rq[0x5];
+	u8         reserved_57[0x3];
+	u8         log_min_stride_sz_rq[0x5];
+	u8         reserved_58[0x3];
+	u8         log_max_stride_sz_sq[0x5];
+	u8         reserved_59[0x3];
+	u8         log_min_stride_sz_sq[0x5];
 
-	u8         reserved_50[0x10];
+	u8         reserved_60[0x1b];
+	u8         log_max_wq_sz[0x5];
+
+	u8         reserved_61[0xa0];
+
+	u8         reserved_62[0x3];
+	u8         log_max_l2_table[0x5];
+	u8         reserved_63[0x8];
 	u8         log_uar_page_sz[0x10];
 
-	u8         reserved_51[0x100];
+	u8         reserved_64[0x100];
 
-	u8         reserved_52[0x1f];
+	u8         reserved_65[0x1f];
 	u8         cqe_zip[0x1];
 
 	u8         cqe_zip_timeout[0x10];
 	u8         cqe_zip_max_num[0x10];
 
-	u8         reserved_53[0x220];
+	u8         reserved_66[0x220];
+};
+
+enum {
+	MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_FLOW_TABLE_  = 0x1,
+	MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_TIR          = 0x2,
+};
+
+struct mlx5_ifc_dest_format_struct_bits {
+	u8         destination_type[0x8];
+	u8         destination_id[0x18];
+
+	u8         reserved_0[0x20];
+};
+
+struct mlx5_ifc_fte_match_param_bits {
+	struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
+
+	struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
+
+	struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
+
+	u8         reserved_0[0xa00];
+};
+
+enum {
+	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP     = 0x0,
+	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP     = 0x1,
+	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT   = 0x2,
+	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT   = 0x3,
+	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI  = 0x4,
+};
+
+struct mlx5_ifc_rx_hash_field_select_bits {
+	u8         l3_prot_type[0x1];
+	u8         l4_prot_type[0x1];
+	u8         selected_fields[0x1e];
+};
+
+enum {
+	MLX5_WQ_WQ_TYPE_WQ_LINKED_LIST  = 0x0,
+	MLX5_WQ_WQ_TYPE_WQ_CYCLIC       = 0x1,
+};
+
+enum {
+	MLX5_WQ_END_PADDING_MODE_END_PAD_NONE   = 0x0,
+	MLX5_WQ_END_PADDING_MODE_END_PAD_ALIGN  = 0x1,
+};
+
+struct mlx5_ifc_wq_bits {
+	u8         wq_type[0x4];
+	u8         wq_signature[0x1];
+	u8         end_padding_mode[0x2];
+	u8         cd_slave[0x1];
+	u8         reserved_0[0x18];
+
+	u8         hds_skip_first_sge[0x1];
+	u8         log2_hds_buf_size[0x3];
+	u8         reserved_1[0x7];
+	u8         page_offset[0x5];
+	u8         lwm[0x10];
+
+	u8         reserved_2[0x8];
+	u8         pd[0x18];
+
+	u8         reserved_3[0x8];
+	u8         uar_page[0x18];
+
+	u8         dbr_addr[0x40];
+
+	u8         hw_counter[0x20];
+
+	u8         sw_counter[0x20];
+
+	u8         reserved_4[0xc];
+	u8         log_wq_stride[0x4];
+	u8         reserved_5[0x3];
+	u8         log_wq_pg_sz[0x5];
+	u8         reserved_6[0x3];
+	u8         log_wq_sz[0x5];
+
+	u8         reserved_7[0x4e0];
+
+	struct mlx5_ifc_cmd_pas_bits pas[0];
+};
+
+struct mlx5_ifc_rq_num_bits {
+	u8         reserved_0[0x8];
+	u8         rq_num[0x18];
+};
+
+struct mlx5_ifc_mac_address_layout_bits {
+	u8         reserved_0[0x10];
+	u8         mac_addr_47_32[0x10];
+
+	u8         mac_addr_31_0[0x20];
+};
+
+struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
+	u8         reserved_0[0xa0];
+
+	u8         min_time_between_cnps[0x20];
+
+	u8         reserved_1[0x12];
+	u8         cnp_dscp[0x6];
+	u8         reserved_2[0x5];
+	u8         cnp_802p_prio[0x3];
+
+	u8         reserved_3[0x720];
+};
+
+struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
+	u8         reserved_0[0x60];
+
+	u8         reserved_1[0x4];
+	u8         clamp_tgt_rate[0x1];
+	u8         reserved_2[0x3];
+	u8         clamp_tgt_rate_after_time_inc[0x1];
+	u8         reserved_3[0x17];
+
+	u8         reserved_4[0x20];
+
+	u8         rpg_time_reset[0x20];
+
+	u8         rpg_byte_reset[0x20];
+
+	u8         rpg_threshold[0x20];
+
+	u8         rpg_max_rate[0x20];
+
+	u8         rpg_ai_rate[0x20];
+
+	u8         rpg_hai_rate[0x20];
+
+	u8         rpg_gd[0x20];
+
+	u8         rpg_min_dec_fac[0x20];
+
+	u8         rpg_min_rate[0x20];
+
+	u8         reserved_5[0xe0];
+
+	u8         rate_to_set_on_first_cnp[0x20];
+
+	u8         dce_tcp_g[0x20];
+
+	u8         dce_tcp_rtt[0x20];
+
+	u8         rate_reduce_monitor_period[0x20];
+
+	u8         reserved_6[0x20];
+
+	u8         initial_alpha_value[0x20];
+
+	u8         reserved_7[0x4a0];
+};
+
+struct mlx5_ifc_cong_control_802_1qau_rp_bits {
+	u8         reserved_0[0x80];
+
+	u8         rppp_max_rps[0x20];
+
+	u8         rpg_time_reset[0x20];
+
+	u8         rpg_byte_reset[0x20];
+
+	u8         rpg_threshold[0x20];
+
+	u8         rpg_max_rate[0x20];
+
+	u8         rpg_ai_rate[0x20];
+
+	u8         rpg_hai_rate[0x20];
+
+	u8         rpg_gd[0x20];
+
+	u8         rpg_min_dec_fac[0x20];
+
+	u8         rpg_min_rate[0x20];
+
+	u8         reserved_1[0x640];
+};
+
+enum {
+	MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE    = 0x1,
+	MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET    = 0x2,
+	MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE  = 0x4,
+};
+
+struct mlx5_ifc_resize_field_select_bits {
+	u8         resize_field_select[0x20];
+};
+
+enum {
+	MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD     = 0x1,
+	MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT  = 0x2,
+	MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI            = 0x4,
+	MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN         = 0x8,
+};
+
+struct mlx5_ifc_modify_field_select_bits {
+	u8         modify_field_select[0x20];
+};
+
+struct mlx5_ifc_field_select_r_roce_np_bits {
+	u8         field_select_r_roce_np[0x20];
+};
+
+struct mlx5_ifc_field_select_r_roce_rp_bits {
+	u8         field_select_r_roce_rp[0x20];
+};
+
+enum {
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS     = 0x4,
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET   = 0x8,
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET   = 0x10,
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD    = 0x20,
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE     = 0x40,
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE      = 0x80,
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE     = 0x100,
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD           = 0x200,
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC  = 0x400,
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE     = 0x800,
+};
+
+struct mlx5_ifc_field_select_802_1qau_rp_bits {
+	u8         field_select_8021qaurp[0x20];
+};
+
+struct mlx5_ifc_phys_layer_cntrs_bits {
+	u8         time_since_last_clear_high[0x20];
+
+	u8         time_since_last_clear_low[0x20];
+
+	u8         symbol_errors_high[0x20];
+
+	u8         symbol_errors_low[0x20];
+
+	u8         sync_headers_errors_high[0x20];
+
+	u8         sync_headers_errors_low[0x20];
+
+	u8         edpl_bip_errors_lane0_high[0x20];
+
+	u8         edpl_bip_errors_lane0_low[0x20];
+
+	u8         edpl_bip_errors_lane1_high[0x20];
+
+	u8         edpl_bip_errors_lane1_low[0x20];
+
+	u8         edpl_bip_errors_lane2_high[0x20];
+
+	u8         edpl_bip_errors_lane2_low[0x20];
+
+	u8         edpl_bip_errors_lane3_high[0x20];
+
+	u8         edpl_bip_errors_lane3_low[0x20];
+
+	u8         fc_fec_corrected_blocks_lane0_high[0x20];
+
+	u8         fc_fec_corrected_blocks_lane0_low[0x20];
+
+	u8         fc_fec_corrected_blocks_lane1_high[0x20];
+
+	u8         fc_fec_corrected_blocks_lane1_low[0x20];
+
+	u8         fc_fec_corrected_blocks_lane2_high[0x20];
+
+	u8         fc_fec_corrected_blocks_lane2_low[0x20];
+
+	u8         fc_fec_corrected_blocks_lane3_high[0x20];
+
+	u8         fc_fec_corrected_blocks_lane3_low[0x20];
+
+	u8         fc_fec_uncorrectable_blocks_lane0_high[0x20];
+
+	u8         fc_fec_uncorrectable_blocks_lane0_low[0x20];
+
+	u8         fc_fec_uncorrectable_blocks_lane1_high[0x20];
+
+	u8         fc_fec_uncorrectable_blocks_lane1_low[0x20];
+
+	u8         fc_fec_uncorrectable_blocks_lane2_high[0x20];
+
+	u8         fc_fec_uncorrectable_blocks_lane2_low[0x20];
+
+	u8         fc_fec_uncorrectable_blocks_lane3_high[0x20];
+
+	u8         fc_fec_uncorrectable_blocks_lane3_low[0x20];
+
+	u8         rs_fec_corrected_blocks_high[0x20];
+
+	u8         rs_fec_corrected_blocks_low[0x20];
+
+	u8         rs_fec_uncorrectable_blocks_high[0x20];
+
+	u8         rs_fec_uncorrectable_blocks_low[0x20];
+
+	u8         rs_fec_no_errors_blocks_high[0x20];
+
+	u8         rs_fec_no_errors_blocks_low[0x20];
+
+	u8         rs_fec_single_error_blocks_high[0x20];
+
+	u8         rs_fec_single_error_blocks_low[0x20];
+
+	u8         rs_fec_corrected_symbols_total_high[0x20];
+
+	u8         rs_fec_corrected_symbols_total_low[0x20];
+
+	u8         rs_fec_corrected_symbols_lane0_high[0x20];
+
+	u8         rs_fec_corrected_symbols_lane0_low[0x20];
+
+	u8         rs_fec_corrected_symbols_lane1_high[0x20];
+
+	u8         rs_fec_corrected_symbols_lane1_low[0x20];
+
+	u8         rs_fec_corrected_symbols_lane2_high[0x20];
+
+	u8         rs_fec_corrected_symbols_lane2_low[0x20];
+
+	u8         rs_fec_corrected_symbols_lane3_high[0x20];
+
+	u8         rs_fec_corrected_symbols_lane3_low[0x20];
+
+	u8         link_down_events[0x20];
+
+	u8         successful_recovery_events[0x20];
+
+	u8         reserved_0[0x180];
+};
+
+struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
+	u8         transmit_queue_high[0x20];
+
+	u8         transmit_queue_low[0x20];
+
+	u8         reserved_0[0x780];
+};
+
+struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
+	u8         rx_octets_high[0x20];
+
+	u8         rx_octets_low[0x20];
+
+	u8         reserved_0[0xc0];
+
+	u8         rx_frames_high[0x20];
+
+	u8         rx_frames_low[0x20];
+
+	u8         tx_octets_high[0x20];
+
+	u8         tx_octets_low[0x20];
+
+	u8         reserved_1[0xc0];
+
+	u8         tx_frames_high[0x20];
+
+	u8         tx_frames_low[0x20];
+
+	u8         rx_pause_high[0x20];
+
+	u8         rx_pause_low[0x20];
+
+	u8         rx_pause_duration_high[0x20];
+
+	u8         rx_pause_duration_low[0x20];
+
+	u8         tx_pause_high[0x20];
+
+	u8         tx_pause_low[0x20];
+
+	u8         tx_pause_duration_high[0x20];
+
+	u8         tx_pause_duration_low[0x20];
+
+	u8         rx_pause_transition_high[0x20];
+
+	u8         rx_pause_transition_low[0x20];
+
+	u8         reserved_2[0x400];
+};
+
+struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
+	u8         port_transmit_wait_high[0x20];
+
+	u8         port_transmit_wait_low[0x20];
+
+	u8         reserved_0[0x780];
+};
+
+struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
+	u8         dot3stats_alignment_errors_high[0x20];
+
+	u8         dot3stats_alignment_errors_low[0x20];
+
+	u8         dot3stats_fcs_errors_high[0x20];
+
+	u8         dot3stats_fcs_errors_low[0x20];
+
+	u8         dot3stats_single_collision_frames_high[0x20];
+
+	u8         dot3stats_single_collision_frames_low[0x20];
+
+	u8         dot3stats_multiple_collision_frames_high[0x20];
+
+	u8         dot3stats_multiple_collision_frames_low[0x20];
+
+	u8         dot3stats_sqe_test_errors_high[0x20];
+
+	u8         dot3stats_sqe_test_errors_low[0x20];
+
+	u8         dot3stats_deferred_transmissions_high[0x20];
+
+	u8         dot3stats_deferred_transmissions_low[0x20];
+
+	u8         dot3stats_late_collisions_high[0x20];
+
+	u8         dot3stats_late_collisions_low[0x20];
+
+	u8         dot3stats_excessive_collisions_high[0x20];
+
+	u8         dot3stats_excessive_collisions_low[0x20];
+
+	u8         dot3stats_internal_mac_transmit_errors_high[0x20];
+
+	u8         dot3stats_internal_mac_transmit_errors_low[0x20];
+
+	u8         dot3stats_carrier_sense_errors_high[0x20];
+
+	u8         dot3stats_carrier_sense_errors_low[0x20];
+
+	u8         dot3stats_frame_too_longs_high[0x20];
+
+	u8         dot3stats_frame_too_longs_low[0x20];
+
+	u8         dot3stats_internal_mac_receive_errors_high[0x20];
+
+	u8         dot3stats_internal_mac_receive_errors_low[0x20];
+
+	u8         dot3stats_symbol_errors_high[0x20];
+
+	u8         dot3stats_symbol_errors_low[0x20];
+
+	u8         dot3control_in_unknown_opcodes_high[0x20];
+
+	u8         dot3control_in_unknown_opcodes_low[0x20];
+
+	u8         dot3in_pause_frames_high[0x20];
+
+	u8         dot3in_pause_frames_low[0x20];
+
+	u8         dot3out_pause_frames_high[0x20];
+
+	u8         dot3out_pause_frames_low[0x20];
+
+	u8         reserved_0[0x3c0];
+};
+
+struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
+	u8         ether_stats_drop_events_high[0x20];
+
+	u8         ether_stats_drop_events_low[0x20];
+
+	u8         ether_stats_octets_high[0x20];
+
+	u8         ether_stats_octets_low[0x20];
+
+	u8         ether_stats_pkts_high[0x20];
+
+	u8         ether_stats_pkts_low[0x20];
+
+	u8         ether_stats_broadcast_pkts_high[0x20];
+
+	u8         ether_stats_broadcast_pkts_low[0x20];
+
+	u8         ether_stats_multicast_pkts_high[0x20];
+
+	u8         ether_stats_multicast_pkts_low[0x20];
+
+	u8         ether_stats_crc_align_errors_high[0x20];
+
+	u8         ether_stats_crc_align_errors_low[0x20];
+
+	u8         ether_stats_undersize_pkts_high[0x20];
+
+	u8         ether_stats_undersize_pkts_low[0x20];
+
+	u8         ether_stats_oversize_pkts_high[0x20];
+
+	u8         ether_stats_oversize_pkts_low[0x20];
+
+	u8         ether_stats_fragments_high[0x20];
+
+	u8         ether_stats_fragments_low[0x20];
+
+	u8         ether_stats_jabbers_high[0x20];
+
+	u8         ether_stats_jabbers_low[0x20];
+
+	u8         ether_stats_collisions_high[0x20];
+
+	u8         ether_stats_collisions_low[0x20];
+
+	u8         ether_stats_pkts64octets_high[0x20];
+
+	u8         ether_stats_pkts64octets_low[0x20];
+
+	u8         ether_stats_pkts65to127octets_high[0x20];
+
+	u8         ether_stats_pkts65to127octets_low[0x20];
+
+	u8         ether_stats_pkts128to255octets_high[0x20];
+
+	u8         ether_stats_pkts128to255octets_low[0x20];
+
+	u8         ether_stats_pkts256to511octets_high[0x20];
+
+	u8         ether_stats_pkts256to511octets_low[0x20];
+
+	u8         ether_stats_pkts512to1023octets_high[0x20];
+
+	u8         ether_stats_pkts512to1023octets_low[0x20];
+
+	u8         ether_stats_pkts1024to1518octets_high[0x20];
+
+	u8         ether_stats_pkts1024to1518octets_low[0x20];
+
+	u8         ether_stats_pkts1519to2047octets_high[0x20];
+
+	u8         ether_stats_pkts1519to2047octets_low[0x20];
+
+	u8         ether_stats_pkts2048to4095octets_high[0x20];
+
+	u8         ether_stats_pkts2048to4095octets_low[0x20];
+
+	u8         ether_stats_pkts4096to8191octets_high[0x20];
+
+	u8         ether_stats_pkts4096to8191octets_low[0x20];
+
+	u8         ether_stats_pkts8192to10239octets_high[0x20];
+
+	u8         ether_stats_pkts8192to10239octets_low[0x20];
+
+	u8         reserved_0[0x280];
+};
+
+struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
+	u8         if_in_octets_high[0x20];
+
+	u8         if_in_octets_low[0x20];
+
+	u8         if_in_ucast_pkts_high[0x20];
+
+	u8         if_in_ucast_pkts_low[0x20];
+
+	u8         if_in_discards_high[0x20];
+
+	u8         if_in_discards_low[0x20];
+
+	u8         if_in_errors_high[0x20];
+
+	u8         if_in_errors_low[0x20];
+
+	u8         if_in_unknown_protos_high[0x20];
+
+	u8         if_in_unknown_protos_low[0x20];
+
+	u8         if_out_octets_high[0x20];
+
+	u8         if_out_octets_low[0x20];
+
+	u8         if_out_ucast_pkts_high[0x20];
+
+	u8         if_out_ucast_pkts_low[0x20];
+
+	u8         if_out_discards_high[0x20];
+
+	u8         if_out_discards_low[0x20];
+
+	u8         if_out_errors_high[0x20];
+
+	u8         if_out_errors_low[0x20];
+
+	u8         if_in_multicast_pkts_high[0x20];
+
+	u8         if_in_multicast_pkts_low[0x20];
+
+	u8         if_in_broadcast_pkts_high[0x20];
+
+	u8         if_in_broadcast_pkts_low[0x20];
+
+	u8         if_out_multicast_pkts_high[0x20];
+
+	u8         if_out_multicast_pkts_low[0x20];
+
+	u8         if_out_broadcast_pkts_high[0x20];
+
+	u8         if_out_broadcast_pkts_low[0x20];
+
+	u8         reserved_0[0x480];
+};
+
+struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
+	u8         a_frames_transmitted_ok_high[0x20];
+
+	u8         a_frames_transmitted_ok_low[0x20];
+
+	u8         a_frames_received_ok_high[0x20];
+
+	u8         a_frames_received_ok_low[0x20];
+
+	u8         a_frame_check_sequence_errors_high[0x20];
+
+	u8         a_frame_check_sequence_errors_low[0x20];
+
+	u8         a_alignment_errors_high[0x20];
+
+	u8         a_alignment_errors_low[0x20];
+
+	u8         a_octets_transmitted_ok_high[0x20];
+
+	u8         a_octets_transmitted_ok_low[0x20];
+
+	u8         a_octets_received_ok_high[0x20];
+
+	u8         a_octets_received_ok_low[0x20];
+
+	u8         a_multicast_frames_xmitted_ok_high[0x20];
+
+	u8         a_multicast_frames_xmitted_ok_low[0x20];
+
+	u8         a_broadcast_frames_xmitted_ok_high[0x20];
+
+	u8         a_broadcast_frames_xmitted_ok_low[0x20];
+
+	u8         a_multicast_frames_received_ok_high[0x20];
+
+	u8         a_multicast_frames_received_ok_low[0x20];
+
+	u8         a_broadcast_frames_received_ok_high[0x20];
+
+	u8         a_broadcast_frames_received_ok_low[0x20];
+
+	u8         a_in_range_length_errors_high[0x20];
+
+	u8         a_in_range_length_errors_low[0x20];
+
+	u8         a_out_of_range_length_field_high[0x20];
+
+	u8         a_out_of_range_length_field_low[0x20];
+
+	u8         a_frame_too_long_errors_high[0x20];
+
+	u8         a_frame_too_long_errors_low[0x20];
+
+	u8         a_symbol_error_during_carrier_high[0x20];
+
+	u8         a_symbol_error_during_carrier_low[0x20];
+
+	u8         a_mac_control_frames_transmitted_high[0x20];
+
+	u8         a_mac_control_frames_transmitted_low[0x20];
+
+	u8         a_mac_control_frames_received_high[0x20];
+
+	u8         a_mac_control_frames_received_low[0x20];
+
+	u8         a_unsupported_opcodes_received_high[0x20];
+
+	u8         a_unsupported_opcodes_received_low[0x20];
+
+	u8         a_pause_mac_ctrl_frames_received_high[0x20];
+
+	u8         a_pause_mac_ctrl_frames_received_low[0x20];
+
+	u8         a_pause_mac_ctrl_frames_transmitted_high[0x20];
+
+	u8         a_pause_mac_ctrl_frames_transmitted_low[0x20];
+
+	u8         reserved_0[0x300];
+};
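A note for readers of these counter-group layouts: the [0x20] suffix is a field width in bits (32 here), not an array length, and every *_high/*_low pair is a single 64-bit counter split into big-endian 32-bit halves. A minimal sketch of recombining one counter with the driver's existing MLX5_GET() accessor — illustrative only, not part of this patch; "out" is assumed to point at a dump laid out as mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits:

	#include <linux/mlx5/device.h>	/* MLX5_GET() */

	static u64 read_frames_transmitted_ok(void *out)
	{
		u64 hi = MLX5_GET(eth_802_3_cntrs_grp_data_layout, out,
				  a_frames_transmitted_ok_high);
		u64 lo = MLX5_GET(eth_802_3_cntrs_grp_data_layout, out,
				  a_frames_transmitted_ok_low);

		/* high word comes first in the big-endian layout */
		return (hi << 32) | lo;
	}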
+
+struct mlx5_ifc_cmd_inter_comp_event_bits {
+	u8         command_completion_vector[0x20];
+
+	u8         reserved_0[0xc0];
+};
+
+struct mlx5_ifc_stall_vl_event_bits {
+	u8         reserved_0[0x18];
+	u8         port_num[0x1];
+	u8         reserved_1[0x3];
+	u8         vl[0x4];
+
+	u8         reserved_2[0xa0];
+};
+
+struct mlx5_ifc_db_bf_congestion_event_bits {
+	u8         event_subtype[0x8];
+	u8         reserved_0[0x8];
+	u8         congestion_level[0x8];
+	u8         reserved_1[0x8];
+
+	u8         reserved_2[0xa0];
+};
+
+struct mlx5_ifc_gpio_event_bits {
+	u8         reserved_0[0x60];
+
+	u8         gpio_event_hi[0x20];
+
+	u8         gpio_event_lo[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_port_state_change_event_bits {
+	u8         reserved_0[0x40];
+
+	u8         port_num[0x4];
+	u8         reserved_1[0x1c];
+
+	u8         reserved_2[0x80];
+};
+
+struct mlx5_ifc_dropped_packet_logged_bits {
+	u8         reserved_0[0xe0];
+};
+
+enum {
+	MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN                 = 0x1,
+	MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR  = 0x2,
+};
+
+struct mlx5_ifc_cq_error_bits {
+	u8         reserved_0[0x8];
+	u8         cqn[0x18];
+
+	u8         reserved_1[0x20];
+
+	u8         reserved_2[0x18];
+	u8         syndrome[0x8];
+
+	u8         reserved_3[0x80];
+};
+
+struct mlx5_ifc_rdma_page_fault_event_bits {
+	u8         bytes_committed[0x20];
+
+	u8         r_key[0x20];
+
+	u8         reserved_0[0x10];
+	u8         packet_len[0x10];
+
+	u8         rdma_op_len[0x20];
+
+	u8         rdma_va[0x40];
+
+	u8         reserved_1[0x5];
+	u8         rdma[0x1];
+	u8         write[0x1];
+	u8         requestor[0x1];
+	u8         qp_number[0x18];
+};
+
+struct mlx5_ifc_wqe_associated_page_fault_event_bits {
+	u8         bytes_committed[0x20];
+
+	u8         reserved_0[0x10];
+	u8         wqe_index[0x10];
+
+	u8         reserved_1[0x10];
+	u8         len[0x10];
+
+	u8         reserved_2[0x60];
+
+	u8         reserved_3[0x5];
+	u8         rdma[0x1];
+	u8         write_read[0x1];
+	u8         requestor[0x1];
+	u8         qpn[0x18];
+};
+
+struct mlx5_ifc_qp_events_bits {
+	u8         reserved_0[0xa0];
+
+	u8         type[0x8];
+	u8         reserved_1[0x18];
+
+	u8         reserved_2[0x8];
+	u8         qpn_rqn_sqn[0x18];
+};
+
+struct mlx5_ifc_dct_events_bits {
+	u8         reserved_0[0xc0];
+
+	u8         reserved_1[0x8];
+	u8         dct_number[0x18];
+};
+
+struct mlx5_ifc_comp_event_bits {
+	u8         reserved_0[0xc0];
+
+	u8         reserved_1[0x8];
+	u8         cq_number[0x18];
+};
+
+enum {
+	MLX5_QPC_STATE_RST        = 0x0,
+	MLX5_QPC_STATE_INIT       = 0x1,
+	MLX5_QPC_STATE_RTR        = 0x2,
+	MLX5_QPC_STATE_RTS        = 0x3,
+	MLX5_QPC_STATE_SQER       = 0x4,
+	MLX5_QPC_STATE_ERR        = 0x6,
+	MLX5_QPC_STATE_SQD        = 0x7,
+	MLX5_QPC_STATE_SUSPENDED  = 0x9,
+};
+
+enum {
+	MLX5_QPC_ST_RC            = 0x0,
+	MLX5_QPC_ST_UC            = 0x1,
+	MLX5_QPC_ST_UD            = 0x2,
+	MLX5_QPC_ST_XRC           = 0x3,
+	MLX5_QPC_ST_DCI           = 0x5,
+	MLX5_QPC_ST_QP0           = 0x7,
+	MLX5_QPC_ST_QP1           = 0x8,
+	MLX5_QPC_ST_RAW_DATAGRAM  = 0x9,
+	MLX5_QPC_ST_REG_UMR       = 0xc,
+};
+
+enum {
+	MLX5_QPC_PM_STATE_ARMED     = 0x0,
+	MLX5_QPC_PM_STATE_REARM     = 0x1,
+	MLX5_QPC_PM_STATE_RESERVED  = 0x2,
+	MLX5_QPC_PM_STATE_MIGRATED  = 0x3,
+};
+
+enum {
+	MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS                = 0x0,
+	MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT  = 0x1,
+};
+
+enum {
+	MLX5_QPC_MTU_256_BYTES        = 0x1,
+	MLX5_QPC_MTU_512_BYTES        = 0x2,
+	MLX5_QPC_MTU_1K_BYTES         = 0x3,
+	MLX5_QPC_MTU_2K_BYTES         = 0x4,
+	MLX5_QPC_MTU_4K_BYTES         = 0x5,
+	MLX5_QPC_MTU_RAW_ETHERNET_QP  = 0x7,
+};
+
+enum {
+	MLX5_QPC_ATOMIC_MODE_IB_SPEC     = 0x1,
+	MLX5_QPC_ATOMIC_MODE_ONLY_8B     = 0x2,
+	MLX5_QPC_ATOMIC_MODE_UP_TO_8B    = 0x3,
+	MLX5_QPC_ATOMIC_MODE_UP_TO_16B   = 0x4,
+	MLX5_QPC_ATOMIC_MODE_UP_TO_32B   = 0x5,
+	MLX5_QPC_ATOMIC_MODE_UP_TO_64B   = 0x6,
+	MLX5_QPC_ATOMIC_MODE_UP_TO_128B  = 0x7,
+	MLX5_QPC_ATOMIC_MODE_UP_TO_256B  = 0x8,
+};
+
+enum {
+	MLX5_QPC_CS_REQ_DISABLE    = 0x0,
+	MLX5_QPC_CS_REQ_UP_TO_32B  = 0x11,
+	MLX5_QPC_CS_REQ_UP_TO_64B  = 0x22,
+};
+
+enum {
+	MLX5_QPC_CS_RES_DISABLE    = 0x0,
+	MLX5_QPC_CS_RES_UP_TO_32B  = 0x1,
+	MLX5_QPC_CS_RES_UP_TO_64B  = 0x2,
+};
+
+struct mlx5_ifc_qpc_bits {
+	u8         state[0x4];
+	u8         reserved_0[0x4];
+	u8         st[0x8];
+	u8         reserved_1[0x3];
+	u8         pm_state[0x2];
+	u8         reserved_2[0x7];
+	u8         end_padding_mode[0x2];
+	u8         reserved_3[0x2];
+
+	u8         wq_signature[0x1];
+	u8         block_lb_mc[0x1];
+	u8         atomic_like_write_en[0x1];
+	u8         latency_sensitive[0x1];
+	u8         reserved_4[0x1];
+	u8         drain_sigerr[0x1];
+	u8         reserved_5[0x2];
+	u8         pd[0x18];
+
+	u8         mtu[0x3];
+	u8         log_msg_max[0x5];
+	u8         reserved_6[0x1];
+	u8         log_rq_size[0x4];
+	u8         log_rq_stride[0x3];
+	u8         no_sq[0x1];
+	u8         log_sq_size[0x4];
+	u8         reserved_7[0x6];
+	u8         rlky[0x1];
+	u8         reserved_8[0x4];
+
+	u8         counter_set_id[0x8];
+	u8         uar_page[0x18];
+
+	u8         reserved_9[0x8];
+	u8         user_index[0x18];
+
+	u8         reserved_10[0x3];
+	u8         log_page_size[0x5];
+	u8         remote_qpn[0x18];
+
+	struct mlx5_ifc_ads_bits primary_address_path;
+
+	struct mlx5_ifc_ads_bits secondary_address_path;
+
+	u8         log_ack_req_freq[0x4];
+	u8         reserved_11[0x4];
+	u8         log_sra_max[0x3];
+	u8         reserved_12[0x2];
+	u8         retry_count[0x3];
+	u8         rnr_retry[0x3];
+	u8         reserved_13[0x1];
+	u8         fre[0x1];
+	u8         cur_rnr_retry[0x3];
+	u8         cur_retry_count[0x3];
+	u8         reserved_14[0x5];
+
+	u8         reserved_15[0x20];
+
+	u8         reserved_16[0x8];
+	u8         next_send_psn[0x18];
+
+	u8         reserved_17[0x8];
+	u8         cqn_snd[0x18];
+
+	u8         reserved_18[0x40];
+
+	u8         reserved_19[0x8];
+	u8         last_acked_psn[0x18];
+
+	u8         reserved_20[0x8];
+	u8         ssn[0x18];
+
+	u8         reserved_21[0x8];
+	u8         log_rra_max[0x3];
+	u8         reserved_22[0x1];
+	u8         atomic_mode[0x4];
+	u8         rre[0x1];
+	u8         rwe[0x1];
+	u8         rae[0x1];
+	u8         reserved_23[0x1];
+	u8         page_offset[0x6];
+	u8         reserved_24[0x3];
+	u8         cd_slave_receive[0x1];
+	u8         cd_slave_send[0x1];
+	u8         cd_master[0x1];
+
+	u8         reserved_25[0x3];
+	u8         min_rnr_nak[0x5];
+	u8         next_rcv_psn[0x18];
+
+	u8         reserved_26[0x8];
+	u8         xrcd[0x18];
+
+	u8         reserved_27[0x8];
+	u8         cqn_rcv[0x18];
+
+	u8         dbr_addr[0x40];
+
+	u8         q_key[0x20];
+
+	u8         reserved_28[0x5];
+	u8         rq_type[0x3];
+	u8         srqn_rmpn[0x18];
+
+	u8         reserved_29[0x8];
+	u8         rmsn[0x18];
+
+	u8         hw_sq_wqebb_counter[0x10];
+	u8         sw_sq_wqebb_counter[0x10];
+
+	u8         hw_rq_counter[0x20];
+
+	u8         sw_rq_counter[0x20];
+
+	u8         reserved_30[0x20];
+
+	u8         reserved_31[0xf];
+	u8         cgs[0x1];
+	u8         cs_req[0x8];
+	u8         cs_res[0x8];
+
+	u8         dc_access_key[0x40];
+
+	u8         reserved_32[0xc0];
+};
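Since mlx5_ifc_qpc_bits is the template consumed by the create/modify/query QP commands further down, here is a hedged example of writing these bit-exact fields with MLX5_SET(), which masks each value to the declared width. The field choices are arbitrary illustrations, not from this patch; the qpc pointer would come from a command mailbox, e.g. MLX5_ADDR_OF(rst2init_qp_in, in, qpc):

	#include <linux/mlx5/device.h>

	static void example_fill_qpc(void *qpc)
	{
		MLX5_SET(qpc, qpc, st, MLX5_QPC_ST_RC);		/* 8-bit service type */
		MLX5_SET(qpc, qpc, pm_state, MLX5_QPC_PM_STATE_MIGRATED); /* 2 bits */
		MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_1K_BYTES);	/* 3 bits */
		MLX5_SET(qpc, qpc, log_rq_size, 4);		/* 2^4 = 16 RQ WQEs */
	}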
+
+struct mlx5_ifc_roce_addr_layout_bits {
+	u8         source_l3_address[16][0x8];
+
+	u8         reserved_0[0x3];
+	u8         vlan_valid[0x1];
+	u8         vlan_id[0xc];
+	u8         source_mac_47_32[0x10];
+
+	u8         source_mac_31_0[0x20];
+
+	u8         reserved_1[0x14];
+	u8         roce_l3_type[0x4];
+	u8         roce_version[0x8];
+
+	u8         reserved_2[0x20];
+};
+
+union mlx5_ifc_hca_cap_union_bits {
+	struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+	struct mlx5_ifc_odp_cap_bits odp_cap;
+	struct mlx5_ifc_atomic_caps_bits atomic_caps;
+	struct mlx5_ifc_roce_cap_bits roce_cap;
+	struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
+	struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
+	u8         reserved_0[0x8000];
+};
+
+enum {
+	MLX5_FLOW_CONTEXT_ACTION_ALLOW     = 0x1,
+	MLX5_FLOW_CONTEXT_ACTION_DROP      = 0x2,
+	MLX5_FLOW_CONTEXT_ACTION_FWD_DEST  = 0x4,
+};
+
+struct mlx5_ifc_flow_context_bits {
+	u8         reserved_0[0x20];
+
+	u8         group_id[0x20];
+
+	u8         reserved_1[0x8];
+	u8         flow_tag[0x18];
+
+	u8         reserved_2[0x10];
+	u8         action[0x10];
+
+	u8         reserved_3[0x8];
+	u8         destination_list_size[0x18];
+
+	u8         reserved_4[0x160];
+
+	struct mlx5_ifc_fte_match_param_bits match_value;
+
+	u8         reserved_5[0x600];
+
+	struct mlx5_ifc_dest_format_struct_bits destination[0];
+};
+
+enum {
+	MLX5_XRC_SRQC_STATE_GOOD   = 0x0,
+	MLX5_XRC_SRQC_STATE_ERROR  = 0x1,
+};
+
+struct mlx5_ifc_xrc_srqc_bits {
+	u8         state[0x4];
+	u8         log_xrc_srq_size[0x4];
+	u8         reserved_0[0x18];
+
+	u8         wq_signature[0x1];
+	u8         cont_srq[0x1];
+	u8         reserved_1[0x1];
+	u8         rlky[0x1];
+	u8         basic_cyclic_rcv_wqe[0x1];
+	u8         log_rq_stride[0x3];
+	u8         xrcd[0x18];
+
+	u8         page_offset[0x6];
+	u8         reserved_2[0x2];
+	u8         cqn[0x18];
+
+	u8         reserved_3[0x20];
+
+	u8         user_index_equal_xrc_srqn[0x1];
+	u8         reserved_4[0x1];
+	u8         log_page_size[0x6];
+	u8         user_index[0x18];
+
+	u8         reserved_5[0x20];
+
+	u8         reserved_6[0x8];
+	u8         pd[0x18];
+
+	u8         lwm[0x10];
+	u8         wqe_cnt[0x10];
+
+	u8         reserved_7[0x40];
+
+	u8         db_record_addr_h[0x20];
+
+	u8         db_record_addr_l[0x1e];
+	u8         reserved_8[0x2];
+
+	u8         reserved_9[0x80];
+};
+
+struct mlx5_ifc_traffic_counter_bits {
+	u8         packets[0x40];
+
+	u8         octets[0x40];
+};
+
+struct mlx5_ifc_tisc_bits {
+	u8         reserved_0[0xc];
+	u8         prio[0x4];
+	u8         reserved_1[0x10];
+
+	u8         reserved_2[0x100];
+
+	u8         reserved_3[0x8];
+	u8         transport_domain[0x18];
+
+	u8         reserved_4[0x3c0];
+};
+
+enum {
+	MLX5_TIRC_DISP_TYPE_DIRECT    = 0x0,
+	MLX5_TIRC_DISP_TYPE_INDIRECT  = 0x1,
+};
+
+enum {
+	MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO  = 0x1,
+	MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO  = 0x2,
+};
+
+enum {
+	MLX5_TIRC_RX_HASH_FN_HASH_NONE           = 0x0,
+	MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8  = 0x1,
+	MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ       = 0x2,
+};
+
+enum {
+	MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_    = 0x1,
+	MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_  = 0x2,
+};
+
+struct mlx5_ifc_tirc_bits {
+	u8         reserved_0[0x20];
+
+	u8         disp_type[0x4];
+	u8         reserved_1[0x1c];
+
+	u8         reserved_2[0x40];
+
+	u8         reserved_3[0x4];
+	u8         lro_timeout_period_usecs[0x10];
+	u8         lro_enable_mask[0x4];
+	u8         lro_max_ip_payload_size[0x8];
+
+	u8         reserved_4[0x40];
+
+	u8         reserved_5[0x8];
+	u8         inline_rqn[0x18];
+
+	u8         rx_hash_symmetric[0x1];
+	u8         reserved_6[0x1];
+	u8         tunneled_offload_en[0x1];
+	u8         reserved_7[0x5];
+	u8         indirect_table[0x18];
+
+	u8         rx_hash_fn[0x4];
+	u8         reserved_8[0x2];
+	u8         self_lb_block[0x2];
+	u8         transport_domain[0x18];
+
+	u8         rx_hash_toeplitz_key[10][0x20];
+
+	struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;
+
+	struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
+
+	u8         reserved_9[0x4c0];
+};
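The TIR context above carries the receive-side offload knobs: lro_enable_mask takes the MLX5_TIRC_LRO_ENABLE_MASK_* bits, and rx_hash_toeplitz_key is 10 32-bit words, i.e. a 320-bit Toeplitz key. A sketch of the LRO fields follows (illustrative only; the 256-byte unit for lro_max_ip_payload_size is an assumption, and the tirc pointer would come from a create/modify TIR mailbox such as MLX5_ADDR_OF(modify_tir_in, in, ctx)):

	#include <linux/mlx5/device.h>

	static void example_fill_tirc_lro(void *tirc)
	{
		MLX5_SET(tirc, tirc, lro_enable_mask,
			 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
			 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
		/* 8-bit field; assumed units of 256B => ~64KB aggregates */
		MLX5_SET(tirc, tirc, lro_max_ip_payload_size, 0xff);
		MLX5_SET(tirc, tirc, lro_timeout_period_usecs, 32);
	}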
+
+enum {
+	MLX5_SRQC_STATE_GOOD   = 0x0,
+	MLX5_SRQC_STATE_ERROR  = 0x1,
+};
+
+struct mlx5_ifc_srqc_bits {
+	u8         state[0x4];
+	u8         log_srq_size[0x4];
+	u8         reserved_0[0x18];
+
+	u8         wq_signature[0x1];
+	u8         cont_srq[0x1];
+	u8         reserved_1[0x1];
+	u8         rlky[0x1];
+	u8         reserved_2[0x1];
+	u8         log_rq_stride[0x3];
+	u8         xrcd[0x18];
+
+	u8         page_offset[0x6];
+	u8         reserved_3[0x2];
+	u8         cqn[0x18];
+
+	u8         reserved_4[0x20];
+
+	u8         reserved_5[0x2];
+	u8         log_page_size[0x6];
+	u8         reserved_6[0x18];
+
+	u8         reserved_7[0x20];
+
+	u8         reserved_8[0x8];
+	u8         pd[0x18];
+
+	u8         lwm[0x10];
+	u8         wqe_cnt[0x10];
+
+	u8         reserved_9[0x40];
+
+	u8         dbr_addr[0x40];
+
+	u8         reserved_10[0x80];
+};
+
+enum {
+	MLX5_SQC_STATE_RST  = 0x0,
+	MLX5_SQC_STATE_RDY  = 0x1,
+	MLX5_SQC_STATE_ERR  = 0x3,
+};
+
+struct mlx5_ifc_sqc_bits {
+	u8         rlky[0x1];
+	u8         cd_master[0x1];
+	u8         fre[0x1];
+	u8         flush_in_error_en[0x1];
+	u8         reserved_0[0x4];
+	u8         state[0x4];
+	u8         reserved_1[0x14];
+
+	u8         reserved_2[0x8];
+	u8         user_index[0x18];
+
+	u8         reserved_3[0x8];
+	u8         cqn[0x18];
+
+	u8         reserved_4[0xa0];
+
+	u8         tis_lst_sz[0x10];
+	u8         reserved_5[0x10];
+
+	u8         reserved_6[0x40];
+
+	u8         reserved_7[0x8];
+	u8         tis_num_0[0x18];
+
+	struct mlx5_ifc_wq_bits wq;
+};
+
+struct mlx5_ifc_rqtc_bits {
+	u8         reserved_0[0xa0];
+
+	u8         reserved_1[0x10];
+	u8         rqt_max_size[0x10];
+
+	u8         reserved_2[0x10];
+	u8         rqt_actual_size[0x10];
+
+	u8         reserved_3[0x6a0];
+
+	struct mlx5_ifc_rq_num_bits rq_num[0];
+};
+
+enum {
+	MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE  = 0x0,
+	MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP     = 0x1,
+};
+
+enum {
+	MLX5_RQC_STATE_RST  = 0x0,
+	MLX5_RQC_STATE_RDY  = 0x1,
+	MLX5_RQC_STATE_ERR  = 0x3,
+};
+
+struct mlx5_ifc_rqc_bits {
+	u8         rlky[0x1];
+	u8         reserved_0[0x2];
+	u8         vsd[0x1];
+	u8         mem_rq_type[0x4];
+	u8         state[0x4];
+	u8         reserved_1[0x1];
+	u8         flush_in_error_en[0x1];
+	u8         reserved_2[0x12];
+
+	u8         reserved_3[0x8];
+	u8         user_index[0x18];
+
+	u8         reserved_4[0x8];
+	u8         cqn[0x18];
+
+	u8         counter_set_id[0x8];
+	u8         reserved_5[0x18];
+
+	u8         reserved_6[0x8];
+	u8         rmpn[0x18];
+
+	u8         reserved_7[0xe0];
+
+	struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+	MLX5_RMPC_STATE_RDY  = 0x1,
+	MLX5_RMPC_STATE_ERR  = 0x3,
+};
+
+struct mlx5_ifc_rmpc_bits {
+	u8         reserved_0[0x8];
+	u8         state[0x4];
+	u8         reserved_1[0x14];
+
+	u8         basic_cyclic_rcv_wqe[0x1];
+	u8         reserved_2[0x1f];
+
+	u8         reserved_3[0x140];
+
+	struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+	MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS  = 0x0,
+};
+
+struct mlx5_ifc_nic_vport_context_bits {
+	u8         reserved_0[0x1f];
+	u8         roce_en[0x1];
+
+	u8         reserved_1[0x760];
+
+	u8         reserved_2[0x5];
+	u8         allowed_list_type[0x3];
+	u8         reserved_3[0xc];
+	u8         allowed_list_size[0xc];
+
+	struct mlx5_ifc_mac_address_layout_bits permanent_address;
+
+	u8         reserved_4[0x20];
+
+	u8         current_uc_mac_address[0][0x40];
+};
+
+enum {
+	MLX5_MKC_ACCESS_MODE_PA    = 0x0,
+	MLX5_MKC_ACCESS_MODE_MTT   = 0x1,
+	MLX5_MKC_ACCESS_MODE_KLMS  = 0x2,
+};
+
+struct mlx5_ifc_mkc_bits {
+	u8         reserved_0[0x1];
+	u8         free[0x1];
+	u8         reserved_1[0xd];
+	u8         small_fence_on_rdma_read_response[0x1];
+	u8         umr_en[0x1];
+	u8         a[0x1];
+	u8         rw[0x1];
+	u8         rr[0x1];
+	u8         lw[0x1];
+	u8         lr[0x1];
+	u8         access_mode[0x2];
+	u8         reserved_2[0x8];
+
+	u8         qpn[0x18];
+	u8         mkey_7_0[0x8];
+
+	u8         reserved_3[0x20];
+
+	u8         length64[0x1];
+	u8         bsf_en[0x1];
+	u8         sync_umr[0x1];
+	u8         reserved_4[0x2];
+	u8         expected_sigerr_count[0x1];
+	u8         reserved_5[0x1];
+	u8         en_rinval[0x1];
+	u8         pd[0x18];
+
+	u8         start_addr[0x40];
+
+	u8         len[0x40];
+
+	u8         bsf_octword_size[0x20];
+
+	u8         reserved_6[0x80];
+
+	u8         translations_octword_size[0x20];
+
+	u8         reserved_7[0x1b];
+	u8         log_page_size[0x5];
+
+	u8         reserved_8[0x20];
+};
+
+struct mlx5_ifc_pkey_bits {
+	u8         reserved_0[0x10];
+	u8         pkey[0x10];
+};
+
+struct mlx5_ifc_array128_auto_bits {
+	u8         array128_auto[16][0x8];
+};
+
+struct mlx5_ifc_hca_vport_context_bits {
+	u8         field_select[0x20];
+
+	u8         reserved_0[0xe0];
+
+	u8         sm_virt_aware[0x1];
+	u8         has_smi[0x1];
+	u8         has_raw[0x1];
+	u8         grh_required[0x1];
+	u8         reserved_1[0xc];
+	u8         port_physical_state[0x4];
+	u8         vport_state_policy[0x4];
+	u8         port_state[0x4];
+	u8         vport_state[0x4];
+
+	u8         reserved_2[0x20];
+
+	u8         system_image_guid[0x40];
+
+	u8         port_guid[0x40];
+
+	u8         node_guid[0x40];
+
+	u8         cap_mask1[0x20];
+
+	u8         cap_mask1_field_select[0x20];
+
+	u8         cap_mask2[0x20];
+
+	u8         cap_mask2_field_select[0x20];
+
+	u8         reserved_3[0x80];
+
+	u8         lid[0x10];
+	u8         reserved_4[0x4];
+	u8         init_type_reply[0x4];
+	u8         lmc[0x3];
+	u8         subnet_timeout[0x5];
+
+	u8         sm_lid[0x10];
+	u8         sm_sl[0x4];
+	u8         reserved_5[0xc];
+
+	u8         qkey_violation_counter[0x10];
+	u8         pkey_violation_counter[0x10];
+
+	u8         reserved_6[0xca0];
+};
+
+enum {
+	MLX5_EQC_STATUS_OK                = 0x0,
+	MLX5_EQC_STATUS_EQ_WRITE_FAILURE  = 0xa,
+};
+
+enum {
+	MLX5_EQC_ST_ARMED  = 0x9,
+	MLX5_EQC_ST_FIRED  = 0xa,
+};
+
+struct mlx5_ifc_eqc_bits {
+	u8         status[0x4];
+	u8         reserved_0[0x9];
+	u8         ec[0x1];
+	u8         oi[0x1];
+	u8         reserved_1[0x5];
+	u8         st[0x4];
+	u8         reserved_2[0x8];
+
+	u8         reserved_3[0x20];
+
+	u8         reserved_4[0x14];
+	u8         page_offset[0x6];
+	u8         reserved_5[0x6];
+
+	u8         reserved_6[0x3];
+	u8         log_eq_size[0x5];
+	u8         uar_page[0x18];
+
+	u8         reserved_7[0x20];
+
+	u8         reserved_8[0x18];
+	u8         intr[0x8];
+
+	u8         reserved_9[0x3];
+	u8         log_page_size[0x5];
+	u8         reserved_10[0x18];
+
+	u8         reserved_11[0x60];
+
+	u8         reserved_12[0x8];
+	u8         consumer_counter[0x18];
+
+	u8         reserved_13[0x8];
+	u8         producer_counter[0x18];
+
+	u8         reserved_14[0x80];
+};
+
+enum {
+	MLX5_DCTC_STATE_ACTIVE    = 0x0,
+	MLX5_DCTC_STATE_DRAINING  = 0x1,
+	MLX5_DCTC_STATE_DRAINED   = 0x2,
+};
+
+enum {
+	MLX5_DCTC_CS_RES_DISABLE    = 0x0,
+	MLX5_DCTC_CS_RES_NA         = 0x1,
+	MLX5_DCTC_CS_RES_UP_TO_64B  = 0x2,
+};
+
+enum {
+	MLX5_DCTC_MTU_256_BYTES  = 0x1,
+	MLX5_DCTC_MTU_512_BYTES  = 0x2,
+	MLX5_DCTC_MTU_1K_BYTES   = 0x3,
+	MLX5_DCTC_MTU_2K_BYTES   = 0x4,
+	MLX5_DCTC_MTU_4K_BYTES   = 0x5,
+};
+
+struct mlx5_ifc_dctc_bits {
+	u8         reserved_0[0x4];
+	u8         state[0x4];
+	u8         reserved_1[0x18];
+
+	u8         reserved_2[0x8];
+	u8         user_index[0x18];
+
+	u8         reserved_3[0x8];
+	u8         cqn[0x18];
+
+	u8         counter_set_id[0x8];
+	u8         atomic_mode[0x4];
+	u8         rre[0x1];
+	u8         rwe[0x1];
+	u8         rae[0x1];
+	u8         atomic_like_write_en[0x1];
+	u8         latency_sensitive[0x1];
+	u8         rlky[0x1];
+	u8         free_ar[0x1];
+	u8         reserved_4[0xd];
+
+	u8         reserved_5[0x8];
+	u8         cs_res[0x8];
+	u8         reserved_6[0x3];
+	u8         min_rnr_nak[0x5];
+	u8         reserved_7[0x8];
+
+	u8         reserved_8[0x8];
+	u8         srqn[0x18];
+
+	u8         reserved_9[0x8];
+	u8         pd[0x18];
+
+	u8         tclass[0x8];
+	u8         reserved_10[0x4];
+	u8         flow_label[0x14];
+
+	u8         dc_access_key[0x40];
+
+	u8         reserved_11[0x5];
+	u8         mtu[0x3];
+	u8         port[0x8];
+	u8         pkey_index[0x10];
+
+	u8         reserved_12[0x8];
+	u8         my_addr_index[0x8];
+	u8         reserved_13[0x8];
+	u8         hop_limit[0x8];
+
+	u8         dc_access_key_violation_count[0x20];
+
+	u8         reserved_14[0x14];
+	u8         dei_cfi[0x1];
+	u8         eth_prio[0x3];
+	u8         ecn[0x2];
+	u8         dscp[0x6];
+
+	u8         reserved_15[0x40];
+};
+
+enum {
+	MLX5_CQC_STATUS_OK             = 0x0,
+	MLX5_CQC_STATUS_CQ_OVERFLOW    = 0x9,
+	MLX5_CQC_STATUS_CQ_WRITE_FAIL  = 0xa,
+};
+
+enum {
+	MLX5_CQC_CQE_SZ_64_BYTES   = 0x0,
+	MLX5_CQC_CQE_SZ_128_BYTES  = 0x1,
+};
+
+enum {
+	MLX5_CQC_ST_SOLICITED_NOTIFICATION_REQUEST_ARMED  = 0x6,
+	MLX5_CQC_ST_NOTIFICATION_REQUEST_ARMED            = 0x9,
+	MLX5_CQC_ST_FIRED                                 = 0xa,
+};
+
+struct mlx5_ifc_cqc_bits {
+	u8         status[0x4];
+	u8         reserved_0[0x4];
+	u8         cqe_sz[0x3];
+	u8         cc[0x1];
+	u8         reserved_1[0x1];
+	u8         scqe_break_moderation_en[0x1];
+	u8         oi[0x1];
+	u8         reserved_2[0x2];
+	u8         cqe_zip_en[0x1];
+	u8         mini_cqe_res_format[0x2];
+	u8         st[0x4];
+	u8         reserved_3[0x8];
+
+	u8         reserved_4[0x20];
+
+	u8         reserved_5[0x14];
+	u8         page_offset[0x6];
+	u8         reserved_6[0x6];
+
+	u8         reserved_7[0x3];
+	u8         log_cq_size[0x5];
+	u8         uar_page[0x18];
+
+	u8         reserved_8[0x4];
+	u8         cq_period[0xc];
+	u8         cq_max_count[0x10];
+
+	u8         reserved_9[0x18];
+	u8         c_eqn[0x8];
+
+	u8         reserved_10[0x3];
+	u8         log_page_size[0x5];
+	u8         reserved_11[0x18];
+
+	u8         reserved_12[0x20];
+
+	u8         reserved_13[0x8];
+	u8         last_notified_index[0x18];
+
+	u8         reserved_14[0x8];
+	u8         last_solicit_index[0x18];
+
+	u8         reserved_15[0x8];
+	u8         consumer_counter[0x18];
+
+	u8         reserved_16[0x8];
+	u8         producer_counter[0x18];
+
+	u8         reserved_17[0x40];
+
+	u8         dbr_addr[0x40];
+};
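Interrupt moderation lives in the CQ context above: cq_period (12 bits, taken to be in microseconds) and cq_max_count (16 bits, a completion budget) bound how long and how many CQEs the HW may coalesce before firing an event. A minimal sketch, with the unit interpretation flagged as an assumption:

	#include <linux/mlx5/device.h>

	static void example_set_cq_moderation(void *cqc)
	{
		MLX5_SET(cqc, cqc, cq_period, 10);	/* usecs (assumed) */
		MLX5_SET(cqc, cqc, cq_max_count, 16);	/* CQEs before event */
	}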
+
+union mlx5_ifc_cong_control_roce_ecn_auto_bits {
+	struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
+	struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
+	struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
+	u8         reserved_0[0x800];
+};
+
+struct mlx5_ifc_query_adapter_param_block_bits {
+	u8         reserved_0[0xc0];
+
+	u8         reserved_1[0x8];
+	u8         ieee_vendor_id[0x18];
+
+	u8         reserved_2[0x10];
+	u8         vsd_vendor_id[0x10];
+
+	u8         vsd[208][0x8];
+
+	u8         vsd_contd_psid[16][0x8];
+};
+
+union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
+	struct mlx5_ifc_modify_field_select_bits modify_field_select;
+	struct mlx5_ifc_resize_field_select_bits resize_field_select;
+	u8         reserved_0[0x20];
+};
+
+union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
+	struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp;
+	struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp;
+	struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np;
+	u8         reserved_0[0x20];
+};
+
+union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
+	struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
+	struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+	struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+	u8         reserved_0[0x7c0];
+};
+
+union mlx5_ifc_event_auto_bits {
+	struct mlx5_ifc_comp_event_bits comp_event;
+	struct mlx5_ifc_dct_events_bits dct_events;
+	struct mlx5_ifc_qp_events_bits qp_events;
+	struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event;
+	struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event;
+	struct mlx5_ifc_cq_error_bits cq_error;
+	struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged;
+	struct mlx5_ifc_port_state_change_event_bits port_state_change_event;
+	struct mlx5_ifc_gpio_event_bits gpio_event;
+	struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event;
+	struct mlx5_ifc_stall_vl_event_bits stall_vl_event;
+	struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event;
+	u8         reserved_0[0xe0];
+};
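All members of mlx5_ifc_event_auto_bits describe the same 0xe0-bit EQE payload; which overlay applies is selected by the event type carried in the EQE itself. Purely to illustrate the overlay idea (the driver actually decodes EQEs through its packed struct mlx5_eqe, so treat this as a sketch, not the in-tree parsing path):

	#include <linux/mlx5/device.h>

	/* pull the CQN out of a completion-event payload */
	static u32 example_comp_event_cqn(void *eqe_data)
	{
		return MLX5_GET(comp_event, eqe_data, cq_number);
	}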
+
+struct mlx5_ifc_health_buffer_bits {
+	u8         reserved_0[0x100];
+
+	u8         assert_existptr[0x20];
+
+	u8         assert_callra[0x20];
+
+	u8         reserved_1[0x40];
+
+	u8         fw_version[0x20];
+
+	u8         hw_id[0x20];
+
+	u8         reserved_2[0x20];
+
+	u8         irisc_index[0x8];
+	u8         synd[0x8];
+	u8         ext_synd[0x10];
+};
+
+struct mlx5_ifc_register_loopback_control_bits {
+	u8         no_lb[0x1];
+	u8         reserved_0[0x7];
+	u8         port[0x8];
+	u8         reserved_1[0x10];
+
+	u8         reserved_2[0x60];
+};
+
+struct mlx5_ifc_teardown_hca_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+enum {
+	MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE  = 0x0,
+	MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE     = 0x1,
+};
+
+struct mlx5_ifc_teardown_hca_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x10];
+	u8         profile[0x10];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_sqerr2rts_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_sqerr2rts_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_3[0x20];
+
+	u8         opt_param_mask[0x20];
+
+	u8         reserved_4[0x20];
+
+	struct mlx5_ifc_qpc_bits qpc;
+
+	u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_sqd2rts_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_sqd2rts_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_3[0x20];
+
+	u8         opt_param_mask[0x20];
+
+	u8         reserved_4[0x20];
+
+	struct mlx5_ifc_qpc_bits qpc;
+
+	u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_set_roce_address_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_roce_address_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         roce_address_index[0x10];
+	u8         reserved_2[0x10];
+
+	u8         reserved_3[0x20];
+
+	struct mlx5_ifc_roce_addr_layout_bits roce_address;
+};
+
+struct mlx5_ifc_set_mad_demux_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+enum {
+	MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL   = 0x0,
+	MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE  = 0x2,
+};
+
+struct mlx5_ifc_set_mad_demux_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x20];
+
+	u8         reserved_3[0x6];
+	u8         demux_mode[0x2];
+	u8         reserved_4[0x18];
+};
+
+struct mlx5_ifc_set_l2_table_entry_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_l2_table_entry_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x60];
+
+	u8         reserved_3[0x8];
+	u8         table_index[0x18];
+
+	u8         reserved_4[0x20];
+
+	u8         reserved_5[0x13];
+	u8         vlan_valid[0x1];
+	u8         vlan[0xc];
+
+	struct mlx5_ifc_mac_address_layout_bits mac_address;
+
+	u8         reserved_6[0xc0];
+};
+
+struct mlx5_ifc_set_issi_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_issi_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x10];
+	u8         current_issi[0x10];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_set_hca_cap_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
 };
 
 struct mlx5_ifc_set_hca_cap_in_bits {
@@ -313,7 +2764,811 @@
 
 	u8         reserved_2[0x40];
 
-	struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct;
+	union mlx5_ifc_hca_cap_union_bits capability;
+};
+
+struct mlx5_ifc_set_fte_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_fte_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+
+	u8         table_type[0x8];
+	u8         reserved_3[0x18];
+
+	u8         reserved_4[0x8];
+	u8         table_id[0x18];
+
+	u8         reserved_5[0x40];
+
+	u8         flow_index[0x20];
+
+	u8         reserved_6[0xe0];
+
+	struct mlx5_ifc_flow_context_bits flow_context;
+};
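set_fte_in ties the pieces together: table_type/table_id address the flow table, flow_index picks the entry, and the embedded flow context carries the MLX5_FLOW_CONTEXT_ACTION_* bitmask plus destination_list_size trailing dest_format_struct entries. A hedged sketch of programming one forward rule (hypothetical helper, not from this patch; the opcode enum name is assumed to match this series):

	#include <linux/slab.h>
	#include <linux/mlx5/device.h>
	#include <linux/mlx5/driver.h>

	static int example_set_fte(struct mlx5_core_dev *dev, u8 table_type,
				   u32 table_id, u32 flow_index,
				   u8 dest_type, u32 dest_id)
	{
		u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
		/* one destination entry follows the flow context */
		int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
			    MLX5_ST_SZ_BYTES(dest_format_struct);
		void *in, *ctx, *dst;
		int err;

		in = kzalloc(inlen, GFP_KERNEL);
		if (!in)
			return -ENOMEM;

		MLX5_SET(set_fte_in, in, opcode,
			 MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
		MLX5_SET(set_fte_in, in, table_type, table_type);
		MLX5_SET(set_fte_in, in, table_id, table_id);
		MLX5_SET(set_fte_in, in, flow_index, flow_index);

		ctx = MLX5_ADDR_OF(set_fte_in, in, flow_context);
		MLX5_SET(flow_context, ctx, action,
			 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
		MLX5_SET(flow_context, ctx, destination_list_size, 1);

		dst = MLX5_ADDR_OF(flow_context, ctx, destination);
		MLX5_SET(dest_format_struct, dst, destination_type, dest_type);
		MLX5_SET(dest_format_struct, dst, destination_id, dest_id);

		err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
		kfree(in);
		return err;
	}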
+
+struct mlx5_ifc_rts2rts_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_rts2rts_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_3[0x20];
+
+	u8         opt_param_mask[0x20];
+
+	u8         reserved_4[0x20];
+
+	struct mlx5_ifc_qpc_bits qpc;
+
+	u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_rtr2rts_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_rtr2rts_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_3[0x20];
+
+	u8         opt_param_mask[0x20];
+
+	u8         reserved_4[0x20];
+
+	struct mlx5_ifc_qpc_bits qpc;
+
+	u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_rst2init_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_rst2init_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_3[0x20];
+
+	u8         opt_param_mask[0x20];
+
+	u8         reserved_4[0x20];
+
+	struct mlx5_ifc_qpc_bits qpc;
+
+	u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_query_xrc_srq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
+
+	u8         reserved_2[0x600];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_xrc_srq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         xrc_srqn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+enum {
+	MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN  = 0x0,
+	MLX5_QUERY_VPORT_STATE_OUT_STATE_UP    = 0x1,
+};
+
+struct mlx5_ifc_query_vport_state_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x20];
+
+	u8         reserved_2[0x18];
+	u8         admin_state[0x4];
+	u8         state[0x4];
+};
+
+enum {
+	MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT  = 0x0,
+};
+
+struct mlx5_ifc_query_vport_state_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_2[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_vport_counter_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	struct mlx5_ifc_traffic_counter_bits received_errors;
+
+	struct mlx5_ifc_traffic_counter_bits transmit_errors;
+
+	struct mlx5_ifc_traffic_counter_bits received_ib_unicast;
+
+	struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast;
+
+	struct mlx5_ifc_traffic_counter_bits received_ib_multicast;
+
+	struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast;
+
+	struct mlx5_ifc_traffic_counter_bits received_eth_broadcast;
+
+	struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast;
+
+	struct mlx5_ifc_traffic_counter_bits received_eth_unicast;
+
+	struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast;
+
+	struct mlx5_ifc_traffic_counter_bits received_eth_multicast;
+
+	struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
+
+	u8         reserved_2[0xa00];
+};
+
+enum {
+	MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS  = 0x0,
+};
+
+struct mlx5_ifc_query_vport_counter_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_2[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_3[0x60];
+
+	u8         clear[0x1];
+	u8         reserved_4[0x1f];
+
+	u8         reserved_5[0x20];
+};
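Unlike the split _high/_low counter groups earlier, each traffic_counter above is a pair of naturally aligned 64-bit fields, so MLX5_GET64() can read them directly; the clear bit resets the counters as part of the query. A sketch under those assumptions (opcode enum name taken from this series):

	#include <linux/slab.h>
	#include <linux/mlx5/device.h>
	#include <linux/mlx5/driver.h>

	static int example_query_tx_unicast(struct mlx5_core_dev *dev,
					    u64 *packets, u64 *octets)
	{
		u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
		int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
		void *out, *cnt;
		int err;

		out = kzalloc(outlen, GFP_KERNEL);
		if (!out)
			return -ENOMEM;

		MLX5_SET(query_vport_counter_in, in, opcode,
			 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
		MLX5_SET(query_vport_counter_in, in, clear, 1); /* read-and-reset */

		err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
		if (!err) {
			cnt = MLX5_ADDR_OF(query_vport_counter_out, out,
					   transmitted_eth_unicast);
			*packets = MLX5_GET64(traffic_counter, cnt, packets);
			*octets  = MLX5_GET64(traffic_counter, cnt, octets);
		}
		kfree(out);
		return err;
	}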
+
+struct mlx5_ifc_query_tis_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	struct mlx5_ifc_tisc_bits tis_context;
+};
+
+struct mlx5_ifc_query_tis_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         tisn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_tir_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0xc0];
+
+	struct mlx5_ifc_tirc_bits tir_context;
+};
+
+struct mlx5_ifc_query_tir_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         tirn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_srq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	struct mlx5_ifc_srqc_bits srq_context_entry;
+
+	u8         reserved_2[0x600];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_srq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         srqn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_sq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0xc0];
+
+	struct mlx5_ifc_sqc_bits sq_context;
+};
+
+struct mlx5_ifc_query_sq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         sqn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_special_contexts_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x20];
+
+	u8         resd_lkey[0x20];
+};
+
+struct mlx5_ifc_query_special_contexts_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_rqt_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0xc0];
+
+	struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_query_rqt_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         rqtn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_rq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0xc0];
+
+	struct mlx5_ifc_rqc_bits rq_context;
+};
+
+struct mlx5_ifc_query_rq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         rqn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_roce_address_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	struct mlx5_ifc_roce_addr_layout_bits roce_address;
+};
+
+struct mlx5_ifc_query_roce_address_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         roce_address_index[0x10];
+	u8         reserved_2[0x10];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_rmp_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0xc0];
+
+	struct mlx5_ifc_rmpc_bits rmp_context;
+};
+
+struct mlx5_ifc_query_rmp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         rmpn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	u8         opt_param_mask[0x20];
+
+	u8         reserved_2[0x20];
+
+	struct mlx5_ifc_qpc_bits qpc;
+
+	u8         reserved_3[0x80];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_q_counter_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	u8         rx_write_requests[0x20];
+
+	u8         reserved_2[0x20];
+
+	u8         rx_read_requests[0x20];
+
+	u8         reserved_3[0x20];
+
+	u8         rx_atomic_requests[0x20];
+
+	u8         reserved_4[0x20];
+
+	u8         rx_dct_connect[0x20];
+
+	u8         reserved_5[0x20];
+
+	u8         out_of_buffer[0x20];
+
+	u8         reserved_6[0x20];
+
+	u8         out_of_sequence[0x20];
+
+	u8         reserved_7[0x620];
+};
+
+struct mlx5_ifc_query_q_counter_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x80];
+
+	u8         clear[0x1];
+	u8         reserved_3[0x1f];
+
+	u8         reserved_4[0x18];
+	u8         counter_set_id[0x8];
+};
+
+struct mlx5_ifc_query_pages_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x10];
+	u8         function_id[0x10];
+
+	u8         num_pages[0x20];
+};
+
+enum {
+	MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES     = 0x1,
+	MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES     = 0x2,
+	MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES  = 0x3,
+};
+
+struct mlx5_ifc_query_pages_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x10];
+	u8         function_id[0x10];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_nic_vport_context_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_query_nic_vport_context_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_2[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_3[0x5];
+	u8         allowed_list_type[0x3];
+	u8         reserved_4[0x18];
+};
+
+struct mlx5_ifc_query_mkey_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+	u8         reserved_2[0x600];
+
+	u8         bsf0_klm0_pas_mtt0_1[16][0x8];
+
+	u8         bsf1_klm1_pas_mtt2_3[16][0x8];
+};
+
+struct mlx5_ifc_query_mkey_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         mkey_index[0x18];
+
+	u8         pg_access[0x1];
+	u8         reserved_3[0x1f];
+};
+
+struct mlx5_ifc_query_mad_demux_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	u8         mad_dumux_parameters_block[0x20];
+};
+
+struct mlx5_ifc_query_mad_demux_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_l2_table_entry_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0xa0];
+
+	u8         reserved_2[0x13];
+	u8         vlan_valid[0x1];
+	u8         vlan[0xc];
+
+	struct mlx5_ifc_mac_address_layout_bits mac_address;
+
+	u8         reserved_3[0xc0];
+};
+
+struct mlx5_ifc_query_l2_table_entry_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x60];
+
+	u8         reserved_3[0x8];
+	u8         table_index[0x18];
+
+	u8         reserved_4[0x140];
+};
+
+struct mlx5_ifc_query_issi_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x10];
+	u8         current_issi[0x10];
+
+	u8         reserved_2[0xa0];
+
+	u8         supported_issi_reserved[76][0x8];
+	u8         supported_issi_dw0[0x20];
+};
+
+struct mlx5_ifc_query_issi_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_hca_vport_pkey_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	struct mlx5_ifc_pkey_bits pkey[0];
+};
+
+struct mlx5_ifc_query_hca_vport_pkey_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_2[0xb];
+	u8         port_num[0x4];
+	u8         vport_number[0x10];
+
+	u8         reserved_3[0x10];
+	u8         pkey_index[0x10];
+};
+
+struct mlx5_ifc_query_hca_vport_gid_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x20];
+
+	u8         gids_num[0x10];
+	u8         reserved_2[0x10];
+
+	struct mlx5_ifc_array128_auto_bits gid[0];
+};
+
+struct mlx5_ifc_query_hca_vport_gid_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_2[0xb];
+	u8         port_num[0x4];
+	u8         vport_number[0x10];
+
+	u8         reserved_3[0x10];
+	u8         gid_index[0x10];
+};
+
+struct mlx5_ifc_query_hca_vport_context_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
+};
+
+struct mlx5_ifc_query_hca_vport_context_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_2[0xb];
+	u8         port_num[0x4];
+	u8         vport_number[0x10];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_hca_cap_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	union mlx5_ifc_hca_cap_union_bits capability;
 };
 
 struct mlx5_ifc_query_hca_cap_in_bits {
@@ -326,24 +3581,3203 @@
 	u8         reserved_2[0x40];
 };
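With the out-struct now returning the capability union instead of a raw 256-byte blob, the op_mod of QUERY_HCA_CAP selects which union member is valid. A hedged sketch of fetching the current general caps; the op_mod encoding (capability type in the upper bits, current-vs-max in bit 0) mirrors what mlx5_core_get_caps() does in this series, but treat it as an assumption:

	#include <linux/slab.h>
	#include <linux/mlx5/device.h>
	#include <linux/mlx5/driver.h>

	static int example_query_general_caps(struct mlx5_core_dev *dev)
	{
		u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
		int outlen = MLX5_ST_SZ_BYTES(query_hca_cap_out);
		void *out, *cap;
		int err;

		out = kzalloc(outlen, GFP_KERNEL);
		if (!out)
			return -ENOMEM;

		MLX5_SET(query_hca_cap_in, in, opcode,
			 MLX5_CMD_OP_QUERY_HCA_CAP);
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 (MLX5_CAP_GENERAL << 1) | 1 /* current, not max */);

		err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
		if (!err) {
			cap = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
			/* cap overlays mlx5_ifc_cmd_hca_cap_bits here */
			pr_debug("log_max_qp=%u\n",
				 MLX5_GET(cmd_hca_cap, cap, log_max_qp));
		}
		kfree(out);
		return err;
	}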
 
-struct mlx5_ifc_query_hca_cap_out_bits {
+struct mlx5_ifc_query_flow_table_out_bits {
 	u8         status[0x8];
 	u8         reserved_0[0x18];
 
 	u8         syndrome[0x20];
 
-	u8         reserved_1[0x40];
+	u8         reserved_1[0x80];
 
-	u8         capability_struct[256][0x8];
+	u8         reserved_2[0x8];
+	u8         level[0x8];
+	u8         reserved_3[0x8];
+	u8         log_size[0x8];
+
+	u8         reserved_4[0x120];
 };
 
-struct mlx5_ifc_set_hca_cap_out_bits {
+struct mlx5_ifc_query_flow_table_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+
+	u8         table_type[0x8];
+	u8         reserved_3[0x18];
+
+	u8         reserved_4[0x8];
+	u8         table_id[0x18];
+
+	u8         reserved_5[0x140];
+};
+
+struct mlx5_ifc_query_fte_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x1c0];
+
+	struct mlx5_ifc_flow_context_bits flow_context;
+};
+
+struct mlx5_ifc_query_fte_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+
+	u8         table_type[0x8];
+	u8         reserved_3[0x18];
+
+	u8         reserved_4[0x8];
+	u8         table_id[0x18];
+
+	u8         reserved_5[0x40];
+
+	u8         flow_index[0x20];
+
+	u8         reserved_6[0xe0];
+};
+
+enum {
+	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS    = 0x0,
+	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS  = 0x1,
+	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS    = 0x2,
+};
+
+struct mlx5_ifc_query_flow_group_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0xa0];
+
+	u8         start_flow_index[0x20];
+
+	u8         reserved_2[0x20];
+
+	u8         end_flow_index[0x20];
+
+	u8         reserved_3[0xa0];
+
+	u8         reserved_4[0x18];
+	u8         match_criteria_enable[0x8];
+
+	struct mlx5_ifc_fte_match_param_bits match_criteria;
+
+	u8         reserved_5[0xe00];
+};
+
+struct mlx5_ifc_query_flow_group_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+
+	u8         table_type[0x8];
+	u8         reserved_3[0x18];
+
+	u8         reserved_4[0x8];
+	u8         table_id[0x18];
+
+	u8         group_id[0x20];
+
+	u8         reserved_5[0x120];
+};
+
+struct mlx5_ifc_query_eq_out_bits {
 	u8         status[0x8];
 	u8         reserved_0[0x18];
 
 	u8         syndrome[0x20];
 
 	u8         reserved_1[0x40];
+
+	struct mlx5_ifc_eqc_bits eq_context_entry;
+
+	u8         reserved_2[0x40];
+
+	u8         event_bitmask[0x40];
+
+	u8         reserved_3[0x580];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_eq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x18];
+	u8         eq_number[0x8];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_dct_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	struct mlx5_ifc_dctc_bits dct_context_entry;
+
+	u8         reserved_2[0x180];
+};
+
+struct mlx5_ifc_query_dct_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         dctn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	struct mlx5_ifc_cqc_bits cq_context;
+
+	u8         reserved_2[0x600];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_cq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         cqn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_status_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x20];
+
+	u8         enable[0x1];
+	u8         tag_enable[0x1];
+	u8         reserved_2[0x1e];
+};
+
+struct mlx5_ifc_query_cong_status_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x18];
+	u8         priority[0x4];
+	u8         cong_protocol[0x4];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_statistics_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	u8         cur_flows[0x20];
+
+	u8         sum_flows[0x20];
+
+	u8         cnp_ignored_high[0x20];
+
+	u8         cnp_ignored_low[0x20];
+
+	u8         cnp_handled_high[0x20];
+
+	u8         cnp_handled_low[0x20];
+
+	u8         reserved_2[0x100];
+
+	u8         time_stamp_high[0x20];
+
+	u8         time_stamp_low[0x20];
+
+	u8         accumulators_period[0x20];
+
+	u8         ecn_marked_roce_packets_high[0x20];
+
+	u8         ecn_marked_roce_packets_low[0x20];
+
+	u8         cnps_sent_high[0x20];
+
+	u8         cnps_sent_low[0x20];
+
+	u8         reserved_3[0x560];
+};
+
+struct mlx5_ifc_query_cong_statistics_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         clear[0x1];
+	u8         reserved_2[0x1f];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_params_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
+};
+
+struct mlx5_ifc_query_cong_params_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x1c];
+	u8         cong_protocol[0x4];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_adapter_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct;
+};
+
+struct mlx5_ifc_query_adapter_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_qp_2rst_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_qp_2rst_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_qp_2err_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_qp_2err_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_page_fault_resume_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_page_fault_resume_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         error[0x1];
+	u8         reserved_2[0x4];
+	u8         rdma[0x1];
+	u8         read_write[0x1];
+	u8         req_res[0x1];
+	u8         qpn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_nop_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_nop_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_modify_vport_state_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_vport_state_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_2[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_3[0x18];
+	u8         admin_state[0x4];
+	u8         reserved_4[0x4];
+};
+
+struct mlx5_ifc_modify_tis_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_tis_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         tisn[0x18];
+
+	u8         reserved_3[0x20];
+
+	u8         modify_bitmask[0x40];
+
+	u8         reserved_4[0x40];
+
+	struct mlx5_ifc_tisc_bits ctx;
+};
+
+struct mlx5_ifc_modify_tir_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_tir_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         tirn[0x18];
+
+	u8         reserved_3[0x20];
+
+	u8         modify_bitmask[0x40];
+
+	u8         reserved_4[0x40];
+
+	struct mlx5_ifc_tirc_bits ctx;
+};
+
+struct mlx5_ifc_modify_sq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_sq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         sq_state[0x4];
+	u8         reserved_2[0x4];
+	u8         sqn[0x18];
+
+	u8         reserved_3[0x20];
+
+	u8         modify_bitmask[0x40];
+
+	u8         reserved_4[0x40];
+
+	struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rqt_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_rqt_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         rqtn[0x18];
+
+	u8         reserved_3[0x20];
+
+	u8         modify_bitmask[0x40];
+
+	u8         reserved_4[0x40];
+
+	struct mlx5_ifc_rqtc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_rq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         rq_state[0x4];
+	u8         reserved_2[0x4];
+	u8         rqn[0x18];
+
+	u8         reserved_3[0x20];
+
+	u8         modify_bitmask[0x40];
+
+	u8         reserved_4[0x40];
+
+	struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rmp_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_rmp_bitmask_bits {
+	u8         reserved_0[0x20];
+
+	u8         reserved_1[0x1f];
+	u8         lwm[0x1];
+};
+
+struct mlx5_ifc_modify_rmp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         rmp_state[0x4];
+	u8         reserved_2[0x4];
+	u8         rmpn[0x18];
+
+	u8         reserved_3[0x20];
+
+	struct mlx5_ifc_rmp_bitmask_bits bitmask;
+
+	u8         reserved_4[0x40];
+
+	struct mlx5_ifc_rmpc_bits ctx;
+};
+
+struct mlx5_ifc_modify_nic_vport_context_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_nic_vport_field_select_bits {
+	u8         reserved_0[0x1c];
+	u8         permanent_address[0x1];
+	u8         addresses_list[0x1];
+	u8         roce_en[0x1];
+	u8         reserved_1[0x1];
+};
+
+struct mlx5_ifc_modify_nic_vport_context_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_2[0xf];
+	u8         vport_number[0x10];
+
+	struct mlx5_ifc_modify_nic_vport_field_select_bits field_select;
+
+	u8         reserved_3[0x780];
+
+	struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_modify_hca_vport_context_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_hca_vport_context_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_2[0xb];
+	u8         port_num[0x4];
+	u8         vport_number[0x10];
+
+	u8         reserved_3[0x20];
+
+	struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
+};
+
+struct mlx5_ifc_modify_cq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
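+/* MODIFY_CQ and RESIZE_CQ share this command: op_mod selects between
+ * modifying fields of an existing CQ (chosen via the field_select
+ * union below) and resizing it, in which case pas[] describes the
+ * replacement buffer.
+ */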
+enum {
+	MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ  = 0x0,
+	MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ  = 0x1,
+};
+
+struct mlx5_ifc_modify_cq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         cqn[0x18];
+
+	union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select;
+
+	struct mlx5_ifc_cqc_bits cq_context;
+
+	u8         reserved_3[0x600];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_modify_cong_status_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_cong_status_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x18];
+	u8         priority[0x4];
+	u8         cong_protocol[0x4];
+
+	u8         enable[0x1];
+	u8         tag_enable[0x1];
+	u8         reserved_3[0x1e];
+};
+
+struct mlx5_ifc_modify_cong_params_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_cong_params_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x1c];
+	u8         cong_protocol[0x4];
+
+	union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select;
+
+	u8         reserved_3[0x80];
+
+	union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
+};
+
+struct mlx5_ifc_manage_pages_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         output_num_entries[0x20];
+
+	u8         reserved_1[0x20];
+
+	u8         pas[0][0x40];
+};
+
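+/* MANAGE_PAGES op_mod encodes the direction of the page exchange with
+ * firmware: ALLOCATION_FAIL reports that the driver could not supply
+ * the pages firmware asked for, ALLOCATION_SUCCESS posts pages to the
+ * device, and HCA_RETURN_PAGES requests pages back from the device.
+ */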
+enum {
+	MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_FAIL     = 0x0,
+	MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS  = 0x1,
+	MLX5_MANAGE_PAGES_IN_OP_MOD_HCA_RETURN_PAGES    = 0x2,
+};
+
+struct mlx5_ifc_manage_pages_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x10];
+	u8         function_id[0x10];
+
+	u8         input_num_entries[0x20];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_mad_ifc_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	u8         response_mad_packet[256][0x8];
+};
+
+struct mlx5_ifc_mad_ifc_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         remote_lid[0x10];
+	u8         reserved_2[0x8];
+	u8         port[0x8];
+
+	u8         reserved_3[0x20];
+
+	u8         mad[256][0x8];
+};
+
+struct mlx5_ifc_init_hca_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_init_hca_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_3[0x20];
+
+	u8         opt_param_mask[0x20];
+
+	u8         reserved_4[0x20];
+
+	struct mlx5_ifc_qpc_bits qpc;
+
+	u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_init2init_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_init2init_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_3[0x20];
+
+	u8         opt_param_mask[0x20];
+
+	u8         reserved_4[0x20];
+
+	struct mlx5_ifc_qpc_bits qpc;
+
+	u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_get_dropped_packet_log_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	u8         packet_headers_log[128][0x8];
+
+	u8         packet_syndrome[64][0x8];
+};
+
+struct mlx5_ifc_get_dropped_packet_log_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_gen_eqe_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x18];
+	u8         eq_number[0x8];
+
+	u8         reserved_3[0x20];
+
+	u8         eqe[64][0x8];
+};
+
+struct mlx5_ifc_gen_eq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_enable_hca_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x20];
+};
+
+struct mlx5_ifc_enable_hca_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x10];
+	u8         function_id[0x10];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_drain_dct_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_drain_dct_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         dctn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_disable_hca_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x20];
+};
+
+struct mlx5_ifc_disable_hca_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x10];
+	u8         function_id[0x10];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_detach_from_mcg_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_detach_from_mcg_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_3[0x20];
+
+	u8         multicast_gid[16][0x8];
+};
+
+struct mlx5_ifc_destroy_xrc_srq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_xrc_srq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         xrc_srqn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_tis_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_tis_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         tisn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_tir_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_tir_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         tirn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_srq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_srq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         srqn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_sq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_sq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         sqn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rqt_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rqt_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         rqtn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         rqn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rmp_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rmp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         rmpn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_psv_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_psv_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         psvn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_mkey_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_mkey_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         mkey_index[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_flow_table_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_flow_table_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+
+	u8         table_type[0x8];
+	u8         reserved_3[0x18];
+
+	u8         reserved_4[0x8];
+	u8         table_id[0x18];
+
+	u8         reserved_5[0x140];
+};
+
+struct mlx5_ifc_destroy_flow_group_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_flow_group_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+
+	u8         table_type[0x8];
+	u8         reserved_3[0x18];
+
+	u8         reserved_4[0x8];
+	u8         table_id[0x18];
+
+	u8         group_id[0x20];
+
+	u8         reserved_5[0x120];
+};
+
+struct mlx5_ifc_destroy_eq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_eq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x18];
+	u8         eq_number[0x8];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_dct_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_dct_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         dctn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_cq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_cq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         cqn[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_delete_vxlan_udp_dport_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_vxlan_udp_dport_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x20];
+
+	u8         reserved_3[0x10];
+	u8         vxlan_udp_port[0x10];
+};
+
+struct mlx5_ifc_delete_l2_table_entry_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_l2_table_entry_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x60];
+
+	u8         reserved_3[0x8];
+	u8         table_index[0x18];
+
+	u8         reserved_4[0x140];
+};
+
+struct mlx5_ifc_delete_fte_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_fte_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+
+	u8         table_type[0x8];
+	u8         reserved_3[0x18];
+
+	u8         reserved_4[0x8];
+	u8         table_id[0x18];
+
+	u8         reserved_5[0x40];
+
+	u8         flow_index[0x20];
+
+	u8         reserved_6[0xe0];
+};
+
+struct mlx5_ifc_dealloc_xrcd_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_xrcd_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         xrcd[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_uar_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_uar_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         uar[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_transport_domain_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_transport_domain_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         transport_domain[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_q_counter_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_q_counter_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x18];
+	u8         counter_set_id[0x8];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_pd_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_pd_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         pd[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_create_xrc_srq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x8];
+	u8         xrc_srqn[0x18];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_xrc_srq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+
+	struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
+
+	u8         reserved_3[0x600];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_create_tis_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x8];
+	u8         tisn[0x18];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_tis_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0xc0];
+
+	struct mlx5_ifc_tisc_bits ctx;
+};
+
+struct mlx5_ifc_create_tir_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x8];
+	u8         tirn[0x18];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_tir_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0xc0];
+
+	struct mlx5_ifc_tirc_bits ctx;
+};
+
+struct mlx5_ifc_create_srq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x8];
+	u8         srqn[0x18];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_srq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+
+	struct mlx5_ifc_srqc_bits srq_context_entry;
+
+	u8         reserved_3[0x600];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_create_sq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x8];
+	u8         sqn[0x18];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_sq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0xc0];
+
+	struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_create_rqt_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x8];
+	u8         rqtn[0x18];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rqt_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0xc0];
+
+	struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_create_rq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x8];
+	u8         rqn[0x18];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0xc0];
+
+	struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_create_rmp_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x8];
+	u8         rmpn[0x18];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rmp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0xc0];
+
+	struct mlx5_ifc_rmpc_bits ctx;
+};
+
+struct mlx5_ifc_create_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+
+	u8         opt_param_mask[0x20];
+
+	u8         reserved_3[0x20];
+
+	struct mlx5_ifc_qpc_bits qpc;
+
+	u8         reserved_4[0x80];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_create_psv_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	u8         reserved_2[0x8];
+	u8         psv0_index[0x18];
+
+	u8         reserved_3[0x8];
+	u8         psv1_index[0x18];
+
+	u8         reserved_4[0x8];
+	u8         psv2_index[0x18];
+
+	u8         reserved_5[0x8];
+	u8         psv3_index[0x18];
+};
+
+struct mlx5_ifc_create_psv_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         num_psv[0x4];
+	u8         reserved_2[0x4];
+	u8         pd[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_create_mkey_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x8];
+	u8         mkey_index[0x18];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_mkey_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x20];
+
+	u8         pg_access[0x1];
+	u8         reserved_3[0x1f];
+
+	struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+	u8         reserved_4[0x80];
+
+	u8         translations_octword_actual_size[0x20];
+
+	u8         reserved_5[0x560];
+
+	u8         klm_pas_mtt[0][0x20];
+};
+
+struct mlx5_ifc_create_flow_table_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x8];
+	u8         table_id[0x18];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_flow_table_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+
+	u8         table_type[0x8];
+	u8         reserved_3[0x18];
+
+	u8         reserved_4[0x20];
+
+	u8         reserved_5[0x8];
+	u8         level[0x8];
+	u8         reserved_6[0x8];
+	u8         log_size[0x8];
+
+	u8         reserved_7[0x120];
+};
+
+struct mlx5_ifc_create_flow_group_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x8];
+	u8         group_id[0x18];
+
+	u8         reserved_2[0x20];
+};
+
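+/* match_criteria_enable is a bitmask selecting which sections of the
+ * fte_match_param below participate in matching: outer packet headers,
+ * miscellaneous parameters, and inner (encapsulated) packet headers.
+ */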
+enum {
+	MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS    = 0x0,
+	MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS  = 0x1,
+	MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS    = 0x2,
+};
+
+struct mlx5_ifc_create_flow_group_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+
+	u8         table_type[0x8];
+	u8         reserved_3[0x18];
+
+	u8         reserved_4[0x8];
+	u8         table_id[0x18];
+
+	u8         reserved_5[0x20];
+
+	u8         start_flow_index[0x20];
+
+	u8         reserved_6[0x20];
+
+	u8         end_flow_index[0x20];
+
+	u8         reserved_7[0xa0];
+
+	u8         reserved_8[0x18];
+	u8         match_criteria_enable[0x8];
+
+	struct mlx5_ifc_fte_match_param_bits match_criteria;
+
+	u8         reserved_9[0xe00];
+};
+
+struct mlx5_ifc_create_eq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x18];
+	u8         eq_number[0x8];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_eq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+
+	struct mlx5_ifc_eqc_bits eq_context_entry;
+
+	u8         reserved_3[0x40];
+
+	u8         event_bitmask[0x40];
+
+	u8         reserved_4[0x580];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_create_dct_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x8];
+	u8         dctn[0x18];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_dct_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+
+	struct mlx5_ifc_dctc_bits dct_context_entry;
+
+	u8         reserved_3[0x180];
+};
+
+struct mlx5_ifc_create_cq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x8];
+	u8         cqn[0x18];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_cq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+
+	struct mlx5_ifc_cqc_bits cq_context;
+
+	u8         reserved_3[0x600];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_config_int_moderation_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x4];
+	u8         min_delay[0xc];
+	u8         int_vector[0x10];
+
+	u8         reserved_2[0x20];
+};
+
+enum {
+	MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE  = 0x0,
+	MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ   = 0x1,
+};
+
+struct mlx5_ifc_config_int_moderation_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x4];
+	u8         min_delay[0xc];
+	u8         int_vector[0x10];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_attach_to_mcg_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_attach_to_mcg_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_3[0x20];
+
+	u8         multicast_gid[16][0x8];
+};
+
+struct mlx5_ifc_arm_xrc_srq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+enum {
+	MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ  = 0x1,
+};
+
+struct mlx5_ifc_arm_xrc_srq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         xrc_srqn[0x18];
+
+	u8         reserved_3[0x10];
+	u8         lwm[0x10];
+};
+
+struct mlx5_ifc_arm_rq_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+enum {
+	MLX5_ARM_RQ_IN_OP_MOD_SRQ_  = 0x1,
+};
+
+struct mlx5_ifc_arm_rq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         srq_number[0x18];
+
+	u8         reserved_3[0x10];
+	u8         lwm[0x10];
+};
+
+struct mlx5_ifc_arm_dct_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_arm_dct_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x8];
+	u8         dct_number[0x18];
+
+	u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_alloc_xrcd_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x8];
+	u8         xrcd[0x18];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_xrcd_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_uar_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x8];
+	u8         uar[0x18];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_uar_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_transport_domain_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x8];
+	u8         transport_domain[0x18];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_transport_domain_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_q_counter_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x18];
+	u8         counter_set_id[0x8];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_q_counter_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_pd_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x8];
+	u8         pd[0x18];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_pd_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x20];
+
+	u8         reserved_3[0x10];
+	u8         vxlan_udp_port[0x10];
+};
+
+struct mlx5_ifc_access_register_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_1[0x40];
+
+	u8         register_data[0][0x20];
+};
+
+enum {
+	MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE  = 0x0,
+	MLX5_ACCESS_REGISTER_IN_OP_MOD_READ   = 0x1,
+};
+
+struct mlx5_ifc_access_register_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_2[0x10];
+	u8         register_id[0x10];
+
+	u8         argument[0x20];
+
+	u8         register_data[0][0x20];
+};
+
+struct mlx5_ifc_sltp_reg_bits {
+	u8         status[0x4];
+	u8         version[0x4];
+	u8         local_port[0x8];
+	u8         pnat[0x2];
+	u8         reserved_0[0x2];
+	u8         lane[0x4];
+	u8         reserved_1[0x8];
+
+	u8         reserved_2[0x20];
+
+	u8         reserved_3[0x7];
+	u8         polarity[0x1];
+	u8         ob_tap0[0x8];
+	u8         ob_tap1[0x8];
+	u8         ob_tap2[0x8];
+
+	u8         reserved_4[0xc];
+	u8         ob_preemp_mode[0x4];
+	u8         ob_reg[0x8];
+	u8         ob_bias[0x8];
+
+	u8         reserved_5[0x20];
+};
+
+struct mlx5_ifc_slrg_reg_bits {
+	u8         status[0x4];
+	u8         version[0x4];
+	u8         local_port[0x8];
+	u8         pnat[0x2];
+	u8         reserved_0[0x2];
+	u8         lane[0x4];
+	u8         reserved_1[0x8];
+
+	u8         time_to_link_up[0x10];
+	u8         reserved_2[0xc];
+	u8         grade_lane_speed[0x4];
+
+	u8         grade_version[0x8];
+	u8         grade[0x18];
+
+	u8         reserved_3[0x4];
+	u8         height_grade_type[0x4];
+	u8         height_grade[0x18];
+
+	u8         height_dz[0x10];
+	u8         height_dv[0x10];
+
+	u8         reserved_4[0x10];
+	u8         height_sigma[0x10];
+
+	u8         reserved_5[0x20];
+
+	u8         reserved_6[0x4];
+	u8         phase_grade_type[0x4];
+	u8         phase_grade[0x18];
+
+	u8         reserved_7[0x8];
+	u8         phase_eo_pos[0x8];
+	u8         reserved_8[0x8];
+	u8         phase_eo_neg[0x8];
+
+	u8         ffe_set_tested[0x10];
+	u8         test_errors_per_lane[0x10];
+};
+
+struct mlx5_ifc_pvlc_reg_bits {
+	u8         reserved_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_1[0x10];
+
+	u8         reserved_2[0x1c];
+	u8         vl_hw_cap[0x4];
+
+	u8         reserved_3[0x1c];
+	u8         vl_admin[0x4];
+
+	u8         reserved_4[0x1c];
+	u8         vl_operational[0x4];
+};
+
+struct mlx5_ifc_pude_reg_bits {
+	u8         swid[0x8];
+	u8         local_port[0x8];
+	u8         reserved_0[0x4];
+	u8         admin_status[0x4];
+	u8         reserved_1[0x4];
+	u8         oper_status[0x4];
+
+	u8         reserved_2[0x60];
+};
+
+struct mlx5_ifc_ptys_reg_bits {
+	u8         reserved_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_1[0xd];
+	u8         proto_mask[0x3];
+
+	u8         reserved_2[0x40];
+
+	u8         eth_proto_capability[0x20];
+
+	u8         ib_link_width_capability[0x10];
+	u8         ib_proto_capability[0x10];
+
+	u8         reserved_3[0x20];
+
+	u8         eth_proto_admin[0x20];
+
+	u8         ib_link_width_admin[0x10];
+	u8         ib_proto_admin[0x10];
+
+	u8         reserved_4[0x20];
+
+	u8         eth_proto_oper[0x20];
+
+	u8         ib_link_width_oper[0x10];
+	u8         ib_proto_oper[0x10];
+
+	u8         reserved_5[0x20];
+
+	u8         eth_proto_lp_advertise[0x20];
+
+	u8         reserved_6[0x60];
+};
+
+struct mlx5_ifc_ptas_reg_bits {
+	u8         reserved_0[0x20];
+
+	u8         algorithm_options[0x10];
+	u8         reserved_1[0x4];
+	u8         repetitions_mode[0x4];
+	u8         num_of_repetitions[0x8];
+
+	u8         grade_version[0x8];
+	u8         height_grade_type[0x4];
+	u8         phase_grade_type[0x4];
+	u8         height_grade_weight[0x8];
+	u8         phase_grade_weight[0x8];
+
+	u8         gisim_measure_bits[0x10];
+	u8         adaptive_tap_measure_bits[0x10];
+
+	u8         ber_bath_high_error_threshold[0x10];
+	u8         ber_bath_mid_error_threshold[0x10];
+
+	u8         ber_bath_low_error_threshold[0x10];
+	u8         one_ratio_high_threshold[0x10];
+
+	u8         one_ratio_high_mid_threshold[0x10];
+	u8         one_ratio_low_mid_threshold[0x10];
+
+	u8         one_ratio_low_threshold[0x10];
+	u8         ndeo_error_threshold[0x10];
+
+	u8         mixer_offset_step_size[0x10];
+	u8         reserved_2[0x8];
+	u8         mix90_phase_for_voltage_bath[0x8];
+
+	u8         mixer_offset_start[0x10];
+	u8         mixer_offset_end[0x10];
+
+	u8         reserved_3[0x15];
+	u8         ber_test_time[0xb];
+};
+
+struct mlx5_ifc_pspa_reg_bits {
+	u8         swid[0x8];
+	u8         local_port[0x8];
+	u8         sub_port[0x8];
+	u8         reserved_0[0x8];
+
+	u8         reserved_1[0x20];
+};
+
+struct mlx5_ifc_pqdr_reg_bits {
+	u8         reserved_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_1[0x5];
+	u8         prio[0x3];
+	u8         reserved_2[0x6];
+	u8         mode[0x2];
+
+	u8         reserved_3[0x20];
+
+	u8         reserved_4[0x10];
+	u8         min_threshold[0x10];
+
+	u8         reserved_5[0x10];
+	u8         max_threshold[0x10];
+
+	u8         reserved_6[0x10];
+	u8         mark_probability_denominator[0x10];
+
+	u8         reserved_7[0x60];
+};
+
+struct mlx5_ifc_ppsc_reg_bits {
+	u8         reserved_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_1[0x10];
+
+	u8         reserved_2[0x60];
+
+	u8         reserved_3[0x1c];
+	u8         wrps_admin[0x4];
+
+	u8         reserved_4[0x1c];
+	u8         wrps_status[0x4];
+
+	u8         reserved_5[0x8];
+	u8         up_threshold[0x8];
+	u8         reserved_6[0x8];
+	u8         down_threshold[0x8];
+
+	u8         reserved_7[0x20];
+
+	u8         reserved_8[0x1c];
+	u8         srps_admin[0x4];
+
+	u8         reserved_9[0x1c];
+	u8         srps_status[0x4];
+
+	u8         reserved_10[0x40];
+};
+
+struct mlx5_ifc_pplr_reg_bits {
+	u8         reserved_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_1[0x10];
+
+	u8         reserved_2[0x8];
+	u8         lb_cap[0x8];
+	u8         reserved_3[0x8];
+	u8         lb_en[0x8];
+};
+
+struct mlx5_ifc_pplm_reg_bits {
+	u8         reserved_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_1[0x10];
+
+	u8         reserved_2[0x20];
+
+	u8         port_profile_mode[0x8];
+	u8         static_port_profile[0x8];
+	u8         active_port_profile[0x8];
+	u8         reserved_3[0x8];
+
+	u8         retransmission_active[0x8];
+	u8         fec_mode_active[0x18];
+
+	u8         reserved_4[0x20];
+};
+
+struct mlx5_ifc_ppcnt_reg_bits {
+	u8         swid[0x8];
+	u8         local_port[0x8];
+	u8         pnat[0x2];
+	u8         reserved_0[0x8];
+	u8         grp[0x6];
+
+	u8         clr[0x1];
+	u8         reserved_1[0x1c];
+	u8         prio_tc[0x3];
+
+	union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
+};
+
+struct mlx5_ifc_ppad_reg_bits {
+	u8         reserved_0[0x3];
+	u8         single_mac[0x1];
+	u8         reserved_1[0x4];
+	u8         local_port[0x8];
+	u8         mac_47_32[0x10];
+
+	u8         mac_31_0[0x20];
+
+	u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_pmtu_reg_bits {
+	u8         reserved_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_1[0x10];
+
+	u8         max_mtu[0x10];
+	u8         reserved_2[0x10];
+
+	u8         admin_mtu[0x10];
+	u8         reserved_3[0x10];
+
+	u8         oper_mtu[0x10];
+	u8         reserved_4[0x10];
+};
+
+struct mlx5_ifc_pmpr_reg_bits {
+	u8         reserved_0[0x8];
+	u8         module[0x8];
+	u8         reserved_1[0x10];
+
+	u8         reserved_2[0x18];
+	u8         attenuation_5g[0x8];
+
+	u8         reserved_3[0x18];
+	u8         attenuation_7g[0x8];
+
+	u8         reserved_4[0x18];
+	u8         attenuation_12g[0x8];
+};
+
+struct mlx5_ifc_pmpe_reg_bits {
+	u8         reserved_0[0x8];
+	u8         module[0x8];
+	u8         reserved_1[0xc];
+	u8         module_status[0x4];
+
+	u8         reserved_2[0x60];
+};
+
+struct mlx5_ifc_pmpc_reg_bits {
+	u8         module_state_updated[32][0x8];
+};
+
+struct mlx5_ifc_pmlpn_reg_bits {
+	u8         reserved_0[0x4];
+	u8         mlpn_status[0x4];
+	u8         local_port[0x8];
+	u8         reserved_1[0x10];
+
+	u8         e[0x1];
+	u8         reserved_2[0x1f];
+};
+
+struct mlx5_ifc_pmlp_reg_bits {
+	u8         rxtx[0x1];
+	u8         reserved_0[0x7];
+	u8         local_port[0x8];
+	u8         reserved_1[0x8];
+	u8         width[0x8];
+
+	u8         lane0_module_mapping[0x20];
+
+	u8         lane1_module_mapping[0x20];
+
+	u8         lane2_module_mapping[0x20];
+
+	u8         lane3_module_mapping[0x20];
+
+	u8         reserved_2[0x160];
+};
+
+struct mlx5_ifc_pmaos_reg_bits {
+	u8         reserved_0[0x8];
+	u8         module[0x8];
+	u8         reserved_1[0x4];
+	u8         admin_status[0x4];
+	u8         reserved_2[0x4];
+	u8         oper_status[0x4];
+
+	u8         ase[0x1];
+	u8         ee[0x1];
+	u8         reserved_3[0x1c];
+	u8         e[0x2];
+
+	u8         reserved_4[0x40];
+};
+
+struct mlx5_ifc_plpc_reg_bits {
+	u8         reserved_0[0x4];
+	u8         profile_id[0xc];
+	u8         reserved_1[0x4];
+	u8         proto_mask[0x4];
+	u8         reserved_2[0x8];
+
+	u8         reserved_3[0x10];
+	u8         lane_speed[0x10];
+
+	u8         reserved_4[0x17];
+	u8         lpbf[0x1];
+	u8         fec_mode_policy[0x8];
+
+	u8         retransmission_capability[0x8];
+	u8         fec_mode_capability[0x18];
+
+	u8         retransmission_support_admin[0x8];
+	u8         fec_mode_support_admin[0x18];
+
+	u8         retransmission_request_admin[0x8];
+	u8         fec_mode_request_admin[0x18];
+
+	u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_plib_reg_bits {
+	u8         reserved_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_1[0x8];
+	u8         ib_port[0x8];
+
+	u8         reserved_2[0x60];
+};
+
+struct mlx5_ifc_plbf_reg_bits {
+	u8         reserved_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_1[0xd];
+	u8         lbf_mode[0x3];
+
+	u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_pipg_reg_bits {
+	u8         reserved_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_1[0x10];
+
+	u8         dic[0x1];
+	u8         reserved_2[0x19];
+	u8         ipg[0x4];
+	u8         reserved_3[0x2];
+};
+
+struct mlx5_ifc_pifr_reg_bits {
+	u8         reserved_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_1[0x10];
+
+	u8         reserved_2[0xe0];
+
+	u8         port_filter[8][0x20];
+
+	u8         port_filter_update_en[8][0x20];
+};
+
+struct mlx5_ifc_pfcc_reg_bits {
+	u8         reserved_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_1[0x10];
+
+	u8         ppan[0x4];
+	u8         reserved_2[0x4];
+	u8         prio_mask_tx[0x8];
+	u8         reserved_3[0x8];
+	u8         prio_mask_rx[0x8];
+
+	u8         pptx[0x1];
+	u8         aptx[0x1];
+	u8         reserved_4[0x6];
+	u8         pfctx[0x8];
+	u8         reserved_5[0x10];
+
+	u8         pprx[0x1];
+	u8         aprx[0x1];
+	u8         reserved_6[0x6];
+	u8         pfcrx[0x8];
+	u8         reserved_7[0x10];
+
+	u8         reserved_8[0x80];
+};
+
+struct mlx5_ifc_pelc_reg_bits {
+	u8         op[0x4];
+	u8         reserved_0[0x4];
+	u8         local_port[0x8];
+	u8         reserved_1[0x10];
+
+	u8         op_admin[0x8];
+	u8         op_capability[0x8];
+	u8         op_request[0x8];
+	u8         op_active[0x8];
+
+	u8         admin[0x40];
+
+	u8         capability[0x40];
+
+	u8         request[0x40];
+
+	u8         active[0x40];
+
+	u8         reserved_2[0x80];
+};
+
+struct mlx5_ifc_peir_reg_bits {
+	u8         reserved_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_1[0x10];
+
+	u8         reserved_2[0xc];
+	u8         error_count[0x4];
+	u8         reserved_3[0x10];
+
+	u8         reserved_4[0xc];
+	u8         lane[0x4];
+	u8         reserved_5[0x8];
+	u8         error_type[0x8];
+};
+
+struct mlx5_ifc_pcap_reg_bits {
+	u8         reserved_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_1[0x10];
+
+	u8         port_capability_mask[4][0x20];
+};
+
+struct mlx5_ifc_paos_reg_bits {
+	u8         swid[0x8];
+	u8         local_port[0x8];
+	u8         reserved_0[0x4];
+	u8         admin_status[0x4];
+	u8         reserved_1[0x4];
+	u8         oper_status[0x4];
+
+	u8         ase[0x1];
+	u8         ee[0x1];
+	u8         reserved_2[0x1c];
+	u8         e[0x2];
+
+	u8         reserved_3[0x40];
+};
+
+struct mlx5_ifc_pamp_reg_bits {
+	u8         reserved_0[0x8];
+	u8         opamp_group[0x8];
+	u8         reserved_1[0xc];
+	u8         opamp_group_type[0x4];
+
+	u8         start_index[0x10];
+	u8         reserved_2[0x4];
+	u8         num_of_indices[0xc];
+
+	u8         index_data[18][0x10];
+};
+
+struct mlx5_ifc_lane_2_module_mapping_bits {
+	u8         reserved_0[0x6];
+	u8         rx_lane[0x2];
+	u8         reserved_1[0x6];
+	u8         tx_lane[0x2];
+	u8         reserved_2[0x8];
+	u8         module[0x8];
+};
+
+struct mlx5_ifc_bufferx_reg_bits {
+	u8         reserved_0[0x6];
+	u8         lossy[0x1];
+	u8         epsb[0x1];
+	u8         reserved_1[0xc];
+	u8         size[0xc];
+
+	u8         xoff_threshold[0x10];
+	u8         xon_threshold[0x10];
+};
+
+struct mlx5_ifc_set_node_in_bits {
+	u8         node_description[64][0x8];
+};
+
+struct mlx5_ifc_register_power_settings_bits {
+	u8         reserved_0[0x18];
+	u8         power_settings_level[0x8];
+
+	u8         reserved_1[0x60];
+};
+
+struct mlx5_ifc_register_host_endianness_bits {
+	u8         he[0x1];
+	u8         reserved_0[0x1f];
+
+	u8         reserved_1[0x60];
+};
+
+struct mlx5_ifc_umr_pointer_desc_argument_bits {
+	u8         reserved_0[0x20];
+
+	u8         mkey[0x20];
+
+	u8         addressh_63_32[0x20];
+
+	u8         addressl_31_0[0x20];
+};
+
+struct mlx5_ifc_ud_adrs_vector_bits {
+	u8         dc_key[0x40];
+
+	u8         ext[0x1];
+	u8         reserved_0[0x7];
+	u8         destination_qp_dct[0x18];
+
+	u8         static_rate[0x4];
+	u8         sl_eth_prio[0x4];
+	u8         fl[0x1];
+	u8         mlid[0x7];
+	u8         rlid_udp_sport[0x10];
+
+	u8         reserved_1[0x20];
+
+	u8         rmac_47_16[0x20];
+
+	u8         rmac_15_0[0x10];
+	u8         tclass[0x8];
+	u8         hop_limit[0x8];
+
+	u8         reserved_2[0x1];
+	u8         grh[0x1];
+	u8         reserved_3[0x2];
+	u8         src_addr_index[0x8];
+	u8         flow_label[0x14];
+
+	u8         rgid_rip[16][0x8];
+};
+
+struct mlx5_ifc_pages_req_event_bits {
+	u8         reserved_0[0x10];
+	u8         function_id[0x10];
+
+	u8         num_pages[0x20];
+
+	u8         reserved_1[0xa0];
+};
+
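+/* Event queue entry.  The owner bit alternates on each pass through
+ * the EQ ring, letting the driver distinguish freshly written entries
+ * from stale ones without an explicit valid flag.
+ */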
+struct mlx5_ifc_eqe_bits {
+	u8         reserved_0[0x8];
+	u8         event_type[0x8];
+	u8         reserved_1[0x8];
+	u8         event_sub_type[0x8];
+
+	u8         reserved_2[0xe0];
+
+	union mlx5_ifc_event_auto_bits event_data;
+
+	u8         reserved_3[0x10];
+	u8         signature[0x8];
+	u8         reserved_4[0x7];
+	u8         owner[0x1];
+};
+
+enum {
+	MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT  = 0x7,
+};
+
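+/* Command queue entry.  Software fills the entry and hands it to the
+ * device by setting the ownership bit and ringing the command doorbell
+ * vector in the initialization segment; firmware clears ownership and
+ * fills status/signature when the command completes.
+ */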
+struct mlx5_ifc_cmd_queue_entry_bits {
+	u8         type[0x8];
+	u8         reserved_0[0x18];
+
+	u8         input_length[0x20];
+
+	u8         input_mailbox_pointer_63_32[0x20];
+
+	u8         input_mailbox_pointer_31_9[0x17];
+	u8         reserved_1[0x9];
+
+	u8         command_input_inline_data[16][0x8];
+
+	u8         command_output_inline_data[16][0x8];
+
+	u8         output_mailbox_pointer_63_32[0x20];
+
+	u8         output_mailbox_pointer_31_9[0x17];
+	u8         reserved_2[0x9];
+
+	u8         output_length[0x20];
+
+	u8         token[0x8];
+	u8         signature[0x8];
+	u8         reserved_3[0x8];
+	u8         status[0x7];
+	u8         ownership[0x1];
+};
+
+struct mlx5_ifc_cmd_out_bits {
+	u8         status[0x8];
+	u8         reserved_0[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         command_output[0x20];
+};
+
+struct mlx5_ifc_cmd_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_0[0x10];
+
+	u8         reserved_1[0x10];
+	u8         op_mod[0x10];
+
+	u8         command[0][0x20];
+};
+
+struct mlx5_ifc_cmd_if_box_bits {
+	u8         mailbox_data[512][0x8];
+
+	u8         reserved_0[0x180];
+
+	u8         next_pointer_63_32[0x20];
+
+	u8         next_pointer_31_10[0x16];
+	u8         reserved_1[0xa];
+
+	u8         block_number[0x20];
+
+	u8         reserved_2[0x8];
+	u8         token[0x8];
+	u8         ctrl_signature[0x8];
+	u8         signature[0x8];
+};
+
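+/* Memory translation table entry: the physical address tag of one
+ * page of a memory region, plus per-entry read/write enable bits.
+ */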
+struct mlx5_ifc_mtt_bits {
+	u8         ptag_63_32[0x20];
+
+	u8         ptag_31_8[0x18];
+	u8         reserved_0[0x6];
+	u8         wr_en[0x1];
+	u8         rd_en[0x1];
+};
+
+enum {
+	MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER  = 0x0,
+	MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED     = 0x1,
+	MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC  = 0x2,
+};
+
+enum {
+	MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER  = 0x0,
+	MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED     = 0x1,
+	MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC  = 0x2,
+};
+
+enum {
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_INTERNAL_ERR              = 0x1,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_DEAD_IRISC                   = 0x7,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_HW_FATAL_ERR                 = 0x8,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_CRC_ERR                   = 0x9,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_FETCH_PCI_ERR            = 0xa,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PAGE_ERR                 = 0xb,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_ASYNCHRONOUS_EQ_BUF_OVERRUN  = 0xc,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_IN_ERR                    = 0xd,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV                       = 0xe,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR                    = 0xf,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR                = 0x10,
+};
+
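+/* The initialization segment is mapped from the device BAR at probe
+ * time: the driver validates fw_rev/cmd_interface_rev, programs
+ * cmdq_phy_addr, and polls the initializing bit until firmware is
+ * ready to accept commands.
+ */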
+struct mlx5_ifc_initial_seg_bits {
+	u8         fw_rev_minor[0x10];
+	u8         fw_rev_major[0x10];
+
+	u8         cmd_interface_rev[0x10];
+	u8         fw_rev_subminor[0x10];
+
+	u8         reserved_0[0x40];
+
+	u8         cmdq_phy_addr_63_32[0x20];
+
+	u8         cmdq_phy_addr_31_12[0x14];
+	u8         reserved_1[0x2];
+	u8         nic_interface[0x2];
+	u8         log_cmdq_size[0x4];
+	u8         log_cmdq_stride[0x4];
+
+	u8         command_doorbell_vector[0x20];
+
+	u8         reserved_2[0xf00];
+
+	u8         initializing[0x1];
+	u8         reserved_3[0x4];
+	u8         nic_interface_supported[0x3];
+	u8         reserved_4[0x18];
+
+	struct mlx5_ifc_health_buffer_bits health_buffer;
+
+	u8         no_dram_nic_offset[0x20];
+
+	u8         reserved_5[0x6e40];
+
+	u8         reserved_6[0x1f];
+	u8         clear_int[0x1];
+
+	u8         health_syndrome[0x8];
+	u8         health_counter[0x18];
+
+	u8         reserved_7[0x17fc0];
+};
+
+union mlx5_ifc_ports_control_registers_document_bits {
+	struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
+	struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
+	struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+	struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping;
+	struct mlx5_ifc_pamp_reg_bits pamp_reg;
+	struct mlx5_ifc_paos_reg_bits paos_reg;
+	struct mlx5_ifc_pcap_reg_bits pcap_reg;
+	struct mlx5_ifc_peir_reg_bits peir_reg;
+	struct mlx5_ifc_pelc_reg_bits pelc_reg;
+	struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
+	struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+	struct mlx5_ifc_pifr_reg_bits pifr_reg;
+	struct mlx5_ifc_pipg_reg_bits pipg_reg;
+	struct mlx5_ifc_plbf_reg_bits plbf_reg;
+	struct mlx5_ifc_plib_reg_bits plib_reg;
+	struct mlx5_ifc_plpc_reg_bits plpc_reg;
+	struct mlx5_ifc_pmaos_reg_bits pmaos_reg;
+	struct mlx5_ifc_pmlp_reg_bits pmlp_reg;
+	struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg;
+	struct mlx5_ifc_pmpc_reg_bits pmpc_reg;
+	struct mlx5_ifc_pmpe_reg_bits pmpe_reg;
+	struct mlx5_ifc_pmpr_reg_bits pmpr_reg;
+	struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
+	struct mlx5_ifc_ppad_reg_bits ppad_reg;
+	struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
+	struct mlx5_ifc_pplm_reg_bits pplm_reg;
+	struct mlx5_ifc_pplr_reg_bits pplr_reg;
+	struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
+	struct mlx5_ifc_pqdr_reg_bits pqdr_reg;
+	struct mlx5_ifc_pspa_reg_bits pspa_reg;
+	struct mlx5_ifc_ptas_reg_bits ptas_reg;
+	struct mlx5_ifc_ptys_reg_bits ptys_reg;
+	struct mlx5_ifc_pude_reg_bits pude_reg;
+	struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
+	struct mlx5_ifc_slrg_reg_bits slrg_reg;
+	struct mlx5_ifc_sltp_reg_bits sltp_reg;
+	u8         reserved_0[0x60e0];
+};
+
+union mlx5_ifc_debug_enhancements_document_bits {
+	struct mlx5_ifc_health_buffer_bits health_buffer;
+	u8         reserved_0[0x200];
+};
+
+union mlx5_ifc_uplink_pci_interface_document_bits {
+	struct mlx5_ifc_initial_seg_bits initial_seg;
+	u8         reserved_0[0x20060];
 };
 
 #endif /* MLX5_IFC_H */
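The mlx5_ifc layouts above describe hardware fields as bit counts rather than C storage: u8 opcode[0x10] means a 16-bit field at a fixed bit offset inside raw mailbox memory. A minimal sketch of how such a layout is typically consumed, assuming the MLX5_SET()/MLX5_GET() accessors from include/linux/mlx5/device.h; example_build_cmd is a hypothetical helper, the field names come straight from mlx5_ifc_cmd_in_bits above:

#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Fill the fixed command header described by mlx5_ifc_cmd_in_bits.
 * The accessors derive bit offset and width from the _bits layout,
 * so no open-coded shifting or masking is needed.
 */
static void example_build_cmd(void *in, u16 opcode, u16 op_mod)
{
	MLX5_SET(cmd_in, in, opcode, opcode);
	MLX5_SET(cmd_in, in, op_mod, op_mod);
}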
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 310b5f7..f079fb1 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -134,13 +134,21 @@
 
 enum {
 	MLX5_WQE_CTRL_CQ_UPDATE		= 2 << 2,
+	MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE	= 3 << 2,
 	MLX5_WQE_CTRL_SOLICITED		= 1 << 1,
 };
 
 enum {
+	MLX5_SEND_WQE_DS	= 16,
 	MLX5_SEND_WQE_BB	= 64,
 };
 
+#define MLX5_SEND_WQEBB_NUM_DS	(MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
+
+enum {
+	MLX5_SEND_WQE_MAX_WQEBBS	= 16,
+};
+
 enum {
 	MLX5_WQE_FMR_PERM_LOCAL_READ	= 1 << 27,
 	MLX5_WQE_FMR_PERM_LOCAL_WRITE	= 1 << 28,
@@ -200,6 +208,23 @@
 #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
 #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
 
+enum {
+	MLX5_ETH_WQE_L3_INNER_CSUM      = 1 << 4,
+	MLX5_ETH_WQE_L4_INNER_CSUM      = 1 << 5,
+	MLX5_ETH_WQE_L3_CSUM            = 1 << 6,
+	MLX5_ETH_WQE_L4_CSUM            = 1 << 7,
+};
+
+struct mlx5_wqe_eth_seg {
+	u8              rsvd0[4];
+	u8              cs_flags;
+	u8              rsvd1;
+	__be16          mss;
+	__be32          rsvd2;
+	__be16          inline_hdr_sz;
+	u8              inline_hdr_start[2];
+};
+
 struct mlx5_wqe_xrc_seg {
 	__be32			xrc_srqn;
 	u8			rsvd[12];
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
new file mode 100644
index 0000000..967e0fd
--- /dev/null
+++ b/include/linux/mlx5/vport.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_VPORT_H__
+#define __MLX5_VPORT_H__
+
+#include <linux/mlx5/driver.h>
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod);
+void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
+int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
+			     u8 port_num, u16  vf_num, u16 gid_index,
+			     union ib_gid *gid);
+int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
+			      u8 port_num, u16 vf_num, u16 pkey_index,
+			      u16 *pkey);
+int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
+				 u8 other_vport, u8 port_num,
+				 u16 vf_num,
+				 struct mlx5_hca_vport_context *rep);
+int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
+					   u64 *sys_image_guid);
+int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
+				   u64 *node_guid);
+
+#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 8d37e26..0038ac7 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -226,6 +226,24 @@
 #endif
 };
 
+#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
+#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+
+struct page_frag_cache {
+	void * va;
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+	__u16 offset;
+	__u16 size;
+#else
+	__u32 offset;
+#endif
+	/* We maintain a pagecount bias, so that we don't dirty the cache
+	 * line containing page->_count every time we allocate a fragment.
+	 */
+	unsigned int		pagecnt_bias;
+	bool pfmemalloc;
+};
+
 typedef unsigned long __nocast vm_flags_t;
 
 /*
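struct page_frag_cache above keeps one compound page (up to PAGE_FRAG_CACHE_MAX_SIZE, i.e. 32KB) and carves fragments out of it, touching page->_count only when pagecnt_bias runs out. A minimal sketch of a consumer, assuming the netdev_alloc_frag()/build_skb() helpers and the skb_free_frag() wrapper added later in this series; example_build_rx_skb is hypothetical:

#include <linux/skbuff.h>

/* Allocate an RX buffer from the per-CPU fragment cache and wrap
 * it into an skb; on build_skb() failure the fragment goes back
 * via the new skb_free_frag() helper.
 */
static struct sk_buff *example_build_rx_skb(unsigned int fragsz)
{
	struct sk_buff *skb;
	void *buf = netdev_alloc_frag(fragsz);

	if (!buf)
		return NULL;
	skb = build_skb(buf, fragsz);
	if (unlikely(!skb))
		skb_free_frag(buf);
	return skb;
}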
diff --git a/include/linux/net.h b/include/linux/net.h
index 738ea48..04aa068 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -38,7 +38,6 @@
 #define SOCK_NOSPACE		2
 #define SOCK_PASSCRED		3
 #define SOCK_PASSSEC		4
-#define SOCK_EXTERNALLY_ALLOCATED 5
 
 #ifndef ARCH_HAS_SOCKET_TYPES
 /**
@@ -208,7 +207,7 @@
 int __sock_create(struct net *net, int family, int type, int proto,
 		  struct socket **res, int kern);
 int sock_create(int family, int type, int proto, struct socket **res);
-int sock_create_kern(int family, int type, int proto, struct socket **res);
+int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res);
 int sock_create_lite(int family, int type, int proto, struct socket **res);
 void sock_release(struct socket *sock);
 int sock_sendmsg(struct socket *sock, struct msghdr *msg);
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 7d59dc6..9672781 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -66,7 +66,6 @@
 	NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
 	NETIF_F_HW_L2FW_DOFFLOAD_BIT,	/* Allow L2 Forwarding in Hardware */
 	NETIF_F_BUSY_POLL_BIT,		/* Busy poll */
-	NETIF_F_HW_SWITCH_OFFLOAD_BIT,  /* HW switch offload */
 
 	/*
 	 * Add your fresh new feature above and remember to update
@@ -125,7 +124,6 @@
 #define NETIF_F_HW_VLAN_STAG_TX	__NETIF_F(HW_VLAN_STAG_TX)
 #define NETIF_F_HW_L2FW_DOFFLOAD	__NETIF_F(HW_L2FW_DOFFLOAD)
 #define NETIF_F_BUSY_POLL	__NETIF_F(BUSY_POLL)
-#define NETIF_F_HW_SWITCH_OFFLOAD	__NETIF_F(HW_SWITCH_OFFLOAD)
 
 /* Features valid for ethtool to change */
 /* = all defined minus driver/device-class-related */
@@ -161,8 +159,7 @@
  */
 #define NETIF_F_ONE_FOR_ALL	(NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
 				 NETIF_F_SG | NETIF_F_HIGHDMA |		\
-				 NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED | \
-				 NETIF_F_HW_SWITCH_OFFLOAD)
+				 NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
 
 /*
  * If one device doesn't support one of these features, then disable it
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 05b9a69..e20979d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1100,6 +1100,10 @@
 						     struct ifla_vf_info *ivf);
 	int			(*ndo_set_vf_link_state)(struct net_device *dev,
 							 int vf, int link_state);
+	int			(*ndo_get_vf_stats)(struct net_device *dev,
+						    int vf,
+						    struct ifla_vf_stats
+						    *vf_stats);
 	int			(*ndo_set_vf_port)(struct net_device *dev,
 						   int vf,
 						   struct nlattr *port[]);
@@ -1564,7 +1568,7 @@
 	const struct net_device_ops *netdev_ops;
 	const struct ethtool_ops *ethtool_ops;
 #ifdef CONFIG_NET_SWITCHDEV
-	const struct swdev_ops *swdev_ops;
+	const struct switchdev_ops *switchdev_ops;
 #endif
 
 	const struct header_ops *header_ops;
@@ -1652,7 +1656,14 @@
 	rx_handler_func_t __rcu	*rx_handler;
 	void __rcu		*rx_handler_data;
 
+#ifdef CONFIG_NET_CLS_ACT
+	struct tcf_proto __rcu  *ingress_cl_list;
+#endif
 	struct netdev_queue __rcu *ingress_queue;
+#ifdef CONFIG_NETFILTER_INGRESS
+	struct list_head	nf_hooks_ingress;
+#endif
+
 	unsigned char		broadcast[MAX_ADDR_LEN];
 #ifdef CONFIG_RFS_ACCEL
 	struct cpu_rmap		*rx_cpu_rmap;
@@ -1990,6 +2001,7 @@
 
 struct packet_offload {
 	__be16			 type;	/* This is really htons(ether_type). */
+	u16			 priority;
 	struct offload_callbacks callbacks;
 	struct list_head	 list;
 };
@@ -2552,10 +2564,6 @@
 
 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
 {
-	if (WARN_ON(!dev_queue)) {
-		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
-		return;
-	}
 	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
@@ -2571,15 +2579,7 @@
 	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
 }
 
-static inline void netif_tx_stop_all_queues(struct net_device *dev)
-{
-	unsigned int i;
-
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		netif_tx_stop_queue(txq);
-	}
-}
+void netif_tx_stop_all_queues(struct net_device *dev);
 
 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 {
@@ -2840,6 +2840,9 @@
 }
 #endif
 
+u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
+		  unsigned int num_tx_queues);
+
 /*
  * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
  * as a distribution range limit for the returned value.
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 63560d0..00050df 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -10,7 +10,8 @@
 #include <linux/wait.h>
 #include <linux/list.h>
 #include <linux/static_key.h>
-#include <uapi/linux/netfilter.h>
+#include <linux/netfilter_defs.h>
+
 #ifdef CONFIG_NETFILTER
 static inline int NF_DROP_GETERR(int verdict)
 {
@@ -38,9 +39,6 @@
 
 int netfilter_init(void);
 
-/* Largest hook number + 1 */
-#define NF_MAX_HOOKS 8
-
 struct sk_buff;
 
 struct nf_hook_ops;
@@ -54,10 +52,12 @@
 	struct net_device *in;
 	struct net_device *out;
 	struct sock *sk;
+	struct list_head *hook_list;
 	int (*okfn)(struct sock *, struct sk_buff *);
 };
 
 static inline void nf_hook_state_init(struct nf_hook_state *p,
+				      struct list_head *hook_list,
 				      unsigned int hook,
 				      int thresh, u_int8_t pf,
 				      struct net_device *indev,
@@ -71,6 +71,7 @@
 	p->in = indev;
 	p->out = outdev;
 	p->sk = sk;
+	p->hook_list = hook_list;
 	p->okfn = okfn;
 }
 
@@ -79,16 +80,17 @@
 			       const struct nf_hook_state *state);
 
 struct nf_hook_ops {
-	struct list_head list;
+	struct list_head	list;
 
 	/* User fills in from here down. */
-	nf_hookfn	*hook;
-	struct module	*owner;
-	void		*priv;
-	u_int8_t	pf;
-	unsigned int	hooknum;
+	nf_hookfn		*hook;
+	struct net_device	*dev;
+	struct module		*owner;
+	void			*priv;
+	u_int8_t		pf;
+	unsigned int		hooknum;
 	/* Hooks are ordered in ascending priority. */
-	int		priority;
+	int			priority;
 };
 
 struct nf_sockopt_ops {
@@ -131,26 +133,33 @@
 #ifdef HAVE_JUMP_LABEL
 extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+				       u_int8_t pf, unsigned int hook)
 {
 	if (__builtin_constant_p(pf) &&
 	    __builtin_constant_p(hook))
 		return static_key_false(&nf_hooks_needed[pf][hook]);
 
-	return !list_empty(&nf_hooks[pf][hook]);
+	return !list_empty(nf_hook_list);
 }
 #else
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+				       u_int8_t pf, unsigned int hook)
 {
-	return !list_empty(&nf_hooks[pf][hook]);
+	return !list_empty(nf_hook_list);
 }
 #endif
 
+static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+{
+	return nf_hook_list_active(&nf_hooks[pf][hook], pf, hook);
+}
+
 int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
 
 /**
  *	nf_hook_thresh - call a netfilter hook
- *	
+ *
  *	Returns 1 if the hook has allowed the packet to pass.  The function
  *	okfn must be invoked by the caller in this case.  Any other return
  *	value indicates the packet has been consumed by the hook.
@@ -166,8 +175,8 @@
 	if (nf_hooks_active(pf, hook)) {
 		struct nf_hook_state state;
 
-		nf_hook_state_init(&state, hook, thresh, pf,
-				   indev, outdev, sk, okfn);
+		nf_hook_state_init(&state, &nf_hooks[pf][hook], hook, thresh,
+				   pf, indev, outdev, sk, okfn);
 		return nf_hook_slow(skb, &state);
 	}
 	return 1;
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 34b1723..48bb01e 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -108,8 +108,13 @@
 	atomic64_t packets;
 };
 
+struct ip_set_comment_rcu {
+	struct rcu_head rcu;
+	char str[0];
+};
+
 struct ip_set_comment {
-	char *str;
+	struct ip_set_comment_rcu __rcu *c;
 };
 
 struct ip_set_skbinfo {
@@ -122,13 +127,13 @@
 struct ip_set;
 
 #define ext_timeout(e, s)	\
-(unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT])
+((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT]))
 #define ext_counter(e, s)	\
-(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])
+((struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]))
 #define ext_comment(e, s)	\
-(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])
+((struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]))
 #define ext_skbinfo(e, s)	\
-(struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO])
+((struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO]))
 
 typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
 			   const struct ip_set_ext *ext,
@@ -176,6 +181,9 @@
 	/* List elements */
 	int (*list)(const struct ip_set *set, struct sk_buff *skb,
 		    struct netlink_callback *cb);
+	/* Keep the listing private while a resize runs in parallel */
+	void (*uref)(struct ip_set *set, struct netlink_callback *cb,
+		     bool start);
 
 	/* Return true if "b" set is the same as "a"
 	 * according to the create set parameters */
@@ -223,7 +231,7 @@
 	/* The name of the set */
 	char name[IPSET_MAXNAMELEN];
 	/* Lock protecting the set data */
-	rwlock_t lock;
+	spinlock_t lock;
 	/* References to the set */
 	u32 ref;
 	/* The core set type */
@@ -341,12 +349,11 @@
 			      cpu_to_be64((u64)skbinfo->skbmark << 32 |
 					  skbinfo->skbmarkmask))) ||
 	       (skbinfo->skbprio &&
-	        nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
+		nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
 			      cpu_to_be32(skbinfo->skbprio))) ||
 	       (skbinfo->skbqueue &&
-	        nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
+		nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
 			     cpu_to_be16(skbinfo->skbqueue)));
-
 }
 
 static inline void
@@ -380,12 +387,12 @@
 
 /* Netlink CB args */
 enum {
-	IPSET_CB_NET = 0,
-	IPSET_CB_DUMP,
-	IPSET_CB_INDEX,
-	IPSET_CB_ARG0,
+	IPSET_CB_NET = 0,	/* net namespace */
+	IPSET_CB_DUMP,		/* dump single set/all sets */
+	IPSET_CB_INDEX,		/* set index */
+	IPSET_CB_PRIVATE,	/* set private data */
+	IPSET_CB_ARG0,		/* type specific */
 	IPSET_CB_ARG1,
-	IPSET_CB_ARG2,
 };
 
 /* register and unregister set references */
@@ -533,29 +540,9 @@
 #include <linux/netfilter/ipset/ip_set_timeout.h>
 #include <linux/netfilter/ipset/ip_set_comment.h>
 
-static inline int
+int
 ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
-		      const void *e, bool active)
-{
-	if (SET_WITH_TIMEOUT(set)) {
-		unsigned long *timeout = ext_timeout(e, set);
-
-		if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
-			htonl(active ? ip_set_timeout_get(timeout)
-				: *timeout)))
-			return -EMSGSIZE;
-	}
-	if (SET_WITH_COUNTER(set) &&
-	    ip_set_put_counter(skb, ext_counter(e, set)))
-		return -EMSGSIZE;
-	if (SET_WITH_COMMENT(set) &&
-	    ip_set_put_comment(skb, ext_comment(e, set)))
-		return -EMSGSIZE;
-	if (SET_WITH_SKBINFO(set) &&
-	    ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
-		return -EMSGSIZE;
-	return 0;
-}
+		      const void *e, bool active);
 
 #define IP_SET_INIT_KEXT(skb, opt, set)			\
 	{ .bytes = (skb)->len, .packets = 1,		\
@@ -565,8 +552,6 @@
 	{ .bytes = ULLONG_MAX, .packets = ULLONG_MAX,	\
 	  .timeout = (set)->timeout }
 
-#define IP_SET_INIT_CIDR(a, b) ((a) ? (a) : (b))
-
 #define IPSET_CONCAT(a, b)		a##b
 #define IPSET_TOKEN(a, b)		IPSET_CONCAT(a, b)
 
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
index 21217ea..8d02485 100644
--- a/include/linux/netfilter/ipset/ip_set_comment.h
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -16,41 +16,57 @@
 	return nla_data(tb);
 }
 
+/* Called from uadd only, protected by the set spinlock.
+ * The kadt functions don't use the comment extensions in any way.
+ */
 static inline void
 ip_set_init_comment(struct ip_set_comment *comment,
 		    const struct ip_set_ext *ext)
 {
+	struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
 	size_t len = ext->comment ? strlen(ext->comment) : 0;
 
-	if (unlikely(comment->str)) {
-		kfree(comment->str);
-		comment->str = NULL;
+	if (unlikely(c)) {
+		kfree_rcu(c, rcu);
+		rcu_assign_pointer(comment->c, NULL);
 	}
 	if (!len)
 		return;
 	if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
 		len = IPSET_MAX_COMMENT_SIZE;
-	comment->str = kzalloc(len + 1, GFP_ATOMIC);
-	if (unlikely(!comment->str))
+	c = kzalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
+	if (unlikely(!c))
 		return;
-	strlcpy(comment->str, ext->comment, len + 1);
+	strlcpy(c->str, ext->comment, len + 1);
+	rcu_assign_pointer(comment->c, c);
 }
 
+/* Used only when dumping a set, protected by rcu_read_lock_bh() */
 static inline int
 ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
 {
-	if (!comment->str)
+	struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
+
+	if (!c)
 		return 0;
-	return nla_put_string(skb, IPSET_ATTR_COMMENT, comment->str);
+	return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
 }
 
+/* Called from uadd/udel, flush or the garbage collectors, all
+ * protected by the set spinlock.
+ * Also called when the set is destroyed, at which point there can't
+ * be any user of the set data anymore.
+ */
 static inline void
 ip_set_comment_free(struct ip_set_comment *comment)
 {
-	if (unlikely(!comment->str))
+	struct ip_set_comment_rcu *c;
+
+	c = rcu_dereference_protected(comment->c, 1);
+	if (unlikely(!c))
 		return;
-	kfree(comment->str);
-	comment->str = NULL;
+	kfree_rcu(c, rcu);
+	rcu_assign_pointer(comment->c, NULL);
 }
 
 #endif
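Any reader that runs outside the set spinlock now has to dereference comment->c the same way ip_set_put_comment() above does. A minimal sketch, assuming the caller holds rcu_read_lock_bh() only for the duration of the access; example_comment_len is hypothetical:

#include <linux/rcupdate.h>
#include <linux/string.h>

/* Read the comment string under the BH-flavoured RCU read lock;
 * kfree_rcu() in ip_set_comment_free() guarantees the string stays
 * valid until the read side finishes.
 */
static size_t example_comment_len(struct ip_set_comment *comment)
{
	struct ip_set_comment_rcu *c;
	size_t len = 0;

	rcu_read_lock_bh();
	c = rcu_dereference_bh(comment->c);
	if (c)
		len = strlen(c->str);
	rcu_read_unlock_bh();
	return len;
}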
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index 83c2f9e..1d6a935 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -40,38 +40,33 @@
 }
 
 static inline bool
-ip_set_timeout_test(unsigned long timeout)
+ip_set_timeout_expired(unsigned long *t)
 {
-	return timeout == IPSET_ELEM_PERMANENT ||
-	       time_is_after_jiffies(timeout);
-}
-
-static inline bool
-ip_set_timeout_expired(unsigned long *timeout)
-{
-	return *timeout != IPSET_ELEM_PERMANENT &&
-	       time_is_before_jiffies(*timeout);
+	return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t);
 }
 
 static inline void
-ip_set_timeout_set(unsigned long *timeout, u32 t)
+ip_set_timeout_set(unsigned long *timeout, u32 value)
 {
-	if (!t) {
+	unsigned long t;
+
+	if (!value) {
 		*timeout = IPSET_ELEM_PERMANENT;
 		return;
 	}
 
-	*timeout = msecs_to_jiffies(t * 1000) + jiffies;
-	if (*timeout == IPSET_ELEM_PERMANENT)
+	t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies;
+	if (t == IPSET_ELEM_PERMANENT)
 		/* Bingo! :-) */
-		(*timeout)--;
+		t--;
+	*timeout = t;
 }
 
 static inline u32
 ip_set_timeout_get(unsigned long *timeout)
 {
 	return *timeout == IPSET_ELEM_PERMANENT ? 0 :
-		jiffies_to_msecs(*timeout - jiffies)/1000;
+		jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
 }
 
 #endif	/* __KERNEL__ */
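The helpers reserve the value 0 (IPSET_ELEM_PERMANENT) for "no timeout", which is why ip_set_timeout_set() nudges a computed deadline back by one jiffy if it happens to collide with it. A short sketch of the round trip; example_timeout is hypothetical:

/* A 30 second timeout becomes a jiffies deadline and converts back
 * to (roughly) the remaining seconds; a value of 0 means permanent.
 */
static void example_timeout(void)
{
	unsigned long t;

	ip_set_timeout_set(&t, 30);
	WARN_ON(ip_set_timeout_expired(&t));
	pr_info("remaining: %u s\n", ip_set_timeout_get(&t));

	ip_set_timeout_set(&t, 0);
	WARN_ON(t != IPSET_ELEM_PERMANENT);
}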
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index a3e215b..286098a 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -62,6 +62,7 @@
 	void *matchinfo;
 	unsigned int hook_mask;
 	u_int8_t family;
+	bool nft_compat;
 };
 
 /**
@@ -92,6 +93,7 @@
 	void *targinfo;
 	unsigned int hook_mask;
 	u_int8_t family;
+	bool nft_compat;
 };
 
 /* Target destructor parameters */
@@ -222,13 +224,10 @@
 	unsigned int stacksize;
 	unsigned int __percpu *stackptr;
 	void ***jumpstack;
-	/* ipt_entry tables: one per CPU */
-	/* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
-	void *entries[1];
+
+	unsigned char entries[0] __aligned(8);
 };
 
-#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
-			  + nr_cpu_ids * sizeof(char *))
 int xt_register_target(struct xt_target *target);
 void xt_unregister_target(struct xt_target *target);
 int xt_register_targets(struct xt_target *target, unsigned int n);
@@ -351,6 +350,57 @@
 	return ret;
 }
 
+
+/* On SMP, ip(6)t_entry->counters.pcnt holds the address of the
+ * real (percpu) counter.  On !SMP, it's just the packet count,
+ * so nothing needs to be done there.
+ *
+ * xt_percpu_counter_alloc returns the address of the percpu
+ * counter, or 0 on !SMP.  We force an alignment of 16 bytes
+ * so that bytes/packets share a common cache line.
+ *
+ * Hence the caller must use IS_ERR_VALUE to check for an error;
+ * this allows us to return 0 for single core systems without
+ * forcing callers to deal with SMP vs. NONSMP issues.
+ */
+static inline u64 xt_percpu_counter_alloc(void)
+{
+	if (nr_cpu_ids > 1) {
+		void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
+						    sizeof(struct xt_counters));
+
+		if (res == NULL)
+			return (u64) -ENOMEM;
+
+		return (u64) (__force unsigned long) res;
+	}
+
+	return 0;
+}
+static inline void xt_percpu_counter_free(u64 pcnt)
+{
+	if (nr_cpu_ids > 1)
+		free_percpu((void __percpu *) (unsigned long) pcnt);
+}
+
+static inline struct xt_counters *
+xt_get_this_cpu_counter(struct xt_counters *cnt)
+{
+	if (nr_cpu_ids > 1)
+		return this_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt);
+
+	return cnt;
+}
+
+static inline struct xt_counters *
+xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
+{
+	if (nr_cpu_ids > 1)
+		return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu);
+
+	return cnt;
+}
+
 struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
 void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
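The life cycle the comment block above implies: allocate at rule check time, bump the CPU-local copy on every match, free at destroy. A minimal sketch; example_counter_init/example_counter_hit are hypothetical, xt_counters carries pcnt/bcnt as packet/byte counts:

/* Stash either a percpu address (SMP) or nothing (UP) in cnt->pcnt;
 * IS_ERR_VALUE() is the only valid error check, since 0 is a
 * legitimate return on single-core systems.
 */
static int example_counter_init(struct xt_counters *cnt)
{
	u64 pcnt = xt_percpu_counter_alloc();

	if (IS_ERR_VALUE(pcnt))
		return -ENOMEM;
	cnt->pcnt = pcnt;
	return 0;
}

/* Hot path: resolve the CPU-local counters and update them. */
static void example_counter_hit(struct xt_counters *cnt, unsigned int len)
{
	struct xt_counters *c = xt_get_this_cpu_counter(cnt);

	c->bcnt += len;
	c->pcnt++;
}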
 
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index f2fdb5a..6d80fc6 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -20,13 +20,6 @@
 #define BRNF_BRIDGED_DNAT		0x02
 #define BRNF_NF_BRIDGE_PREROUTING	0x08
 
-static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
-{
-	if (skb->nf_bridge->orig_proto == BRNF_PROTO_PPPOE)
-		return PPPOE_SES_HLEN;
-	return 0;
-}
-
 int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
 
 static inline void br_drop_fake_rtable(struct sk_buff *skb)
diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h
new file mode 100644
index 0000000..d3a7f85
--- /dev/null
+++ b/include/linux/netfilter_defs.h
@@ -0,0 +1,9 @@
+#ifndef __LINUX_NETFILTER_CORE_H_
+#define __LINUX_NETFILTER_CORE_H_
+
+#include <uapi/linux/netfilter.h>
+
+/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */
+#define NF_MAX_HOOKS 8
+
+#endif
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
new file mode 100644
index 0000000..cb0727f
--- /dev/null
+++ b/include/linux/netfilter_ingress.h
@@ -0,0 +1,41 @@
+#ifndef _NETFILTER_INGRESS_H_
+#define _NETFILTER_INGRESS_H_
+
+#include <linux/netfilter.h>
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_NETFILTER_INGRESS
+static inline int nf_hook_ingress_active(struct sk_buff *skb)
+{
+	return nf_hook_list_active(&skb->dev->nf_hooks_ingress,
+				   NFPROTO_NETDEV, NF_NETDEV_INGRESS);
+}
+
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+	struct nf_hook_state state;
+
+	nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
+			   NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL,
+			   skb->dev, NULL, NULL);
+	return nf_hook_slow(skb, &state);
+}
+
+static inline void nf_hook_ingress_init(struct net_device *dev)
+{
+	INIT_LIST_HEAD(&dev->nf_hooks_ingress);
+}
+#else /* CONFIG_NETFILTER_INGRESS */
+static inline int nf_hook_ingress_active(struct sk_buff *skb)
+{
+	return 0;
+}
+
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+	return 0;
+}
+
+static inline void nf_hook_ingress_init(struct net_device *dev) {}
+#endif /* CONFIG_NETFILTER_INGRESS */
+#endif /* _NETFILTER_INGRESS_H_ */
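Attaching a function to the new per-device ingress point ties an nf_hook_ops to one net_device through the dev field added to the ops in netfilter.h above. A minimal sketch, assuming the nf_hookfn signature from this tree and registration through the usual nf_register_hook() path; the example_* names are hypothetical:

#include <linux/netfilter.h>

static unsigned int example_ingress_fn(const struct nf_hook_ops *ops,
				       struct sk_buff *skb,
				       const struct nf_hook_state *state)
{
	/* inspect or filter skb here */
	return NF_ACCEPT;
}

static struct nf_hook_ops example_ingress_ops = {
	.hook		= example_ingress_fn,
	.pf		= NFPROTO_NETDEV,
	.hooknum	= NF_NETDEV_INGRESS,
	.priority	= 0,
	/* .dev must point at the target net_device before registration,
	 * so the op lands on that device's nf_hooks_ingress list.
	 */
};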
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 64dad1cc..8b7d28f 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -25,6 +25,9 @@
 struct nf_ipv6_ops {
 	int (*chk_addr)(struct net *net, const struct in6_addr *addr,
 			const struct net_device *dev, int strict);
+	void (*route_input)(struct sk_buff *skb);
+	int (*fragment)(struct sock *sk, struct sk_buff *skb,
+			int (*output)(struct sock *, struct sk_buff *));
 };
 
 extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 6835c12..9120edb 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -28,6 +28,8 @@
 	__u32			dst_group;
 	__u32			flags;
 	struct sock		*sk;
+	bool			nsid_is_set;
+	int			nsid;
 };
 
 #define NETLINK_CB(skb)		(*(struct netlink_skb_parms*)&((skb)->cb))
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index cb63a7b..fcff8f8 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2330,6 +2330,8 @@
 #define PCI_DEVICE_ID_ALTIMA_AC9100	0x03ea
 #define PCI_DEVICE_ID_ALTIMA_AC1003	0x03eb
 
+#define PCI_VENDOR_ID_CAVIUM		0x177d
+
 #define PCI_VENDOR_ID_BELKIN		0x1799
 #define PCI_DEVICE_ID_BELKIN_F5D7010V7	0x701f
 
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 6858098..a26c3f8 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -181,6 +181,9 @@
 	/* PHY addresses to be ignored when probing */
 	u32 phy_mask;
 
+	/* PHY addresses for which the TA/read failure is ignored */
+	u32 phy_ignore_ta_mask;
+
 	/*
 	 * Pointer to an array of interrupts, each PHY's
 	 * interrupt at the index matching its address
@@ -675,6 +678,17 @@
 }
 
 /**
+ * phy_interface_is_rgmii - Convenience function for testing if a PHY interface
+ * is RGMII (all variants)
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
+{
+	return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
+		phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
+}
+
+/**
  * phy_write_mmd - Convenience function for writing a register
  * on an MMD on a given PHY.
  * @phydev: The phy_device struct
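With phy_interface_is_rgmii() above, drivers no longer open-code the four RGMII variant comparisons. A tiny usage sketch; example_config_delays is hypothetical:

/* Apply RGMII-specific internal delay setup for any of the four
 * RGMII variants (plain, -ID, -RXID, -TXID) in one test.
 */
static void example_config_delays(struct phy_device *phydev)
{
	if (phy_interface_is_rgmii(phydev)) {
		/* program TX/RX clock delays here */
	}
}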
diff --git a/include/linux/platform_data/nfcmrvl.h b/include/linux/platform_data/nfcmrvl.h
new file mode 100644
index 0000000..ac91707
--- /dev/null
+++ b/include/linux/platform_data/nfcmrvl.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#ifndef _NFCMRVL_PTF_H_
+#define _NFCMRVL_PTF_H_
+
+struct nfcmrvl_platform_data {
+	/*
+	 * Generic
+	 */
+
+	/* GPIO that is wired to RESET_N signal */
+	unsigned int reset_n_io;
+	/* Tell if the transport is muxed into the HCI one */
+	unsigned int hci_muxed;
+
+	/*
+	 * UART specific
+	 */
+
+	/* Tell if UART needs flow control at init */
+	unsigned int flow_control;
+	/* Tell if firmware supports break control for power management */
+	unsigned int break_control;
+};
+
+#endif /* _NFCMRVL_PTF_H_ */
diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st-nci.h
similarity index 70%
rename from include/linux/platform_data/st21nfcb.h
rename to include/linux/platform_data/st-nci.h
index b023373..d9d400a 100644
--- a/include/linux/platform_data/st21nfcb.h
+++ b/include/linux/platform_data/st-nci.h
@@ -1,7 +1,7 @@
 /*
- * Driver include for the ST21NFCB NFC chip.
+ * Driver include for ST NCI NFC chip family.
  *
- * Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
+ * Copyright (C) 2014-2015  STMicroelectronics SAS. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -16,14 +16,14 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#ifndef _ST21NFCB_NCI_H_
-#define _ST21NFCB_NCI_H_
+#ifndef _ST_NCI_H_
+#define _ST_NCI_H_
 
-#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci"
+#define ST_NCI_DRIVER_NAME "st_nci"
 
-struct st21nfcb_nfc_platform_data {
+struct st_nci_nfc_platform_data {
 	unsigned int gpio_reset;
 	unsigned int irq_polarity;
 };
 
-#endif /* _ST21NFCB_NCI_H_ */
+#endif /* _ST_NCI_H_ */
diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st_nci.h
similarity index 70%
copy from include/linux/platform_data/st21nfcb.h
copy to include/linux/platform_data/st_nci.h
index b023373..d9d400a 100644
--- a/include/linux/platform_data/st21nfcb.h
+++ b/include/linux/platform_data/st_nci.h
@@ -1,7 +1,7 @@
 /*
- * Driver include for the ST21NFCB NFC chip.
+ * Driver include for ST NCI NFC chip family.
  *
- * Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
+ * Copyright (C) 2014-2015  STMicroelectronics SAS. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -16,14 +16,14 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#ifndef _ST21NFCB_NCI_H_
-#define _ST21NFCB_NCI_H_
+#ifndef _ST_NCI_H_
+#define _ST_NCI_H_
 
-#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci"
+#define ST_NCI_DRIVER_NAME "st_nci"
 
-struct st21nfcb_nfc_platform_data {
+struct st_nci_nfc_platform_data {
 	unsigned int gpio_reset;
 	unsigned int irq_polarity;
 };
 
-#endif /* _ST21NFCB_NCI_H_ */
+#endif /* _ST_NCI_H_ */
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 7b8e260..39adaa9 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -79,17 +79,9 @@
 
 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
 
-#ifdef CONFIG_NET_CLS_ACT
+#ifdef CONFIG_NET_INGRESS
 void net_inc_ingress_queue(void);
 void net_dec_ingress_queue(void);
-#else
-static inline void net_inc_ingress_queue(void)
-{
-}
-
-static inline void net_dec_ingress_queue(void)
-{
-}
 #endif
 
 extern void rtnetlink_init(void);
@@ -122,5 +114,9 @@
 
 extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 				   struct net_device *dev, u16 mode,
-				   u32 flags, u32 mask, int nlflags);
+				   u32 flags, u32 mask, int nlflags,
+				   u32 filter_mask,
+				   int (*vlan_fill)(struct sk_buff *skb,
+						    struct net_device *dev,
+						    u32 filter_mask));
 #endif	/* __LINUX_RTNETLINK_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f15154a..d6cdd6e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -34,7 +34,9 @@
 #include <linux/dma-mapping.h>
 #include <linux/netdev_features.h>
 #include <linux/sched.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
+#include <linux/splice.h>
+#include <linux/in6.h>
 
 /* A. Checksumming of received packets by device.
  *
@@ -170,13 +172,19 @@
 		BRNF_PROTO_UNCHANGED,
 		BRNF_PROTO_8021Q,
 		BRNF_PROTO_PPPOE
-	} orig_proto;
+	} orig_proto:8;
 	bool			pkt_otherhost;
+	__u16			frag_max_size;
 	unsigned int		mask;
 	struct net_device	*physindev;
-	struct net_device	*physoutdev;
-	char			neigh_header[8];
-	__be32			ipv4_daddr;
+	union {
+		struct net_device *physoutdev;
+		char neigh_header[8];
+	};
+	union {
+		__be32          ipv4_daddr;
+		struct in6_addr ipv6_daddr;
+	};
 };
 #endif
 
@@ -859,6 +867,9 @@
 					int len, int odd, struct sk_buff *skb),
 			    void *from, int length);
 
+int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
+			 int offset, size_t size);
+
 struct skb_seq_state {
 	__u32		lower_offset;
 	__u32		upper_offset;
@@ -919,7 +930,6 @@
 	skb->hash = hash;
 }
 
-void __skb_get_hash(struct sk_buff *skb);
 static inline __u32 skb_get_hash(struct sk_buff *skb)
 {
 	if (!skb->l4_hash && !skb->sw_hash)
@@ -928,6 +938,8 @@
 	return skb->hash;
 }
 
+__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+
 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
 {
 	return skb->hash;
@@ -1935,8 +1947,8 @@
 
 	if (skb_transport_header_was_set(skb))
 		return;
-	else if (skb_flow_dissect(skb, &keys))
-		skb_set_transport_header(skb, keys.thoff);
+	else if (skb_flow_dissect_flow_keys(skb, &keys))
+		skb_set_transport_header(skb, keys.control.thoff);
 	else
 		skb_set_transport_header(skb, offset_hint);
 }
@@ -2127,10 +2139,6 @@
 		kfree_skb(skb);
 }
 
-#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
-#define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
-#define NETDEV_PAGECNT_MAX_BIAS	   NETDEV_FRAG_PAGE_MAX_SIZE
-
 void *netdev_alloc_frag(unsigned int fragsz);
 
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
@@ -2185,6 +2193,11 @@
 	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
 }
 
+static inline void skb_free_frag(void *addr)
+{
+	__free_page_frag(addr);
+}
+
 void *napi_alloc_frag(unsigned int fragsz);
 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
 				 unsigned int length, gfp_t gfp_mask);
@@ -2692,9 +2705,15 @@
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
 			      int len, __wsum csum);
-int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+ssize_t skb_socket_splice(struct sock *sk,
+			  struct pipe_inode_info *pipe,
+			  struct splice_pipe_desc *spd);
+int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
 		    struct pipe_inode_info *pipe, unsigned int len,
-		    unsigned int flags);
+		    unsigned int flags,
+		    ssize_t (*splice_cb)(struct sock *,
+					 struct pipe_inode_info *,
+					 struct splice_pipe_desc *));
 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
 unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
 int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
@@ -2729,8 +2748,9 @@
 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
 		    __wsum csum);
 
-static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
-					 int len, void *data, int hlen, void *buffer)
+static inline void * __must_check
+__skb_header_pointer(const struct sk_buff *skb, int offset,
+		     int len, void *data, int hlen, void *buffer)
 {
 	if (hlen - offset >= len)
 		return data + offset;
@@ -2742,8 +2762,8 @@
 	return buffer;
 }
 
-static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
-				       int len, void *buffer)
+static inline void * __must_check
+skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
 {
 	return __skb_header_pointer(skb, offset, len, skb->data,
 				    skb_headlen(skb), buffer);
@@ -3050,7 +3070,7 @@
 		}
 	} else if (skb->csum_bad) {
 		/* ip_summed == CHECKSUM_NONE in this case */
-		return 1;
+		return (__force __sum16)1;
 	}
 
 	skb->csum = psum;
@@ -3298,9 +3318,6 @@
 	return skb->queue_mapping != 0;
 }
 
-u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
-		  unsigned int num_tx_queues);
-
 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
 {
 #ifdef CONFIG_XFRM
@@ -3355,15 +3372,14 @@
 static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
 {
 	int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
-	    skb_transport_offset(skb);
-	__u16 csum;
+		   skb_transport_offset(skb);
+	__wsum partial;
 
-	csum = csum_fold(csum_partial(skb_transport_header(skb),
-				      plen, skb->csum));
+	partial = csum_partial(skb_transport_header(skb), plen, skb->csum);
 	skb->csum = res;
 	SKB_GSO_CB(skb)->csum_start -= plen;
 
-	return csum;
+	return csum_fold(partial);
 }
 
 static inline bool skb_is_gso(const struct sk_buff *skb)
@@ -3418,10 +3434,9 @@
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
 
 int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
-
-u32 skb_get_poff(const struct sk_buff *skb);
-u32 __skb_get_poff(const struct sk_buff *skb, void *data,
-		   const struct flow_keys *keys, int hlen);
+struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
+				     unsigned int transport_len,
+				     __sum16(*skb_chkf)(struct sk_buff *skb));
 
 /**
  * skb_head_is_locked - Determine if the skb->head is locked down
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index 083ac38..fddebc6 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -1,7 +1,10 @@
 #ifndef __SOCK_DIAG_H__
 #define __SOCK_DIAG_H__
 
+#include <linux/netlink.h>
 #include <linux/user_namespace.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
 #include <uapi/linux/sock_diag.h>
 
 struct sk_buff;
@@ -11,6 +14,7 @@
 struct sock_diag_handler {
 	__u8 family;
 	int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
+	int (*get_info)(struct sk_buff *skb, struct sock *sk);
 };
 
 int sock_diag_register(const struct sock_diag_handler *h);
@@ -26,4 +30,42 @@
 int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
 			     struct sk_buff *skb, int attrtype);
 
+static inline
+enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
+{
+	switch (sk->sk_family) {
+	case AF_INET:
+		switch (sk->sk_protocol) {
+		case IPPROTO_TCP:
+			return SKNLGRP_INET_TCP_DESTROY;
+		case IPPROTO_UDP:
+			return SKNLGRP_INET_UDP_DESTROY;
+		default:
+			return SKNLGRP_NONE;
+		}
+	case AF_INET6:
+		switch (sk->sk_protocol) {
+		case IPPROTO_TCP:
+			return SKNLGRP_INET6_TCP_DESTROY;
+		case IPPROTO_UDP:
+			return SKNLGRP_INET6_UDP_DESTROY;
+		default:
+			return SKNLGRP_NONE;
+		}
+	default:
+		return SKNLGRP_NONE;
+	}
+}
+
+static inline
+bool sock_diag_has_destroy_listeners(const struct sock *sk)
+{
+	const struct net *n = sock_net(sk);
+	const enum sknetlink_groups group = sock_diag_destroy_group(sk);
+
+	return group != SKNLGRP_NONE && n->diag_nlsk &&
+		netlink_has_listeners(n->diag_nlsk, group);
+}
+void sock_diag_broadcast_destroy(struct sock *sk);
+
 #endif
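The two helpers above split the destroy notification into a cheap gate and the actual broadcast, so the common no-listener case costs only a group lookup. A minimal sketch of the intended call site; example_sk_destruct is hypothetical:

/* At socket destruction: broadcast on the per-family/per-protocol
 * SKNLGRP_*_DESTROY group only if someone is actually listening.
 */
static void example_sk_destruct(struct sock *sk)
{
	if (sock_diag_has_destroy_listeners(sk))
		sock_diag_broadcast_destroy(sk);
}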
diff --git a/include/linux/spi/cc2520.h b/include/linux/spi/cc2520.h
index e741e8b..85b8ee6 100644
--- a/include/linux/spi/cc2520.h
+++ b/include/linux/spi/cc2520.h
@@ -21,7 +21,6 @@
 	int sfd;
 	int reset;
 	int vreg;
-	bool amplified;
 };
 
 #endif
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 7f484a2..c735f5c 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -99,6 +99,7 @@
 	int phy_addr;
 	int interface;
 	struct stmmac_mdio_bus_data *mdio_bus_data;
+	struct device_node *phy_node;
 	struct stmmac_dma_cfg *dma_cfg;
 	int clk_csr;
 	int has_gmac;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index e8bbf40..48c3696 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -149,11 +149,16 @@
 				 * sum(delta(rcv_nxt)), or how many bytes
 				 * were acked.
 				 */
+	u32	segs_in;	/* RFC4898 tcpEStatsPerfSegsIn
+				 * total number of segments in.
+				 */
  	u32	rcv_nxt;	/* What we want to receive next 	*/
 	u32	copied_seq;	/* Head of yet unread data		*/
 	u32	rcv_wup;	/* rcv_nxt on last window update sent	*/
  	u32	snd_nxt;	/* Next sequence we send		*/
-
+	u32	segs_out;	/* RFC4898 tcpEStatsPerfSegsOut
+				 * The total number of segments sent.
+				 */
 	u64	bytes_acked;	/* RFC4898 tcpEStatsAppHCThruOctetsAcked
 				 * sum(delta(snd_una)), or how many bytes
 				 * were acked.
@@ -201,6 +206,7 @@
 		syn_fastopen:1,	/* SYN includes Fast Open option */
 		syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
 		syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
+		save_syn:1,	/* Save headers of SYN packet */
 		is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
 	u32	tlp_high_seq;	/* snd_nxt at the time of TLP retransmit. */
 
@@ -328,6 +334,7 @@
 	 * socket. Used to retransmit SYNACKs etc.
 	 */
 	struct request_sock *fastopen_rsk;
+	u32	*saved_syn;
 };
 
 enum tsq_flags {
@@ -395,4 +402,10 @@
 	return 0;
 }
 
+static inline void tcp_saved_syn_free(struct tcp_sock *tp)
+{
+	kfree(tp->saved_syn);
+	tp->saved_syn = NULL;
+}
+
 #endif	/* _LINUX_TCP_H */
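The new save_syn flag and saved_syn buffer let a listener retain the headers of the SYN that created a connection. A hedged userspace sketch, assuming the TCP_SAVE_SYN/TCP_SAVED_SYN socket options introduced elsewhere in this series (they are not part of this hunk); example_fetch_syn is hypothetical and error handling is elided:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Arm SYN saving on the listener before listen(); read the saved
 * network+TCP headers back once on each accepted socket.
 */
static int example_fetch_syn(int listen_fd, int conn_fd,
			     char *buf, socklen_t *len)
{
	int one = 1;

	setsockopt(listen_fd, IPPROTO_TCP, TCP_SAVE_SYN,
		   &one, sizeof(one));
	return getsockopt(conn_fd, IPPROTO_TCP, TCP_SAVED_SYN, buf, len);
}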
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 4b4439e..df89c9bc 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -68,11 +68,12 @@
 };
 
 
+static inline void u64_stats_init(struct u64_stats_sync *syncp)
+{
 #if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
-# define u64_stats_init(syncp)	seqcount_init(syncp.seq)
-#else
-# define u64_stats_init(syncp)	do { } while (0)
+	seqcount_init(&syncp->seq);
 #endif
+}
 
 static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 80456f7..def59d3 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -142,6 +142,7 @@
 void ipv6_mc_remap(struct inet6_dev *idev);
 void ipv6_mc_init_dev(struct inet6_dev *idev);
 void ipv6_mc_destroy_dev(struct inet6_dev *idev);
+int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed);
 void addrconf_dad_failure(struct inet6_ifaddr *ifp);
 
 bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index a175ba4..4a167b3 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -39,7 +39,6 @@
 };
 
 #define UNIXCB(skb) 	(*(struct unix_skb_parms *)&((skb)->cb))
-#define UNIXSID(skb)	(&UNIXCB((skb)).secid)
 
 #define unix_state_lock(s)	spin_lock(&unix_sk(s)->lock)
 #define unix_state_unlock(s)	spin_unlock(&unix_sk(s)->lock)
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 172632d..db639a4 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -74,7 +74,7 @@
 struct sock *__vsock_create(struct net *net,
 			    struct socket *sock,
 			    struct sock *parent,
-			    gfp_t priority, unsigned short type);
+			    gfp_t priority, unsigned short type, int kern);
 
 /**** TRANSPORT ****/
 
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 7dba805..38d8a34 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -365,8 +365,19 @@
 int l2cap_init(void);
 void l2cap_exit(void);
 
+#if IS_ENABLED(CONFIG_BT_BREDR)
 int sco_init(void);
 void sco_exit(void);
+#else
+static inline int sco_init(void)
+{
+	return 0;
+}
+
+static inline void sco_exit(void)
+{
+}
+#endif
 
 int mgmt_init(void);
 void mgmt_exit(void);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index d95da83..7ca6690 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1202,6 +1202,16 @@
 	__le16   accuracy;
 } __packed;
 
+#define HCI_OP_READ_ENC_KEY_SIZE	0x1408
+struct hci_cp_read_enc_key_size {
+	__le16   handle;
+} __packed;
+struct hci_rp_read_enc_key_size {
+	__u8     status;
+	__le16   handle;
+	__u8     key_size;
+} __packed;
+
 #define HCI_OP_READ_LOCAL_AMP_INFO	0x1409
 struct hci_rp_read_local_amp_info {
 	__u8     status;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index a056c2b..3bd618d 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -156,16 +156,22 @@
 };
 
 struct adv_info {
-	struct delayed_work timeout_exp;
+	struct list_head list;
+	bool pending;
 	__u8	instance;
 	__u32	flags;
 	__u16	timeout;
+	__u16	remaining_time;
+	__u16	duration;
 	__u16	adv_data_len;
 	__u8	adv_data[HCI_MAX_AD_LENGTH];
 	__u16	scan_rsp_len;
 	__u8	scan_rsp_data[HCI_MAX_AD_LENGTH];
 };
 
+#define HCI_MAX_ADV_INSTANCES		5
+#define HCI_DEFAULT_ADV_DURATION	2
+
 #define HCI_MAX_SHORT_NAME_LENGTH	10
 
 /* Default LE RPA expiry time, 15 minutes */
@@ -373,7 +379,11 @@
 	__u8			scan_rsp_data[HCI_MAX_AD_LENGTH];
 	__u8			scan_rsp_data_len;
 
-	struct adv_info		adv_instance;
+	struct list_head	adv_instances;
+	unsigned int		adv_instance_cnt;
+	__u8			cur_adv_instance;
+	__u16			adv_instance_timeout;
+	struct delayed_work	adv_instance_expire;
 
 	__u8			irk[16];
 	__u32			rpa_timeout;
@@ -530,10 +540,22 @@
 /* ----- HCI interface to upper protocols ----- */
 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
 int l2cap_disconn_ind(struct hci_conn *hcon);
-int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
 
+#if IS_ENABLED(CONFIG_BT_BREDR)
 int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
-int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
+void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
+#else
+static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
+				  __u8 *flags)
+{
+	return 0;
+}
+
+static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+{
+}
+#endif
 
 /* ----- Inquiry cache ----- */
 #define INQUIRY_CACHE_AGE_MAX   (HZ*30)   /* 30 seconds */
@@ -561,11 +583,6 @@
 	hdev->discovery.scan_duration = 0;
 }
 
-static inline void adv_info_init(struct hci_dev *hdev)
-{
-	memset(&hdev->adv_instance, 0, sizeof(struct adv_info));
-}
-
 bool hci_discovery_active(struct hci_dev *hdev);
 
 void hci_discovery_set_state(struct hci_dev *hdev, int state);
@@ -1007,6 +1024,15 @@
 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
 			       u8 bdaddr_type);
 
+void hci_adv_instances_clear(struct hci_dev *hdev);
+struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance);
+struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance);
+int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
+			 u16 adv_data_len, u8 *adv_data,
+			 u16 scan_rsp_len, u8 *scan_rsp_data,
+			 u16 timeout, u16 duration);
+int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
+
 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
 
 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
@@ -1350,6 +1376,7 @@
 int mgmt_powered(struct hci_dev *hdev, u8 powered);
 int mgmt_update_adv_data(struct hci_dev *hdev);
 void mgmt_discoverable_timeout(struct hci_dev *hdev);
+void mgmt_adv_timeout_expired(struct hci_dev *hdev);
 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
 		       bool persistent);
 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
@@ -1408,7 +1435,7 @@
 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
 		      u16 to_multiplier);
 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
-							__u8 ltk[16]);
+		      __u8 ltk[16], __u8 key_size);
 
 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
 			       u8 *bdaddr_type);
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index ea6546d..c28aca2 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -63,6 +63,9 @@
 	BOND_OPT_LP_INTERVAL,
 	BOND_OPT_SLAVES,
 	BOND_OPT_TLB_DYNAMIC_LB,
+	BOND_OPT_AD_ACTOR_SYS_PRIO,
+	BOND_OPT_AD_ACTOR_SYSTEM,
+	BOND_OPT_AD_USER_PORT_KEY,
 	BOND_OPT_LAST
 };
 
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 78ed135..20defc0 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -136,6 +136,9 @@
 	int packets_per_slave;
 	int tlb_dynamic_lb;
 	struct reciprocal_value reciprocal_packets_per_slave;
+	u16 ad_actor_sys_prio;
+	u16 ad_user_port_key;
+	u8 ad_actor_system[ETH_ALEN];
 };
 
 struct bond_parm_tbl {
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index f8d6813..a741678 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -111,7 +111,7 @@
  *	This may be due to the driver or due to regulatory bandwidth
  *	restrictions.
  * @IEEE80211_CHAN_INDOOR_ONLY: see %NL80211_FREQUENCY_ATTR_INDOOR_ONLY
- * @IEEE80211_CHAN_GO_CONCURRENT: see %NL80211_FREQUENCY_ATTR_GO_CONCURRENT
+ * @IEEE80211_CHAN_IR_CONCURRENT: see %NL80211_FREQUENCY_ATTR_IR_CONCURRENT
  * @IEEE80211_CHAN_NO_20MHZ: 20 MHz bandwidth is not permitted
  *	on this channel.
  * @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
@@ -129,7 +129,7 @@
 	IEEE80211_CHAN_NO_80MHZ		= 1<<7,
 	IEEE80211_CHAN_NO_160MHZ	= 1<<8,
 	IEEE80211_CHAN_INDOOR_ONLY	= 1<<9,
-	IEEE80211_CHAN_GO_CONCURRENT	= 1<<10,
+	IEEE80211_CHAN_IR_CONCURRENT	= 1<<10,
 	IEEE80211_CHAN_NO_20MHZ		= 1<<11,
 	IEEE80211_CHAN_NO_10MHZ		= 1<<12,
 };
@@ -4575,13 +4575,15 @@
  * @ie: information elements of the deauth/disassoc frame (may be %NULL)
  * @ie_len: length of IEs
  * @reason: reason code for the disconnection, set it to 0 if unknown
+ * @locally_generated: disconnection was requested locally
  * @gfp: allocation flags
  *
  * After it calls this function, the driver should enter an idle state
  * and not try to connect to any AP any more.
  */
 void cfg80211_disconnected(struct net_device *dev, u16 reason,
-			   const u8 *ie, size_t ie_len, gfp_t gfp);
+			   const u8 *ie, size_t ie_len,
+			   bool locally_generated, gfp_t gfp);
 
 /**
  * cfg80211_ready_on_channel - notification of remain_on_channel start
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 6ea16c8..290a9a6 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -44,6 +44,8 @@
 	int	(*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel);
 	int	(*set_cca_mode)(struct wpan_phy *wpan_phy,
 				const struct wpan_phy_cca *cca);
+	int     (*set_cca_ed_level)(struct wpan_phy *wpan_phy, s32 ed_level);
+	int     (*set_tx_power)(struct wpan_phy *wpan_phy, s32 power);
 	int	(*set_pan_id)(struct wpan_phy *wpan_phy,
 			      struct wpan_dev *wpan_dev, __le16 pan_id);
 	int	(*set_short_addr)(struct wpan_phy *wpan_phy,
@@ -61,14 +63,66 @@
 				struct wpan_dev *wpan_dev, bool mode);
 };
 
+static inline bool
+wpan_phy_supported_bool(bool b, enum nl802154_supported_bool_states st)
+{
+	switch (st) {
+	case NL802154_SUPPORTED_BOOL_TRUE:
+		return b;
+	case NL802154_SUPPORTED_BOOL_FALSE:
+		return !b;
+	case NL802154_SUPPORTED_BOOL_BOTH:
+		return true;
+	default:
+		WARN_ON(1);
+	}
+
+	return false;
+}
+
+struct wpan_phy_supported {
+	u32 channels[IEEE802154_MAX_PAGE + 1],
+	    cca_modes, cca_opts, iftypes;
+	enum nl802154_supported_bool_states lbt;
+	u8 min_minbe, max_minbe, min_maxbe, max_maxbe,
+	   min_csma_backoffs, max_csma_backoffs;
+	s8 min_frame_retries, max_frame_retries;
+	size_t tx_powers_size, cca_ed_levels_size;
+	const s32 *tx_powers, *cca_ed_levels;
+};
+
 struct wpan_phy_cca {
 	enum nl802154_cca_modes mode;
 	enum nl802154_cca_opts opt;
 };
 
-struct wpan_phy {
-	struct mutex pib_lock;
+static inline bool
+wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b)
+{
+	if (a->mode != b->mode)
+		return false;
 
+	if (a->mode == NL802154_CCA_ENERGY_CARRIER)
+		return a->opt == b->opt;
+
+	return true;
+}
+
+/**
+ * @WPAN_PHY_FLAG_TXPOWER: Indicates that the transceiver supports
+ *	transmit power setting.
+ * @WPAN_PHY_FLAG_CCA_ED_LEVEL: Indicates that the transceiver supports
+ *	CCA ED level setting.
+ * @WPAN_PHY_FLAG_CCA_MODE: Indicates that the transceiver supports
+ *	CCA mode setting.
+ */
+enum wpan_phy_flags {
+	WPAN_PHY_FLAG_TXPOWER		= BIT(1),
+	WPAN_PHY_FLAG_CCA_ED_LEVEL	= BIT(2),
+	WPAN_PHY_FLAG_CCA_MODE		= BIT(3),
+};
+
+struct wpan_phy {
 	/* If multiple wpan_phys are registered and you're handed e.g.
 	 * a regular netdev with assigned ieee802154_ptr, you won't
 	 * know whether it points to a wpan_phy your driver has registered
@@ -77,6 +131,8 @@
 	 */
 	const void *privid;
 
+	u32 flags;
+
 	/*
 	 * This is a PIB according to 802.15.4-2011.
 	 * We do not provide timing-related variables, as they
@@ -84,12 +140,14 @@
 	 */
 	u8 current_channel;
 	u8 current_page;
-	u32 channels_supported[IEEE802154_MAX_PAGE + 1];
-	s8 transmit_power;
+	struct wpan_phy_supported supported;
+	/* current transmit_power in mBm */
+	s32 transmit_power;
 	struct wpan_phy_cca cca;
 
 	__le64 perm_extended_addr;
 
+	/* current cca ed threshold in mBm */
 	s32 cca_ed_level;
 
 	/* PHY depended MAC PIB values */
@@ -121,9 +179,9 @@
 	__le64 extended_addr;
 
 	/* MAC BSN field */
-	u8 bsn;
+	atomic_t bsn;
 	/* MAC DSN field */
-	u8 dsn;
+	atomic_t dsn;
 
 	u8 min_be;
 	u8 max_be;
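wpan_phy_supported_bool() above folds a tri-state capability (true/false/both) into a single check against the requested value. A minimal sketch of validating an LBT request with it; example_set_lbt is hypothetical:

/* Reject a listen-before-talk setting the transceiver cannot do;
 * NL802154_SUPPORTED_BOOL_BOTH accepts either request.
 */
static int example_set_lbt(struct wpan_phy *phy, bool on)
{
	if (!wpan_phy_supported_bool(on, phy->supported.lbt))
		return -EOPNOTSUPP;
	/* program the transceiver here */
	return 0;
}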
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 0a55ac7..2d1d73c 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -122,7 +122,9 @@
 
 static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
 {
-	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from), to));
+	__wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
+
+	*sum = csum_fold(csum_add(tmp, (__force __wsum)to));
 }
 
 /* Implements RFC 1624 (Incremental Internet Checksum)
diff --git a/include/net/codel.h b/include/net/codel.h
index 1e18005..267e702 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -7,7 +7,7 @@
  *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
  *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
  *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
- *  Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -119,12 +119,14 @@
 /**
  * struct codel_params - contains codel parameters
  * @target:	target queue size (in time units)
+ * @ce_threshold:	threshold for marking packets with ECN CE
  * @interval:	width of moving time window
  * @mtu:	device mtu, or minimal queue backlog in bytes.
  * @ecn:	is Explicit Congestion Notification enabled
  */
 struct codel_params {
 	codel_time_t	target;
+	codel_time_t	ce_threshold;
 	codel_time_t	interval;
 	u32		mtu;
 	bool		ecn;
@@ -161,19 +163,24 @@
  * @maxpacket:	largest packet we've seen so far
  * @drop_count:	temp count of dropped packets in dequeue()
  * ecn_mark:	number of packets we ECN marked instead of dropping
+ * @ce_mark:	number of packets CE marked because sojourn time was above ce_threshold
  */
 struct codel_stats {
 	u32		maxpacket;
 	u32		drop_count;
 	u32		ecn_mark;
+	u32		ce_mark;
 };
 
+#define CODEL_DISABLED_THRESHOLD INT_MAX
+
 static void codel_params_init(struct codel_params *params,
 			      const struct Qdisc *sch)
 {
 	params->interval = MS2TIME(100);
 	params->target = MS2TIME(5);
 	params->mtu = psched_mtu(qdisc_dev(sch));
+	params->ce_threshold = CODEL_DISABLED_THRESHOLD;
 	params->ecn = false;
 }
 
@@ -354,6 +361,9 @@
 						    vars->rec_inv_sqrt);
 	}
 end:
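+	/* CE-mark (instead of dropping) packets whose sojourn time is
+	 * above the configured ce_threshold and account them in ce_mark.
+	 */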
+	if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
+	    INET_ECN_set_ce(skb))
+		stats->ce_mark++;
 	return skb;
 }
 #endif
diff --git a/include/net/dst.h b/include/net/dst.h
index 0fb99a2..2bc73f8a 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -109,7 +109,6 @@
 extern const u32 dst_default_metrics[];
 
 #define DST_METRICS_READ_ONLY		0x1UL
-#define DST_METRICS_FORCE_OVERWRITE	0x2UL
 #define DST_METRICS_FLAGS		0x3UL
 #define __DST_METRICS_PTR(Y)	\
 	((u32 *)((Y) & ~DST_METRICS_FLAGS))
@@ -120,11 +119,6 @@
 	return dst->_metrics & DST_METRICS_READ_ONLY;
 }
 
-static inline void dst_metrics_set_force_overwrite(struct dst_entry *dst)
-{
-	dst->_metrics |= DST_METRICS_FORCE_OVERWRITE;
-}
-
 void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
 
 static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
@@ -355,18 +349,6 @@
 	__skb_tunnel_rx(skb, dev, net);
 }
 
-/* Children define the path of the packet through the
- * Linux networking.  Thus, destinations are stackable.
- */
-
-static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
-{
-	struct dst_entry *child = dst_clone(skb_dst(skb)->child);
-
-	skb_dst_drop(skb);
-	return child;
-}
-
 int dst_discard_sk(struct sock *sk, struct sk_buff *skb);
 static inline int dst_discard(struct sk_buff *skb)
 {
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 6d67383..903a55e 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -36,7 +36,8 @@
 	void			*result;
 	struct fib_rule		*rule;
 	int			flags;
-#define FIB_LOOKUP_NOREF	1
+#define FIB_LOOKUP_NOREF		1
+#define FIB_LOOKUP_IGNORE_LINKSTATE	2
 };
 
 struct fib_rules_ops {
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
new file mode 100644
index 0000000..1a8c224
--- /dev/null
+++ b/include/net/flow_dissector.h
@@ -0,0 +1,220 @@
+#ifndef _NET_FLOW_DISSECTOR_H
+#define _NET_FLOW_DISSECTOR_H
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/in6.h>
+#include <uapi/linux/if_ether.h>
+
+/**
+ * struct flow_dissector_key_control:
+ * @thoff: Transport header offset
+ * @addr_type: Type of the addresses dissected (one of the
+ *	FLOW_DISSECTOR_KEY_*_ADDRS values)
+ */
+struct flow_dissector_key_control {
+	u16	thoff;
+	u16	addr_type;
+};
+
+/**
+ * struct flow_dissector_key_basic:
+ * @n_proto: Network header protocol (e.g. IPv4/IPv6)
+ * @ip_proto: Transport header protocol (e.g. TCP/UDP)
+ */
+struct flow_dissector_key_basic {
+	__be16	n_proto;
+	u8	ip_proto;
+	u8	padding;
+};
+
+struct flow_dissector_key_tags {
+	u32	vlan_id:12,
+		flow_label:20;
+};
+
+struct flow_dissector_key_keyid {
+	__be32	keyid;
+};
+
+/**
+ * struct flow_dissector_key_ipv4_addrs:
+ * @src: source ip address
+ * @dst: destination ip address
+ */
+struct flow_dissector_key_ipv4_addrs {
+	/* (src,dst) must be grouped, in the same way as in the IP header */
+	__be32 src;
+	__be32 dst;
+};
+
+/**
+ * struct flow_dissector_key_ipv6_addrs:
+ * @src: source ip address
+ * @dst: destination ip address
+ */
+struct flow_dissector_key_ipv6_addrs {
+	/* (src,dst) must be grouped, in the same way as in the IP header */
+	struct in6_addr src;
+	struct in6_addr dst;
+};
+
+/**
+ * struct flow_dissector_key_tipc_addrs:
+ * @srcnode: source node address
+ */
+struct flow_dissector_key_tipc_addrs {
+	__be32 srcnode;
+};
+
+/**
+ * struct flow_dissector_key_addrs:
+ * @v4addrs: IPv4 addresses
+ * @v6addrs: IPv6 addresses
+ */
+struct flow_dissector_key_addrs {
+	union {
+		struct flow_dissector_key_ipv4_addrs v4addrs;
+		struct flow_dissector_key_ipv6_addrs v6addrs;
+		struct flow_dissector_key_tipc_addrs tipcaddrs;
+	};
+};
+
+/**
+ * struct flow_dissector_key_ports:
+ *	@ports: port numbers of Transport header
+ *		src: source port number
+ *		dst: destination port number
+ */
+struct flow_dissector_key_ports {
+	union {
+		__be32 ports;
+		struct {
+			__be16 src;
+			__be16 dst;
+		};
+	};
+};
+
+/**
+ * struct flow_dissector_key_eth_addrs:
+ * @src: source Ethernet address
+ * @dst: destination Ethernet address
+ */
+struct flow_dissector_key_eth_addrs {
+	/* (dst,src) must be grouped, in the same way as in the Ethernet header */
+	unsigned char dst[ETH_ALEN];
+	unsigned char src[ETH_ALEN];
+};
+
+enum flow_dissector_key_id {
+	FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */
+	FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */
+	FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */
+	FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */
+	FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */
+	FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
+	FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */
+	FLOW_DISSECTOR_KEY_VLANID, /* struct flow_dissector_key_flow_tags */
+	FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */
+	FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */
+	FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */
+
+	FLOW_DISSECTOR_KEY_MAX,
+};
+
+struct flow_dissector_key {
+	enum flow_dissector_key_id key_id;
+	size_t offset; /* offset of struct flow_dissector_key_*
+			  in the target struct */
+};
+
+struct flow_dissector {
+	unsigned int used_keys; /* each bit represents presence of one key id */
+	unsigned short int offset[FLOW_DISSECTOR_KEY_MAX];
+};
+
+void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
+			     const struct flow_dissector_key *key,
+			     unsigned int key_count);
+
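+/* Usage sketch (illustrative; "struct my_flow" and "my_keys" are
+ * hypothetical names): a user that only needs addresses and ports
+ * describes where each dissected key lands in its container struct:
+ *
+ *	static const struct flow_dissector_key my_keys[] = {
+ *		{ FLOW_DISSECTOR_KEY_CONTROL,
+ *		  offsetof(struct my_flow, control) },
+ *		{ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ *		  offsetof(struct my_flow, addrs) },
+ *		{ FLOW_DISSECTOR_KEY_PORTS,
+ *		  offsetof(struct my_flow, ports) },
+ *	};
+ *
+ *	skb_flow_dissector_init(&dissector, my_keys, ARRAY_SIZE(my_keys));
+ */
+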
+bool __skb_flow_dissect(const struct sk_buff *skb,
+			struct flow_dissector *flow_dissector,
+			void *target_container,
+			void *data, __be16 proto, int nhoff, int hlen);
+
+static inline bool skb_flow_dissect(const struct sk_buff *skb,
+				    struct flow_dissector *flow_dissector,
+				    void *target_container)
+{
+	return __skb_flow_dissect(skb, flow_dissector, target_container,
+				  NULL, 0, 0, 0);
+}
+
+struct flow_keys {
+	struct flow_dissector_key_control control;
+#define FLOW_KEYS_HASH_START_FIELD basic
+	struct flow_dissector_key_basic basic;
+	struct flow_dissector_key_tags tags;
+	struct flow_dissector_key_keyid keyid;
+	struct flow_dissector_key_ports ports;
+	struct flow_dissector_key_addrs addrs;
+};
+
+#define FLOW_KEYS_HASH_OFFSET		\
+	offsetof(struct flow_keys, FLOW_KEYS_HASH_START_FIELD)
+
+__be32 flow_get_u32_src(const struct flow_keys *flow);
+__be32 flow_get_u32_dst(const struct flow_keys *flow);
+
+extern struct flow_dissector flow_keys_dissector;
+extern struct flow_dissector flow_keys_buf_dissector;
+
+static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
+					      struct flow_keys *flow)
+{
+	memset(flow, 0, sizeof(*flow));
+	return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
+				  NULL, 0, 0, 0);
+}
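+
+/* Usage sketch (illustrative): extracting a flow's 5-tuple
+ *
+ *	struct flow_keys keys;
+ *
+ *	if (skb_flow_dissect_flow_keys(skb, &keys))
+ *		pr_debug("proto %u %pI4:%u -> %pI4:%u\n",
+ *			 keys.basic.ip_proto,
+ *			 &keys.addrs.v4addrs.src, ntohs(keys.ports.src),
+ *			 &keys.addrs.v4addrs.dst, ntohs(keys.ports.dst));
+ */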
+
+static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
+						  void *data, __be16 proto,
+						  int nhoff, int hlen)
+{
+	memset(flow, 0, sizeof(*flow));
+	return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
+				  data, proto, nhoff, hlen);
+}
+
+__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
+			    void *data, int hlen_proto);
+
+static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
+					int thoff, u8 ip_proto)
+{
+	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
+}
+
+u32 flow_hash_from_keys(struct flow_keys *keys);
+void __skb_get_hash(struct sk_buff *skb);
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+		   const struct flow_keys *keys, int hlen);
+
+/* struct flow_keys_digest:
+ *
+ * This structure is used to hold a digest of the full flow keys. This is a
+ * larger "hash" of a flow to allow definitively matching specific flows where
+ * the 32 bit skb->hash is not large enough. The size is limited to 16 bytes so
+ * that it can be used in the CB of an skb (see sch_choke for an example).
+ */
+#define FLOW_KEYS_DIGEST_LEN	16
+struct flow_keys_digest {
+	u8	data[FLOW_KEYS_DIGEST_LEN];
+};
+
+void make_flow_keys_digest(struct flow_keys_digest *digest,
+			   const struct flow_keys *flow);
+
+#endif
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
deleted file mode 100644
index dc8fd81..0000000
--- a/include/net/flow_keys.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef _NET_FLOW_KEYS_H
-#define _NET_FLOW_KEYS_H
-
-/* struct flow_keys:
- *	@src: source ip address in case of IPv4
- *	      For IPv6 it contains 32bit hash of src address
- *	@dst: destination ip address in case of IPv4
- *	      For IPv6 it contains 32bit hash of dst address
- *	@ports: port numbers of Transport header
- *		port16[0]: src port number
- *		port16[1]: dst port number
- *	@thoff: Transport header offset
- *	@n_proto: Network header protocol (eg. IPv4/IPv6)
- *	@ip_proto: Transport header protocol (eg. TCP/UDP)
- * All the members, except thoff, are in network byte order.
- */
-struct flow_keys {
-	/* (src,dst) must be grouped, in the same way than in IP header */
-	__be32 src;
-	__be32 dst;
-	union {
-		__be32 ports;
-		__be16 port16[2];
-	};
-	u16	thoff;
-	__be16	n_proto;
-	u8	ip_proto;
-};
-
-bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
-			void *data, __be16 proto, int nhoff, int hlen);
-static inline bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
-{
-	return __skb_flow_dissect(skb, flow, NULL, 0, 0, 0);
-}
-__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
-			    void *data, int hlen_proto);
-static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
-{
-	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
-}
-u32 flow_hash_from_keys(struct flow_keys *keys);
-unsigned int flow_get_hlen(const unsigned char *data, unsigned int max_len,
-			   __be16 protocol);
-#endif
diff --git a/include/net/geneve.h b/include/net/geneve.h
index 14fb8d3..2a0543a 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -62,6 +62,11 @@
 	struct geneve_opt options[];
 };
 
+static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
+{
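+	/* the GENEVE header immediately follows the UDP header */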
+	return (struct genevehdr *)(udp_hdr(skb) + 1);
+}
+
 #ifdef CONFIG_INET
 struct geneve_sock;
 
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index 94a2970..2c10a9f 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -346,15 +346,15 @@
 struct wpan_phy;
 
 enum {
-	IEEE802154_LLSEC_PARAM_ENABLED = 1 << 0,
-	IEEE802154_LLSEC_PARAM_FRAME_COUNTER = 1 << 1,
-	IEEE802154_LLSEC_PARAM_OUT_LEVEL = 1 << 2,
-	IEEE802154_LLSEC_PARAM_OUT_KEY = 1 << 3,
-	IEEE802154_LLSEC_PARAM_KEY_SOURCE = 1 << 4,
-	IEEE802154_LLSEC_PARAM_PAN_ID = 1 << 5,
-	IEEE802154_LLSEC_PARAM_HWADDR = 1 << 6,
-	IEEE802154_LLSEC_PARAM_COORD_HWADDR = 1 << 7,
-	IEEE802154_LLSEC_PARAM_COORD_SHORTADDR = 1 << 8,
+	IEEE802154_LLSEC_PARAM_ENABLED		= BIT(0),
+	IEEE802154_LLSEC_PARAM_FRAME_COUNTER	= BIT(1),
+	IEEE802154_LLSEC_PARAM_OUT_LEVEL	= BIT(2),
+	IEEE802154_LLSEC_PARAM_OUT_KEY		= BIT(3),
+	IEEE802154_LLSEC_PARAM_KEY_SOURCE	= BIT(4),
+	IEEE802154_LLSEC_PARAM_PAN_ID		= BIT(5),
+	IEEE802154_LLSEC_PARAM_HWADDR		= BIT(6),
+	IEEE802154_LLSEC_PARAM_COORD_HWADDR	= BIT(7),
+	IEEE802154_LLSEC_PARAM_COORD_SHORTADDR	= BIT(8),
 };
 
 struct ieee802154_llsec_ops {
@@ -422,16 +422,6 @@
 			       struct ieee802154_mac_params *params);
 
 	struct ieee802154_llsec_ops *llsec;
-
-	/* The fields below are required. */
-
-	/*
-	 * FIXME: these should become the part of PIB/MIB interface.
-	 * However we still don't have IB interface of any kind
-	 */
-	__le16 (*get_pan_id)(const struct net_device *dev);
-	__le16 (*get_short_addr)(const struct net_device *dev);
-	u8 (*get_dsn)(const struct net_device *dev);
 };
 
 static inline struct ieee802154_mlme_ops *
@@ -440,10 +430,4 @@
 	return dev->ml_priv;
 }
 
-static inline struct ieee802154_reduced_mlme_ops *
-ieee802154_reduced_mlme_ops(const struct net_device *dev)
-{
-	return dev->ml_priv;
-}
-
 #endif
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 4a92423..279f835 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -41,7 +41,7 @@
 
 static inline void inet_ctl_sock_destroy(struct sock *sk)
 {
-	sk_release_kernel(sk);
+	sock_release(sk->sk_socket);
 }
 
 #endif
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 8d17655..e1300b3 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -43,7 +43,7 @@
  * @len: total length of the original datagram
  * @meat: length of received fragments so far
  * @flags: fragment queue flags
- * @max_size: (ipv4 only) maximum received fragment size with IP_DF set
+ * @max_size: maximum received fragment size
  * @net: namespace that this frag belongs to
  */
 struct inet_frag_queue {
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 73fe0f9..b73c88a 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -24,7 +24,6 @@
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/wait.h>
-#include <linux/vmalloc.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_sock.h>
@@ -148,8 +147,6 @@
 	 */
 	struct inet_listen_hashbucket	listening_hash[INET_LHTABLE_SIZE]
 					____cacheline_aligned_in_smp;
-
-	atomic_t			bsockets;
 };
 
 static inline struct inet_ehash_bucket *inet_ehash_bucket(
@@ -166,52 +163,12 @@
 	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
 }
 
-static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
-{
-	unsigned int i, size = 256;
-#if defined(CONFIG_PROVE_LOCKING)
-	unsigned int nr_pcpus = 2;
-#else
-	unsigned int nr_pcpus = num_possible_cpus();
-#endif
-	if (nr_pcpus >= 4)
-		size = 512;
-	if (nr_pcpus >= 8)
-		size = 1024;
-	if (nr_pcpus >= 16)
-		size = 2048;
-	if (nr_pcpus >= 32)
-		size = 4096;
-	if (sizeof(spinlock_t) != 0) {
-#ifdef CONFIG_NUMA
-		if (size * sizeof(spinlock_t) > PAGE_SIZE)
-			hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));
-		else
-#endif
-		hashinfo->ehash_locks =	kmalloc(size * sizeof(spinlock_t),
-						GFP_KERNEL);
-		if (!hashinfo->ehash_locks)
-			return ENOMEM;
-		for (i = 0; i < size; i++)
-			spin_lock_init(&hashinfo->ehash_locks[i]);
-	}
-	hashinfo->ehash_locks_mask = size - 1;
-	return 0;
-}
+int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);
 
 static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
 {
-	if (hashinfo->ehash_locks) {
-#ifdef CONFIG_NUMA
-		unsigned int size = (hashinfo->ehash_locks_mask + 1) *
-							sizeof(spinlock_t);
-		if (size > PAGE_SIZE)
-			vfree(hashinfo->ehash_locks);
-		else
-#endif
-		kfree(hashinfo->ehash_locks);
-		hashinfo->ehash_locks = NULL;
-	}
+	kvfree(hashinfo->ehash_locks);
+	hashinfo->ehash_locks = NULL;
 }
 
 struct inet_bind_bucket *
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index b6c3737..47eb67b 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -187,6 +187,7 @@
 				transparent:1,
 				mc_all:1,
 				nodefrag:1;
+	__u8			bind_address_no_port:1;
 	__u8			rcv_tos;
 	__u8			convert_csum;
 	int			uc_index;
diff --git a/include/net/ip.h b/include/net/ip.h
index d14af7e..0750a18 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -31,7 +31,7 @@
 #include <net/route.h>
 #include <net/snmp.h>
 #include <net/flow.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
 
 struct sock;
 
@@ -45,6 +45,7 @@
 #define IPSKB_FRAG_COMPLETE	BIT(3)
 #define IPSKB_REROUTED		BIT(4)
 #define IPSKB_DOREDIRECT	BIT(5)
+#define IPSKB_FRAG_PMTU		BIT(6)
 
 	u16			frag_max_size;
 };
@@ -108,9 +109,8 @@
 int ip_mr_input(struct sk_buff *skb);
 int ip_output(struct sock *sk, struct sk_buff *skb);
 int ip_mc_output(struct sock *sk, struct sk_buff *skb);
-int ip_fragment(struct sock *sk, struct sk_buff *skb,
-		int (*output)(struct sock *, struct sk_buff *));
-int ip_do_nat(struct sk_buff *skb);
+int ip_do_fragment(struct sock *sk, struct sk_buff *skb,
+		   int (*output)(struct sock *, struct sk_buff *));
 void ip_send_check(struct iphdr *ip);
 int __ip_local_out(struct sk_buff *skb);
 int ip_local_out_sk(struct sock *sk, struct sk_buff *skb);
@@ -355,15 +355,32 @@
 				  skb->len, proto, 0);
 }
 
+/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
+ * Equivalent to:	flow->v4addrs.src = iph->saddr;
+ *			flow->v4addrs.dst = iph->daddr;
+ */
+static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
+					    const struct iphdr *iph)
+{
+	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
+		     offsetof(typeof(flow->addrs), v4addrs.src) +
+			      sizeof(flow->addrs.v4addrs.src));
+	memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
+	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+}
+
 static inline void inet_set_txhash(struct sock *sk)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct flow_keys keys;
 
-	keys.src = inet->inet_saddr;
-	keys.dst = inet->inet_daddr;
-	keys.port16[0] = inet->inet_sport;
-	keys.port16[1] = inet->inet_dport;
+	memset(&keys, 0, sizeof(keys));
+
+	keys.addrs.v4addrs.src = inet->inet_saddr;
+	keys.addrs.v4addrs.dst = inet->inet_daddr;
+	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+	keys.ports.src = inet->inet_sport;
+	keys.ports.dst = inet->inet_dport;
 
 	sk->sk_txhash = flow_hash_from_keys(&keys);
 }
@@ -478,6 +495,16 @@
 	IP_DEFRAG_MACVLAN,
 };
 
+/* Return true if the value of 'user' is between 'lower_bound'
+ * and 'upper_bound' inclusive.
+ */
+static inline bool ip_defrag_user_in_between(u32 user,
+					     enum ip_defrag_users lower_bound,
+					     enum ip_defrag_users upper_bound)
+{
+	return user >= lower_bound && user <= upper_bound;
+}
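+
+/* Usage sketch (illustrative): testing for the conntrack input range
+ *
+ *	if (ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
+ *				      __IP_DEFRAG_CONNTRACK_IN_END))
+ *		...
+ */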
+
 int ip_defrag(struct sk_buff *skb, u32 user);
 #ifdef CONFIG_INET
 struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user);
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 20e80fa..3b76849 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -120,45 +120,19 @@
 	struct rt6key			rt6i_src;
 	struct rt6key			rt6i_prefsrc;
 
+	struct list_head		rt6i_uncached;
+	struct uncached_list		*rt6i_uncached_list;
+
 	struct inet6_dev		*rt6i_idev;
-	unsigned long			_rt6i_peer;
+	struct rt6_info * __percpu	*rt6i_pcpu;
 
 	u32				rt6i_metric;
+	u32				rt6i_pmtu;
 	/* more non-fragment space at head required */
 	unsigned short			rt6i_nfheader_len;
 	u8				rt6i_protocol;
 };
 
-static inline struct inet_peer *rt6_peer_ptr(struct rt6_info *rt)
-{
-	return inetpeer_ptr(rt->_rt6i_peer);
-}
-
-static inline bool rt6_has_peer(struct rt6_info *rt)
-{
-	return inetpeer_ptr_is_peer(rt->_rt6i_peer);
-}
-
-static inline void __rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
-{
-	__inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
-}
-
-static inline bool rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
-{
-	return inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
-}
-
-static inline void rt6_init_peer(struct rt6_info *rt, struct inet_peer_base *base)
-{
-	inetpeer_init_ptr(&rt->_rt6i_peer, base);
-}
-
-static inline void rt6_transfer_peer(struct rt6_info *rt, struct rt6_info *ort)
-{
-	inetpeer_transfer_peer(&rt->_rt6i_peer, &ort->_rt6i_peer);
-}
-
 static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
 {
 	return ((struct rt6_info *)dst)->rt6i_idev;
@@ -189,13 +163,12 @@
 	rt0->rt6i_flags |= RTF_EXPIRES;
 }
 
-static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
+static inline u32 rt6_get_cookie(const struct rt6_info *rt)
 {
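+	/* per-cpu and uncached clones inherit the cookie of the route
+	 * they were created from (dst.from)
+	 */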
-	struct dst_entry *new = (struct dst_entry *) from;
+	if (rt->rt6i_flags & RTF_PCPU || unlikely(rt->dst.flags & DST_NOCACHE))
+		rt = (struct rt6_info *)(rt->dst.from);
 
-	rt->rt6i_flags &= ~RTF_EXPIRES;
-	dst_hold(new);
-	rt->dst.from = new;
+	return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
 }
 
 static inline void ip6_rt_put(struct rt6_info *rt)
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 5e19206..297629a 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -145,7 +145,7 @@
 #ifdef CONFIG_IPV6_SUBTREES
 	np->saddr_cache = saddr;
 #endif
-	np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+	np->dst_cookie = rt6_get_cookie(rt);
 }
 
 static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
@@ -163,11 +163,14 @@
 	return rt->rt6i_flags & RTF_LOCAL;
 }
 
-static inline bool ipv6_anycast_destination(const struct sk_buff *skb)
+static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
+					    const struct in6_addr *daddr)
 {
-	struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);
+	struct rt6_info *rt = (struct rt6_info *)dst;
 
-	return rt->rt6i_flags & RTF_ANYCAST;
+	return rt->rt6i_flags & RTF_ANYCAST ||
+		(rt->rt6i_dst.plen != 128 &&
+		 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
 }
 
 int ip6_fragment(struct sock *sk, struct sk_buff *skb,
@@ -194,9 +197,15 @@
 	       inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
 }
 
-static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt)
+static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
+					   struct in6_addr *daddr)
 {
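+	/* without a gateway, the destination itself is the next hop:
+	 * cached clones (RTF_CACHE) carry it in rt6i_dst, otherwise
+	 * fall back to the caller-provided daddr
+	 */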
-	return &rt->rt6i_gateway;
+	if (rt->rt6i_flags & RTF_GATEWAY)
+		return &rt->rt6i_gateway;
+	else if (unlikely(rt->rt6i_flags & RTF_CACHE))
+		return &rt->rt6i_dst.addr;
+	else
+		return daddr;
 }
 
 #endif
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 54271ed..49c142b 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -226,7 +226,7 @@
 }
 
 static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
-			     struct fib_result *res)
+			     struct fib_result *res, unsigned int flags)
 {
 	struct fib_table *tb;
 	int err = -ENETUNREACH;
@@ -234,7 +234,7 @@
 	rcu_read_lock();
 
 	tb = fib_get_table(net, RT_TABLE_MAIN);
-	if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
+	if (tb && !fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF))
 		err = 0;
 
 	rcu_read_unlock();
@@ -249,16 +249,18 @@
 struct fib_table *fib_new_table(struct net *net, u32 id);
 struct fib_table *fib_get_table(struct net *net, u32 id);
 
-int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res);
+int __fib_lookup(struct net *net, struct flowi4 *flp,
+		 struct fib_result *res, unsigned int flags);
 
 static inline int fib_lookup(struct net *net, struct flowi4 *flp,
-			     struct fib_result *res)
+			     struct fib_result *res, unsigned int flags)
 {
 	struct fib_table *tb;
 	int err;
 
+	flags |= FIB_LOOKUP_NOREF;
 	if (net->ipv4.fib_has_custom_rules)
-		return __fib_lookup(net, flp, res);
+		return __fib_lookup(net, flp, res, flags);
 
 	rcu_read_lock();
 
@@ -266,11 +268,11 @@
 
 	for (err = 0; !err; err = -ENETUNREACH) {
 		tb = rcu_dereference_rtnl(net->ipv4.fib_main);
-		if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
+		if (tb && !fib_table_lookup(tb, flp, res, flags))
 			break;
 
 		tb = rcu_dereference_rtnl(net->ipv4.fib_default);
-		if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
+		if (tb && !fib_table_lookup(tb, flp, res, flags))
 			break;
 	}
 
@@ -305,9 +307,9 @@
 
 /* Exported by fib_semantics.c */
 int ip_fib_check_default(__be32 gw, struct net_device *dev);
-int fib_sync_down_dev(struct net_device *dev, int force);
+int fib_sync_down_dev(struct net_device *dev, unsigned long event);
 int fib_sync_down_addr(struct net *net, __be32 local);
-int fib_sync_up(struct net_device *dev);
+int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
 void fib_select_multipath(struct fib_result *res);
 
 /* Exported by fib_trie.c */
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index eec8ad3..82dbdb0 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -19,7 +19,7 @@
 #include <net/if_inet6.h>
 #include <net/ndisc.h>
 #include <net/flow.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
 #include <net/snmp.h>
 
 #define SIN6_LEN_RFC2133	24
@@ -239,8 +239,10 @@
 	struct net		*fl_net;
 };
 
-#define IPV6_FLOWINFO_MASK	cpu_to_be32(0x0FFFFFFF)
-#define IPV6_FLOWLABEL_MASK	cpu_to_be32(0x000FFFFF)
+#define IPV6_FLOWINFO_MASK		cpu_to_be32(0x0FFFFFFF)
+#define IPV6_FLOWLABEL_MASK		cpu_to_be32(0x000FFFFF)
+#define IPV6_FLOWLABEL_STATELESS_FLAG	cpu_to_be32(0x00080000)
+
 #define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
 #define IPV6_TCLASS_SHIFT	20
 
@@ -669,8 +671,9 @@
 	return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
 }
 
-void ipv6_select_ident(struct net *net, struct frag_hdr *fhdr,
-		       struct rt6_info *rt);
+__be32 ipv6_select_ident(struct net *net,
+			 const struct in6_addr *daddr,
+			 const struct in6_addr *saddr);
 void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);
 
 int ip6_dst_hoplimit(struct dst_entry *dst);
@@ -689,6 +692,20 @@
 	return hlimit;
 }
 
+/* copy IPv6 saddr & daddr to flow_keys, possibly using 64bit load/store
+ * Equivalent to:	flow->v6addrs.src = iph->saddr;
+ *			flow->v6addrs.dst = iph->daddr;
+ */
+static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
+					    const struct ipv6hdr *iph)
+{
+	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) !=
+		     offsetof(typeof(flow->addrs), v6addrs.src) +
+		     sizeof(flow->addrs.v6addrs.src));
+	memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs));
+	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+}
+
 #if IS_ENABLED(CONFIG_IPV6)
 static inline void ip6_set_txhash(struct sock *sk)
 {
@@ -696,10 +713,15 @@
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct flow_keys keys;
 
-	keys.src = (__force __be32)ipv6_addr_hash(&np->saddr);
-	keys.dst = (__force __be32)ipv6_addr_hash(&sk->sk_v6_daddr);
-	keys.port16[0] = inet->inet_sport;
-	keys.port16[1] = inet->inet_dport;
+	memset(&keys, 0, sizeof(keys));
+
+	memcpy(&keys.addrs.v6addrs.src, &np->saddr,
+	       sizeof(keys.addrs.v6addrs.src));
+	memcpy(&keys.addrs.v6addrs.dst, &sk->sk_v6_daddr,
+	       sizeof(keys.addrs.v6addrs.dst));
+	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+	keys.ports.src = inet->inet_sport;
+	keys.ports.dst = inet->inet_dport;
 
 	sk->sk_txhash = flow_hash_from_keys(&keys);
 }
@@ -719,6 +741,9 @@
 		hash ^= hash >> 12;
 
 		flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
+
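+		/* with flowlabel_state_ranges enabled, auto-generated
+		 * (stateless) labels are confined to the half of the
+		 * label space that has this flag bit set
+		 */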
+		if (net->ipv6.sysctl.flowlabel_state_ranges)
+			flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
 	}
 
 	return flowlabel;
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index 0134681..fe994d2 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -96,7 +96,7 @@
 }
 
 struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
-			  struct proto *prot);
+			  struct proto *prot, int kern);
 void llc_sk_free(struct sock *sk);
 
 void llc_sk_reset(struct sock *sk);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index fc57f6b..6b1077c 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -337,10 +337,16 @@
  * enum ieee80211_event_type - event to be notified to the low level driver
  * @RSSI_EVENT: AP's rssi crossed the a threshold set by the driver.
  * @MLME_EVENT: event related to MLME
+ * @BAR_RX_EVENT: a BAR was received
+ * @BA_FRAME_TIMEOUT: Frames were released from the reordering buffer because
+ *	they timed out. This won't be called for each frame released, but only
+ *	once each time the timeout triggers.
  */
 enum ieee80211_event_type {
 	RSSI_EVENT,
 	MLME_EVENT,
+	BAR_RX_EVENT,
+	BA_FRAME_TIMEOUT,
 };
 
 /**
@@ -400,17 +406,31 @@
 };
 
 /**
+ * struct ieee80211_ba_event - data attached for BlockAck related events
+ * @sta: pointer to the &ieee80211_sta to which this event relates
+ * @tid: the tid
+ * @ssn: the starting sequence number (for %BAR_RX_EVENT)
+ */
+struct ieee80211_ba_event {
+	struct ieee80211_sta *sta;
+	u16 tid;
+	u16 ssn;
+};
+
+/**
  * struct ieee80211_event - event to be sent to the driver
  * @type: The event itself. See &enum ieee80211_event_type.
  * @rssi: relevant if &type is %RSSI_EVENT
  * @mlme: relevant if &type is %AUTH_EVENT
- * @u:    union holding the above two fields
+ * @ba: relevant if &type is %BAR_RX_EVENT or %BA_FRAME_TIMEOUT
+ * @u: union holding the fields above
  */
 struct ieee80211_event {
 	enum ieee80211_event_type type;
 	union {
 		struct ieee80211_rssi_event rssi;
 		struct ieee80211_mlme_event mlme;
+		struct ieee80211_ba_event ba;
 	} u;
 };
 
@@ -426,12 +446,8 @@
  * @ibss_creator: indicates if a new IBSS network is being created
  * @aid: association ID number, valid only when @assoc is true
  * @use_cts_prot: use CTS protection
- * @use_short_preamble: use 802.11b short preamble;
- *	if the hardware cannot handle this it must set the
- *	IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE hardware flag
- * @use_short_slot: use short slot time (only relevant for ERP);
- *	if the hardware cannot handle this it must set the
- *	IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE hardware flag
+ * @use_short_preamble: use 802.11b short preamble
+ * @use_short_slot: use short slot time (only relevant for ERP)
  * @dtim_period: num of beacons before the next DTIM, for beaconing,
  *	valid in station mode only if after the driver was notified
  *	with the %BSS_CHANGED_BEACON_INFO flag, will be non-zero then.
@@ -855,6 +871,9 @@
 			/* 4 bytes free */
 		} control;
 		struct {
+			u64 cookie;
+		} ack;
+		struct {
 			struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
 			s32 ack_signal;
 			u8 ampdu_ack_len;
@@ -1459,6 +1478,9 @@
  *	wants to be given when a frame is transmitted and needs to be
  *	encrypted in hardware.
  * @cipher: The key's cipher suite selector.
+ * @tx_pn: PN used for TX on non-TKIP keys, may be used by the driver
+ *	as well if it needs to do software PN assignment by itself
+ *	(e.g. due to TSO)
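+ *	(typically advanced with atomic64_inc_return(&keyconf->tx_pn))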
  * @flags: key flags, see &enum ieee80211_key_flags.
  * @keyidx: the key index (0-3)
  * @keylen: key material length
@@ -1471,6 +1493,7 @@
  * @iv_len: The IV length for this key type
  */
 struct ieee80211_key_conf {
+	atomic64_t tx_pn;
 	u32 cipher;
 	u8 icv_len;
 	u8 iv_len;
@@ -1481,6 +1504,47 @@
 	u8 key[0];
 };
 
+#define IEEE80211_MAX_PN_LEN	16
+
+/**
+ * struct ieee80211_key_seq - key sequence counter
+ *
+ * @tkip: TKIP data, containing IV32 and IV16 in host byte order
+ * @ccmp: PN data, most significant byte first (big endian,
+ *	reverse order than in packet)
+ * @aes_cmac: PN data, most significant byte first (big endian,
+ *	reverse order than in packet)
+ * @aes_gmac: PN data, most significant byte first (big endian,
+ *	reverse order than in packet)
+ * @gcmp: PN data, most significant byte first (big endian,
+ *	reverse order than in packet)
+ * @hw: data for HW-only (e.g. cipher scheme) keys
+ */
+struct ieee80211_key_seq {
+	union {
+		struct {
+			u32 iv32;
+			u16 iv16;
+		} tkip;
+		struct {
+			u8 pn[6];
+		} ccmp;
+		struct {
+			u8 pn[6];
+		} aes_cmac;
+		struct {
+			u8 pn[6];
+		} aes_gmac;
+		struct {
+			u8 pn[6];
+		} gcmp;
+		struct {
+			u8 seq[IEEE80211_MAX_PN_LEN];
+			u8 seq_len;
+		} hw;
+	};
+};
+
 /**
  * struct ieee80211_cipher_scheme - cipher scheme
  *
@@ -1667,8 +1731,7 @@
  * @sta: station table entry, %NULL for per-vif queue
  * @tid: the TID for this queue (unused for per-vif queue)
  * @ac: the AC for this queue
- * @drv_priv: data area for driver use, will always be aligned to
- *	sizeof(void *).
+ * @drv_priv: driver private area, sized by hw->txq_data_size
  *
  * The driver can obtain packets from this queue by calling
  * ieee80211_tx_dequeue().
@@ -1717,13 +1780,6 @@
  *	multicast frames when there are power saving stations so that
  *	the driver can fetch them with ieee80211_get_buffered_bc().
  *
- * @IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE:
- *	Hardware is not capable of short slot operation on the 2.4 GHz band.
- *
- * @IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE:
- *	Hardware is not capable of receiving frames with short preamble on
- *	the 2.4 GHz band.
- *
  * @IEEE80211_HW_SIGNAL_UNSPEC:
  *	Hardware can provide signal values but we don't know its units. We
  *	expect values between 0 and @max_signal.
@@ -1798,6 +1854,10 @@
  *	the driver returns 1. This also forces the driver to advertise its
  *	supported cipher suites.
  *
+ * @IEEE80211_HW_SUPPORT_FAST_XMIT: The driver/hardware supports fast-xmit,
+ *	this currently requires only the ability to calculate the duration
+ *	for frames.
+ *
  * @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface
  *	queue mapping in order to use different queues (not just one per AC)
  *	for different virtual interfaces. See the doc section on HW queue
@@ -1825,41 +1885,44 @@
  * @IEEE80211_HW_SUPPORTS_CLONED_SKBS: The driver will never modify the payload
  *	or tailroom of TX skbs without copying them first.
  *
- * @IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
+ * @IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
  *	in one command, mac80211 doesn't have to run separate scans per band.
+ *
+ * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
  */
 enum ieee80211_hw_flags {
-	IEEE80211_HW_HAS_RATE_CONTROL			= 1<<0,
-	IEEE80211_HW_RX_INCLUDES_FCS			= 1<<1,
-	IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING	= 1<<2,
-	IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE		= 1<<3,
-	IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE	= 1<<4,
-	IEEE80211_HW_SIGNAL_UNSPEC			= 1<<5,
-	IEEE80211_HW_SIGNAL_DBM				= 1<<6,
-	IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC		= 1<<7,
-	IEEE80211_HW_SPECTRUM_MGMT			= 1<<8,
-	IEEE80211_HW_AMPDU_AGGREGATION			= 1<<9,
-	IEEE80211_HW_SUPPORTS_PS			= 1<<10,
-	IEEE80211_HW_PS_NULLFUNC_STACK			= 1<<11,
-	IEEE80211_HW_SUPPORTS_DYNAMIC_PS		= 1<<12,
-	IEEE80211_HW_MFP_CAPABLE			= 1<<13,
-	IEEE80211_HW_WANT_MONITOR_VIF			= 1<<14,
-	IEEE80211_HW_NO_AUTO_VIF			= 1<<15,
-	IEEE80211_HW_SW_CRYPTO_CONTROL			= 1<<16,
-	/* free slots */
-	IEEE80211_HW_REPORTS_TX_ACK_STATUS		= 1<<18,
-	IEEE80211_HW_CONNECTION_MONITOR			= 1<<19,
-	IEEE80211_HW_QUEUE_CONTROL			= 1<<20,
-	IEEE80211_HW_SUPPORTS_PER_STA_GTK		= 1<<21,
-	IEEE80211_HW_AP_LINK_PS				= 1<<22,
-	IEEE80211_HW_TX_AMPDU_SETUP_IN_HW		= 1<<23,
-	IEEE80211_HW_SUPPORTS_RC_TABLE			= 1<<24,
-	IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF		= 1<<25,
-	IEEE80211_HW_TIMING_BEACON_ONLY			= 1<<26,
-	IEEE80211_HW_SUPPORTS_HT_CCK_RATES		= 1<<27,
-	IEEE80211_HW_CHANCTX_STA_CSA			= 1<<28,
-	IEEE80211_HW_SUPPORTS_CLONED_SKBS		= 1<<29,
-	IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS		= 1<<30,
+	IEEE80211_HW_HAS_RATE_CONTROL,
+	IEEE80211_HW_RX_INCLUDES_FCS,
+	IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING,
+	IEEE80211_HW_SIGNAL_UNSPEC,
+	IEEE80211_HW_SIGNAL_DBM,
+	IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC,
+	IEEE80211_HW_SPECTRUM_MGMT,
+	IEEE80211_HW_AMPDU_AGGREGATION,
+	IEEE80211_HW_SUPPORTS_PS,
+	IEEE80211_HW_PS_NULLFUNC_STACK,
+	IEEE80211_HW_SUPPORTS_DYNAMIC_PS,
+	IEEE80211_HW_MFP_CAPABLE,
+	IEEE80211_HW_WANT_MONITOR_VIF,
+	IEEE80211_HW_NO_AUTO_VIF,
+	IEEE80211_HW_SW_CRYPTO_CONTROL,
+	IEEE80211_HW_SUPPORT_FAST_XMIT,
+	IEEE80211_HW_REPORTS_TX_ACK_STATUS,
+	IEEE80211_HW_CONNECTION_MONITOR,
+	IEEE80211_HW_QUEUE_CONTROL,
+	IEEE80211_HW_SUPPORTS_PER_STA_GTK,
+	IEEE80211_HW_AP_LINK_PS,
+	IEEE80211_HW_TX_AMPDU_SETUP_IN_HW,
+	IEEE80211_HW_SUPPORTS_RC_TABLE,
+	IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF,
+	IEEE80211_HW_TIMING_BEACON_ONLY,
+	IEEE80211_HW_SUPPORTS_HT_CCK_RATES,
+	IEEE80211_HW_CHANCTX_STA_CSA,
+	IEEE80211_HW_SUPPORTS_CLONED_SKBS,
+	IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
+
+	/* keep last, obviously */
+	NUM_IEEE80211_HW_FLAGS
 };
 
 /**
@@ -1940,8 +2003,8 @@
  *	Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values.
  *
  * @netdev_features: netdev features to be set in each netdev created
- *	from this HW. Note only HW checksum features are currently
- *	compatible with mac80211. Other feature bits will be rejected.
+ *	from this HW. Note that not all features are usable with mac80211,
+ *	other features will be rejected during HW registration.
  *
  * @uapsd_queues: This bitmap is included in (re)association frame to indicate
  *	for each access category if it is uAPSD trigger-enabled and delivery-
@@ -1966,7 +2029,7 @@
 	struct wiphy *wiphy;
 	const char *rate_control_algorithm;
 	void *priv;
-	u32 flags;
+	unsigned long flags[BITS_TO_LONGS(NUM_IEEE80211_HW_FLAGS)];
 	unsigned int extra_tx_headroom;
 	unsigned int extra_beacon_tailroom;
 	int vif_data_size;
@@ -1992,6 +2055,20 @@
 	int txq_ac_max_pending;
 };
 
+static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
+				       enum ieee80211_hw_flags flg)
+{
+	return test_bit(flg, hw->flags);
+}
+#define ieee80211_hw_check(hw, flg)	_ieee80211_hw_check(hw, IEEE80211_HW_##flg)
+
+static inline void _ieee80211_hw_set(struct ieee80211_hw *hw,
+				     enum ieee80211_hw_flags flg)
+{
+	return __set_bit(flg, hw->flags);
+}
+#define ieee80211_hw_set(hw, flg)	_ieee80211_hw_set(hw, IEEE80211_HW_##flg)
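+
+/* Usage sketch (illustrative): instead of the old bitmask operation
+ *
+ *	hw->flags |= IEEE80211_HW_SIGNAL_DBM;
+ *
+ * drivers now set capability bits with
+ *
+ *	ieee80211_hw_set(hw, SIGNAL_DBM);
+ *
+ * and mac80211 tests them with ieee80211_hw_check(hw, SIGNAL_DBM).
+ */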
+
 /**
  * struct ieee80211_scan_request - hw scan request
  *
@@ -2505,10 +2582,6 @@
  * stack. It is always safe to pass more frames than requested,
  * but this has negative impact on power consumption.
  *
- * @FIF_PROMISC_IN_BSS: promiscuous mode within your BSS,
- *	think of the BSS as your network segment and then this corresponds
- *	to the regular ethernet device promiscuous mode.
- *
  * @FIF_ALLMULTI: pass all multicast frames, this is used if requested
  *	by the user or if the hardware is not capable of filtering by
  *	multicast address.
@@ -2525,18 +2598,16 @@
  *	mac80211 needs to do and the amount of CPU wakeups, so you should
  *	honour this flag if possible.
  *
- * @FIF_CONTROL: pass control frames (except for PS Poll), if PROMISC_IN_BSS
- * 	is not set then only those addressed to this station.
+ * @FIF_CONTROL: pass control frames (except for PS Poll) addressed to this
+ *	station
  *
  * @FIF_OTHER_BSS: pass frames destined to other BSSes
  *
- * @FIF_PSPOLL: pass PS Poll frames, if PROMISC_IN_BSS is not set then only
- * 	those addressed to this station.
+ * @FIF_PSPOLL: pass PS Poll frames
  *
  * @FIF_PROBE_REQ: pass probe request frames
  */
 enum ieee80211_filter_flags {
-	FIF_PROMISC_IN_BSS	= 1<<0,
 	FIF_ALLMULTI		= 1<<1,
 	FIF_FCSFAIL		= 1<<2,
 	FIF_PLCPFAIL		= 1<<3,
@@ -2819,9 +2890,9 @@
  * 	Returns zero if statistics are available.
  *	The callback can sleep.
  *
- * @get_tkip_seq: If your device implements TKIP encryption in hardware this
- *	callback should be provided to read the TKIP transmit IVs (both IV32
- *	and IV16) for the given key from hardware.
+ * @get_key_seq: If your device implements encryption in hardware and does
+ *	IV/PN assignment then this callback should be provided to read the
+ *	IV/PN for the given key from hardware.
  *	The callback must be atomic.
  *
  * @set_frag_threshold: Configuration of fragmentation threshold. Assign this
@@ -3004,7 +3075,7 @@
  *	The callback can sleep.
  * @event_callback: Notify driver about any event in mac80211. See
  *	&enum ieee80211_event_type for the different types.
- *	The callback can sleep.
+ *	The callback must be atomic.
  *
  * @release_buffered_frames: Release buffered frames according to the given
  *	parameters. In the case where the driver buffers some frames for
@@ -3220,8 +3291,9 @@
 				 struct ieee80211_vif *vif);
 	int (*get_stats)(struct ieee80211_hw *hw,
 			 struct ieee80211_low_level_stats *stats);
-	void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx,
-			     u32 *iv32, u16 *iv16);
+	void (*get_key_seq)(struct ieee80211_hw *hw,
+			    struct ieee80211_key_conf *key,
+			    struct ieee80211_key_seq *seq);
 	int (*set_frag_threshold)(struct ieee80211_hw *hw, u32 value);
 	int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value);
 	int (*sta_add)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -3469,14 +3541,15 @@
 };
 
 #ifdef CONFIG_MAC80211_LEDS
-char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
-					 unsigned int flags,
-					 const struct ieee80211_tpt_blink *blink_table,
-					 unsigned int blink_table_len);
+const char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
+const char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
+const char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
+const char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
+const char *
+__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
+				   unsigned int flags,
+				   const struct ieee80211_tpt_blink *blink_table,
+				   unsigned int blink_table_len);
 #endif
 /**
  * ieee80211_get_tx_led_name - get name of TX LED
@@ -3490,7 +3563,7 @@
  *
  * Return: The name of the LED trigger. %NULL if not configured for LEDs.
  */
-static inline char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
 {
 #ifdef CONFIG_MAC80211_LEDS
 	return __ieee80211_get_tx_led_name(hw);
@@ -3511,7 +3584,7 @@
  *
  * Return: The name of the LED trigger. %NULL if not configured for LEDs.
  */
-static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
 {
 #ifdef CONFIG_MAC80211_LEDS
 	return __ieee80211_get_rx_led_name(hw);
@@ -3532,7 +3605,7 @@
  *
  * Return: The name of the LED trigger. %NULL if not configured for LEDs.
  */
-static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
 {
 #ifdef CONFIG_MAC80211_LEDS
 	return __ieee80211_get_assoc_led_name(hw);
@@ -3553,7 +3626,7 @@
  *
  * Return: The name of the LED trigger. %NULL if not configured for LEDs.
  */
-static inline char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
 {
 #ifdef CONFIG_MAC80211_LEDS
 	return __ieee80211_get_radio_led_name(hw);
@@ -3574,7 +3647,7 @@
  *
  * Note: This function must be called before ieee80211_register_hw().
  */
-static inline char *
+static inline const char *
 ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw, unsigned int flags,
 				 const struct ieee80211_tpt_blink *blink_table,
 				 unsigned int blink_table_len)
@@ -4255,40 +4328,6 @@
 					u8 *k1, u8 *k2);
 
 /**
- * struct ieee80211_key_seq - key sequence counter
- *
- * @tkip: TKIP data, containing IV32 and IV16 in host byte order
- * @ccmp: PN data, most significant byte first (big endian,
- *	reverse order than in packet)
- * @aes_cmac: PN data, most significant byte first (big endian,
- *	reverse order than in packet)
- * @aes_gmac: PN data, most significant byte first (big endian,
- *	reverse order than in packet)
- * @gcmp: PN data, most significant byte first (big endian,
- *	reverse order than in packet)
- */
-struct ieee80211_key_seq {
-	union {
-		struct {
-			u32 iv32;
-			u16 iv16;
-		} tkip;
-		struct {
-			u8 pn[6];
-		} ccmp;
-		struct {
-			u8 pn[6];
-		} aes_cmac;
-		struct {
-			u8 pn[6];
-		} aes_gmac;
-		struct {
-			u8 pn[6];
-		} gcmp;
-	};
-};
-
-/**
  * ieee80211_get_key_tx_seq - get key TX sequence counter
  *
  * @keyconf: the parameter passed with the set key
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 7df28a4..f534a46 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -31,99 +31,122 @@
  */
 #define MAC802154_FRAME_HARD_HEADER_LEN		(2 + 1 + 20 + 14)
 
-/* The following flags are used to indicate changed address settings from
+/**
+ * enum ieee802154_hw_addr_filt_flags - hardware address filtering flags
+ *
+ * The following flags are used to indicate changed address settings from
  * the stack to the hardware.
+ *
+ * @IEEE802154_AFILT_SADDR_CHANGED: Indicates that the short address will be
+ *	changed.
+ *
+ * @IEEE802154_AFILT_IEEEADDR_CHANGED: Indicates that the extended address
+ *	will be changed.
+ *
+ * @IEEE802154_AFILT_PANID_CHANGED: Indicates that the pan id will be changed.
+ *
+ * @IEEE802154_AFILT_PANC_CHANGED: Indicates that the address filter will
+ *	do frame address filtering as a pan coordinator.
  */
+enum ieee802154_hw_addr_filt_flags {
+	IEEE802154_AFILT_SADDR_CHANGED		= BIT(0),
+	IEEE802154_AFILT_IEEEADDR_CHANGED	= BIT(1),
+	IEEE802154_AFILT_PANID_CHANGED		= BIT(2),
+	IEEE802154_AFILT_PANC_CHANGED		= BIT(3),
+};
 
-/* indicates that the Short Address changed */
-#define IEEE802154_AFILT_SADDR_CHANGED		0x00000001
-/* indicates that the IEEE Address changed */
-#define IEEE802154_AFILT_IEEEADDR_CHANGED	0x00000002
-/* indicates that the PAN ID changed */
-#define IEEE802154_AFILT_PANID_CHANGED		0x00000004
-/* indicates that PAN Coordinator status changed */
-#define IEEE802154_AFILT_PANC_CHANGED		0x00000008
-
+/**
+ * struct ieee802154_hw_addr_filt - hardware address filtering settings
+ *
+ * @pan_id: pan_id which should be set to the hardware address filter.
+ *
+ * @short_addr: short_addr which should be set to the hardware address filter.
+ *
+ * @ieee_addr: extended address which should be set to the hardware address
+ *	filter.
+ *
+ * @pan_coord: whether the hardware filter should operate as a pan coordinator.
+ */
 struct ieee802154_hw_addr_filt {
-	__le16	pan_id;		/* Each independent PAN selects a unique
-				 * identifier. This PAN id allows communication
-				 * between devices within a network using short
-				 * addresses and enables transmissions between
-				 * devices across independent networks.
-				 */
+	__le16	pan_id;
 	__le16	short_addr;
 	__le64	ieee_addr;
-	u8	pan_coord;
+	bool	pan_coord;
 };
 
-struct ieee802154_vif {
-	int type;
-
-	/* must be last */
-	u8 drv_priv[0] __aligned(sizeof(void *));
-};
-
+/**
+ * struct ieee802154_hw - ieee802154 hardware
+ *
+ * @extra_tx_headroom: headroom to reserve in each transmit skb for use by the
+ *	driver (e.g. for transmit headers).
+ *
+ * @flags: hardware flags, see &enum ieee802154_hw_flags
+ *
+ * @parent: parent device of the hardware.
+ *
+ * @priv: pointer to private area that was allocated for driver use along with
+ *	this structure.
+ *
+ * @phy: This points to the &struct wpan_phy allocated for this 802.15.4 PHY.
+ */
 struct ieee802154_hw {
 	/* filled by the driver */
 	int	extra_tx_headroom;
 	u32	flags;
 	struct	device *parent;
+	void	*priv;
 
 	/* filled by mac802154 core */
-	struct	ieee802154_hw_addr_filt hw_filt;
-	void	*priv;
 	struct	wpan_phy *phy;
-	size_t vif_data_size;
 };
 
-/* Checksum is in hardware and is omitted from a packet
+/**
+ * enum ieee802154_hw_flags - hardware flags
  *
- * These following flags are used to indicate hardware capabilities to
+ * These flags are used to indicate hardware capabilities to
  * the stack. Generally, flags here should have their meaning
  * done in a way that the simplest hardware doesn't need setting
  * any particular flags. There are some exceptions to this rule,
  * however, so you are advised to review these flags carefully.
+ *
+ * @IEEE802154_HW_TX_OMIT_CKSUM: Indicates that the xmitter will add the FCS
+ *	on its own.
+ *
+ * @IEEE802154_HW_LBT: Indicates that the transceiver supports listen before
+ *	transmit.
+ *
+ * @IEEE802154_HW_CSMA_PARAMS: Indicates that the transceiver supports csma
+ *	parameters (max_be, min_be, backoff exponents).
+ *
+ * @IEEE802154_HW_FRAME_RETRIES: Indicates that the transceiver supports ARET
+ *	frame retries setting.
+ *
+ * @IEEE802154_HW_AFILT: Indicates that the transceiver supports hardware
+ *	address filter setting.
+ *
+ * @IEEE802154_HW_PROMISCUOUS: Indicates that the transceiver supports
+ *	promiscuous mode setting.
+ *
+ * @IEEE802154_HW_RX_OMIT_CKSUM: Indicates that the receiver omits the FCS.
+ *
+ * @IEEE802154_HW_RX_DROP_BAD_CKSUM: Indicates that the receiver will not
+ *	filter frames with a bad checksum.
  */
-
-/* Indicates that xmitter will add FCS on it's own. */
-#define IEEE802154_HW_TX_OMIT_CKSUM	0x00000001
-/* Indicates that receiver will autorespond with ACK frames. */
-#define IEEE802154_HW_AACK		0x00000002
-/* Indicates that transceiver will support transmit power setting. */
-#define IEEE802154_HW_TXPOWER		0x00000004
-/* Indicates that transceiver will support listen before transmit. */
-#define IEEE802154_HW_LBT		0x00000008
-/* Indicates that transceiver will support cca mode setting. */
-#define IEEE802154_HW_CCA_MODE		0x00000010
-/* Indicates that transceiver will support cca ed level setting. */
-#define IEEE802154_HW_CCA_ED_LEVEL	0x00000020
-/* Indicates that transceiver will support csma (max_be, min_be, csma retries)
- * settings. */
-#define IEEE802154_HW_CSMA_PARAMS	0x00000040
-/* Indicates that transceiver will support ARET frame retries setting. */
-#define IEEE802154_HW_FRAME_RETRIES	0x00000080
-/* Indicates that transceiver will support hardware address filter setting. */
-#define IEEE802154_HW_AFILT		0x00000100
-/* Indicates that transceiver will support promiscuous mode setting. */
-#define IEEE802154_HW_PROMISCUOUS	0x00000200
-/* Indicates that receiver omits FCS. */
-#define IEEE802154_HW_RX_OMIT_CKSUM	0x00000400
-/* Indicates that receiver will not filter frames with bad checksum. */
-#define IEEE802154_HW_RX_DROP_BAD_CKSUM	0x00000800
+enum ieee802154_hw_flags {
+	IEEE802154_HW_TX_OMIT_CKSUM	= BIT(0),
+	IEEE802154_HW_LBT		= BIT(1),
+	IEEE802154_HW_CSMA_PARAMS	= BIT(2),
+	IEEE802154_HW_FRAME_RETRIES	= BIT(3),
+	IEEE802154_HW_AFILT		= BIT(4),
+	IEEE802154_HW_PROMISCUOUS	= BIT(5),
+	IEEE802154_HW_RX_OMIT_CKSUM	= BIT(6),
+	IEEE802154_HW_RX_DROP_BAD_CKSUM	= BIT(7),
+};
 
 /* Indicates that receiver omits FCS and xmitter will add FCS on it's own. */
 #define IEEE802154_HW_OMIT_CKSUM	(IEEE802154_HW_TX_OMIT_CKSUM | \
 					 IEEE802154_HW_RX_OMIT_CKSUM)
 
-/* This groups the most common CSMA support fields into one. */
-#define IEEE802154_HW_CSMA		(IEEE802154_HW_CCA_MODE | \
-					 IEEE802154_HW_CCA_ED_LEVEL | \
-					 IEEE802154_HW_CSMA_PARAMS)
-
-/* This groups the most common ARET support fields into one. */
-#define IEEE802154_HW_ARET		(IEEE802154_HW_CSMA | \
-					 IEEE802154_HW_FRAME_RETRIES)
-
 /* struct ieee802154_ops - callbacks from mac802154 to the driver
  *
  * This structure contains various callbacks that the driver may
@@ -171,7 +194,7 @@
  *	  Returns either zero, or negative errno.
  *
  * set_txpower:
- *	  Set radio transmit power in dB. Called with pib_lock held.
+ *	  Set radio transmit power in mBm. Called with pib_lock held.
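+ *	  (mBm is dBm * 100, e.g. a 10 dBm request is passed as 1000.)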
  *	  Returns either zero, or negative errno.
  *
  * set_lbt
@@ -184,7 +207,7 @@
  *	  Returns either zero, or negative errno.
  *
  * set_cca_ed_level
- *	  Sets the CCA energy detection threshold in dBm. Called with pib_lock
+ *	  Sets the CCA energy detection threshold in mBm. Called with pib_lock
  *	  held.
  *	  Returns either zero, or negative errno.
  *
@@ -213,12 +236,11 @@
 	int		(*set_hw_addr_filt)(struct ieee802154_hw *hw,
 					    struct ieee802154_hw_addr_filt *filt,
 					    unsigned long changed);
-	int		(*set_txpower)(struct ieee802154_hw *hw, s8 dbm);
+	int		(*set_txpower)(struct ieee802154_hw *hw, s32 mbm);
 	int		(*set_lbt)(struct ieee802154_hw *hw, bool on);
 	int		(*set_cca_mode)(struct ieee802154_hw *hw,
 					const struct wpan_phy_cca *cca);
-	int		(*set_cca_ed_level)(struct ieee802154_hw *hw,
-					    s32 level);
+	int		(*set_cca_ed_level)(struct ieee802154_hw *hw, s32 mbm);
 	int		(*set_csma_params)(struct ieee802154_hw *hw,
 					   u8 min_be, u8 max_be, u8 retries);
 	int		(*set_frame_retries)(struct ieee802154_hw *hw,
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index f733656..e951453 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -28,6 +28,8 @@
 #include <net/netns/xfrm.h>
 #include <net/netns/mpls.h>
 #include <linux/ns_common.h>
+#include <linux/idr.h>
+#include <linux/skbuff.h>
 
 struct user_namespace;
 struct proc_dir_entry;
@@ -58,6 +60,7 @@
 	struct list_head	exit_list;	/* Use only net_mutex */
 
 	struct user_namespace   *user_ns;	/* Owning user namespace */
+	spinlock_t		nsid_lock;
 	struct idr		netns_ids;
 
 	struct ns_common	ns;
@@ -271,7 +274,9 @@
 #define __net_initconst	__initconst
 #endif
 
+int peernet2id_alloc(struct net *net, struct net *peer);
 int peernet2id(struct net *net, struct net *peer);
+bool peernet_has_id(struct net *net, struct net *peer);
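+/* peernet2id_alloc() assigns an id to the peer if it has none yet,
+ * peernet2id() is a pure lookup, and peernet_has_id() only tests
+ * whether an id exists.
+ */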
 struct net *get_net_ns_by_id(struct net *net, int id);
 
 struct pernet_operations {
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index 2aa6048..bab824b 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -1,6 +1,66 @@
 #ifndef _BR_NETFILTER_H_
 #define _BR_NETFILTER_H_
 
+#include "../../../net/bridge/br_private.h"
+
+static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
+{
+	skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
+
+	if (likely(skb->nf_bridge))
+		atomic_set(&(skb->nf_bridge->use), 1);
+
+	return skb->nf_bridge;
+}
+
+void nf_bridge_update_protocol(struct sk_buff *skb);
+
+static inline struct nf_bridge_info *
+nf_bridge_info_get(const struct sk_buff *skb)
+{
+	return skb->nf_bridge;
+}
+
+unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb);
+
+static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
+{
+	unsigned int len = nf_bridge_encap_header_len(skb);
+
+	skb_push(skb, len);
+	skb->network_header -= len;
+}
+
+int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb);
+
+static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
+{
+	struct net_bridge_port *port;
+
+	port = br_port_get_rcu(dev);
+	return port ? &port->br->fake_rtable : NULL;
+}
+
+struct net_device *setup_pre_routing(struct sk_buff *skb);
 void br_netfilter_enable(void);
 
+#if IS_ENABLED(CONFIG_IPV6)
+int br_validate_ipv6(struct sk_buff *skb);
+unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
+				    struct sk_buff *skb,
+				    const struct nf_hook_state *state);
+#else
+static inline int br_validate_ipv6(struct sk_buff *skb)
+{
+	return -1;
+}
+
+static inline unsigned int
+br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, struct sk_buff *skb,
+		       const struct nf_hook_state *state)
+{
+	return NF_DROP;
+}
+#endif
+
 #endif /* _BR_NETFILTER_H_ */
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index d81d584..e863585 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -24,6 +24,8 @@
 struct nf_queue_handler {
 	int			(*outfn)(struct nf_queue_entry *entry,
 					 unsigned int queuenum);
+	void			(*nf_hook_drop)(struct net *net,
+						struct nf_hook_ops *ops);
 };
 
 void nf_register_queue_handler(const struct nf_queue_handler *qh);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index e6bcf55..2a24668 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -781,6 +781,7 @@
 };
 
 #define NFT_HOOK_OPS_MAX		2
+#define NFT_BASECHAIN_DISABLED		(1 << 0)
 
 /**
  *	struct nft_base_chain - nf_tables base chain
@@ -791,14 +792,17 @@
  *	@policy: default policy
  *	@stats: per-cpu chain stats
  *	@chain: the chain
+ *	@dev_name: device name that this base chain is attached to (if any)
  */
 struct nft_base_chain {
 	struct nf_hook_ops		ops[NFT_HOOK_OPS_MAX];
 	possible_net_t			pnet;
 	const struct nf_chain_type	*type;
 	u8				policy;
+	u8				flags;
 	struct nft_stats __percpu	*stats;
 	struct nft_chain		chain;
+	char 				dev_name[IFNAMSIZ];
 };
 
 static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain)
@@ -806,6 +810,11 @@
 	return container_of(chain, struct nft_base_chain, chain);
 }
 
+int nft_register_basechain(struct nft_base_chain *basechain,
+			   unsigned int hook_nops);
+void nft_unregister_basechain(struct nft_base_chain *basechain,
+			      unsigned int hook_nops);
+
 unsigned int nft_do_chain(struct nft_pktinfo *pkt,
 			  const struct nf_hook_ops *ops);
 
@@ -830,6 +839,10 @@
 	char				name[NFT_TABLE_MAXNAMELEN];
 };
 
+enum nft_af_flags {
+	NFT_AF_NEEDS_DEV	= (1 << 0),
+};
+
 /**
  *	struct nft_af_info - nf_tables address family info
  *
@@ -838,6 +851,7 @@
  *	@nhooks: number of hooks in this family
  *	@owner: module owner
  *	@tables: used internally
+ *	@flags: family flags
  *	@nops: number of hook ops in this family
  *	@hook_ops_init: initialization function for chain hook ops
  *	@hooks: hookfn overrides for packet validation
@@ -848,6 +862,7 @@
 	unsigned int			nhooks;
 	struct module			*owner;
 	struct list_head		tables;
+	u32				flags;
 	unsigned int			nops;
 	void				(*hook_ops_init)(struct nf_hook_ops *,
 							 unsigned int);
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 614a49b..c68926b 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -19,6 +19,7 @@
 struct local_ports {
 	seqlock_t	lock;
 	int		range[2];
+	bool		warned;
 };
 
 struct ping_group_range {
@@ -77,6 +78,8 @@
 	struct local_ports ip_local_ports;
 
 	int sysctl_tcp_ecn;
+	int sysctl_tcp_ecn_fallback;
+
 	int sysctl_ip_no_pmtu_disc;
 	int sysctl_ip_fwd_use_pmtu;
 	int sysctl_ip_nonlocal_bind;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index d2527bf..8d93544 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -34,6 +34,7 @@
 	int fwmark_reflect;
 	int idgen_retries;
 	int idgen_delay;
+	int flowlabel_state_ranges;
 };
 
 struct netns_ipv6 {
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 8874002..532e4ba 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -1,9 +1,9 @@
 #ifndef __NETNS_NETFILTER_H
 #define __NETNS_NETFILTER_H
 
-#include <linux/proc_fs.h>
-#include <linux/netfilter.h>
+#include <linux/netfilter_defs.h>
 
+struct proc_dir_entry;
 struct nf_logger;
 
 struct netns_nf {
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index eee608b..c807811 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -13,6 +13,7 @@
 	struct nft_af_info	*inet;
 	struct nft_af_info	*arp;
 	struct nft_af_info	*bridge;
+	struct nft_af_info	*netdev;
 	unsigned int		base_seq;
 	u8			gencursor;
 };
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index 3573a81..8ba379f 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -31,6 +31,7 @@
 	struct list_head addr_waitq;
 	struct timer_list addr_wq_timer;
 	struct list_head auto_asconf_splist;
+	/* Lock that protects both addr_waitq and auto_asconf_splist */
 	spinlock_t addr_wq_lock;
 
 	/* Lock that protects the local_addr_list writers */
diff --git a/include/net/netns/x_tables.h b/include/net/netns/x_tables.h
index 4d6597a..c8a7681 100644
--- a/include/net/netns/x_tables.h
+++ b/include/net/netns/x_tables.h
@@ -2,7 +2,7 @@
 #define __NETNS_X_TABLES_H
 
 #include <linux/list.h>
-#include <linux/netfilter.h>
+#include <linux/netfilter_defs.h>
 
 struct ebt_table;
 
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 020a814..316694d 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -179,6 +179,13 @@
 void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata);
 void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev);
 
+static inline int nfc_hci_set_vendor_cmds(struct nfc_hci_dev *hdev,
+					  struct nfc_vendor_cmd *cmds,
+					  int n_cmds)
+{
+	return nfc_set_vendor_cmds(hdev->ndev, cmds, n_cmds);
+}
+
 void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err);
 
 int nfc_hci_result_to_errno(u8 result);
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index a2f2f3d..75d2e18 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -35,6 +35,7 @@
 #define NCI_MAX_NUM_RF_CONFIGS					10
 #define NCI_MAX_NUM_CONN					10
 #define NCI_MAX_PARAM_LEN					251
+#define NCI_MAX_PACKET_SIZE					258
 
 /* NCI Status Codes */
 #define NCI_STATUS_OK						0x00
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index d4dcc71..01fc8c5 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -31,6 +31,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/skbuff.h>
+#include <linux/tty.h>
 
 #include <net/nfc/nfc.h>
 #include <net/nfc/nci.h>
@@ -66,7 +67,14 @@
 
 struct nci_dev;
 
+struct nci_prop_ops {
+	__u16 opcode;
+	int (*rsp)(struct nci_dev *dev, struct sk_buff *skb);
+	int (*ntf)(struct nci_dev *dev, struct sk_buff *skb);
+};
+
 struct nci_ops {
+	int   (*init)(struct nci_dev *ndev);
 	int   (*open)(struct nci_dev *ndev);
 	int   (*close)(struct nci_dev *ndev);
 	int   (*send)(struct nci_dev *ndev, struct sk_buff *skb);
@@ -84,12 +92,16 @@
 				    struct sk_buff *skb);
 	void  (*hci_cmd_received)(struct nci_dev *ndev, u8 pipe, u8 cmd,
 				  struct sk_buff *skb);
+
+	struct nci_prop_ops *prop_ops;
+	size_t n_prop_ops;
 };
 
 #define NCI_MAX_SUPPORTED_RF_INTERFACES		4
 #define NCI_MAX_DISCOVERED_TARGETS		10
 #define NCI_MAX_NUM_NFCEE   255
 #define NCI_MAX_CONN_ID		7
+#define NCI_MAX_PROPRIETARY_CMD 64
 
 struct nci_conn_info {
 	struct list_head list;
@@ -264,6 +276,8 @@
 		void (*req)(struct nci_dev *ndev,
 			    unsigned long opt),
 		unsigned long opt, __u32 timeout);
+int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload);
+
 int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
 int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val);
 
@@ -318,8 +332,19 @@
 	return ndev->driver_data;
 }
 
+static inline int nci_set_vendor_cmds(struct nci_dev *ndev,
+				      struct nfc_vendor_cmd *cmds,
+				      int n_cmds)
+{
+	return nfc_set_vendor_cmds(ndev->nfc_dev, cmds, n_cmds);
+}
+
 void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb);
 void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb);
+int nci_prop_rsp_packet(struct nci_dev *ndev, __u16 opcode,
+			struct sk_buff *skb);
+int nci_prop_ntf_packet(struct nci_dev *ndev, __u16 opcode,
+			struct sk_buff *skb);
 void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb);
 int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload);
 int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb);
@@ -367,4 +392,50 @@
 		 struct sk_buff *skb);
 struct sk_buff *nci_spi_read(struct nci_spi *nspi);
 
+/* ----- NCI UART ---- */
+
+/* Ioctl */
+#define NCIUARTSETDRIVER	_IOW('U', 0, char *)
+
+enum nci_uart_driver {
+	NCI_UART_DRIVER_MARVELL = 0,
+	NCI_UART_DRIVER_MAX
+};
+
+struct nci_uart;
+
+struct nci_uart_ops {
+	int (*open)(struct nci_uart *nci_uart);
+	void (*close)(struct nci_uart *nci_uart);
+	int (*recv)(struct nci_uart *nci_uart, struct sk_buff *skb);
+	int (*recv_buf)(struct nci_uart *nci_uart, const u8 *data, char *flags,
+			int count);
+	int (*send)(struct nci_uart *nci_uart, struct sk_buff *skb);
+	void (*tx_start)(struct nci_uart *nci_uart);
+	void (*tx_done)(struct nci_uart *nci_uart);
+};
+
+struct nci_uart {
+	struct module		*owner;
+	struct nci_uart_ops	ops;
+	const char		*name;
+	enum nci_uart_driver	driver;
+
+	/* Dynamic data */
+	struct nci_dev		*ndev;
+	spinlock_t		rx_lock;
+	struct work_struct	write_work;
+	struct tty_struct	*tty;
+	unsigned long		tx_state;
+	struct sk_buff_head	tx_q;
+	struct sk_buff		*tx_skb;
+	struct sk_buff		*rx_skb;
+	int			rx_packet_len;
+	void			*drv_data;
+};
+
+int nci_uart_register(struct nci_uart *nu);
+void nci_uart_unregister(struct nci_uart *nu);
+void nci_uart_set_config(struct nci_uart *nu, int baudrate, int flow_ctrl);
+
 #endif /* __NCI_CORE_H */
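
Drivers hook vendor-specific NCI responses and notifications through the new
prop_ops table; nci_prop_rsp_packet()/nci_prop_ntf_packet() dispatch on the
opcode. A minimal sketch of the driver side (all acme_* names and the 0x02
oid are hypothetical; nci_opcode_pack() and NCI_GID_PROPRIETARY come from
nci.h):

    static int acme_prop_rsp(struct nci_dev *ndev, struct sk_buff *skb)
    {
        /* decode the proprietary response payload here */
        return 0;
    }

    static struct nci_prop_ops acme_prop_ops[] = {
        {
            .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY, 0x02),
            .rsp    = acme_prop_rsp,
        },
    };

    static struct nci_ops acme_nci_ops = {
        .open       = acme_open,   /* usual transport ops, elided */
        .close      = acme_close,
        .send       = acme_send,
        .prop_ops   = acme_prop_ops,
        .n_prop_ops = ARRAY_SIZE(acme_prop_ops),
    };
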
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 7ac029c..f9e58ae 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -165,6 +165,12 @@
 	struct mutex genl_data_mutex;
 };
 
+struct nfc_vendor_cmd {
+	__u32 vendor_id;
+	__u32 subcmd;
+	int (*doit)(struct nfc_dev *dev, void *data, size_t data_len);
+};
+
 struct nfc_dev {
 	int idx;
 	u32 target_next_idx;
@@ -193,6 +199,9 @@
 
 	struct rfkill *rfkill;
 
+	struct nfc_vendor_cmd *vendor_cmds;
+	int n_vendor_cmds;
+
 	struct nfc_ops *ops;
 };
 #define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
@@ -296,4 +305,17 @@
 void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb,
 			  u8 payload_type, u8 direction);
 
+static inline int nfc_set_vendor_cmds(struct nfc_dev *dev,
+				      struct nfc_vendor_cmd *cmds,
+				      int n_cmds)
+{
+	if (dev->vendor_cmds || dev->n_vendor_cmds)
+		return -EINVAL;
+
+	dev->vendor_cmds = cmds;
+	dev->n_vendor_cmds = n_cmds;
+
+	return 0;
+}
+
 #endif /* __NET_NFC_H */
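
nfc_set_vendor_cmds() is a one-shot registration (a second call returns
-EINVAL), normally reached through the nci/hci wrappers above. A hedged
sketch of a driver registering a single vendor command (everything acme_*
is hypothetical; only struct nfc_vendor_cmd and nci_set_vendor_cmds() come
from these headers):

    static int acme_factory_test(struct nfc_dev *dev, void *data,
                                 size_t data_len)
    {
        /* parse the vendor payload and drive the hardware here */
        return 0;
    }

    static struct nfc_vendor_cmd acme_vendor_cmds[] = {
        {
            .vendor_id = 0x001122,      /* typically the OUI */
            .subcmd    = 0x01,
            .doit      = acme_factory_test,
        },
    };

    /* in the driver's setup path, ndev is the struct nci_dev */
    err = nci_set_vendor_cmds(ndev, acme_vendor_cmds,
                              ARRAY_SIZE(acme_vendor_cmds));
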
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index f8b5bc9..b0ab530 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -100,6 +100,10 @@
 
 	NL802154_ATTR_EXTENDED_ADDR,
 
+	NL802154_ATTR_WPAN_PHY_CAPS,
+
+	NL802154_ATTR_SUPPORTED_COMMANDS,
+
 	/* add attributes here, update the policy in nl802154.c */
 
 	__NL802154_ATTR_AFTER_LAST,
@@ -120,6 +124,61 @@
 };
 
 /**
+ * enum nl802154_wpan_phy_capability_attr - wpan phy capability attributes
+ *
+ * @__NL802154_CAP_ATTR_INVALID: attribute number 0 is reserved
+ * @NL802154_CAP_ATTR_CHANNELS: a nested attribute for nl802154_channel_attr
+ * @NL802154_CAP_ATTR_TX_POWERS: a nested attribute for
+ *	nl802154_wpan_phy_tx_power
+ * @NL802154_CAP_ATTR_CCA_ED_LEVELS: a nested attribute listing the
+ *	supported cca_ed_level values
+ * @NL802154_CAP_ATTR_CCA_MODES: nl802154_cca_modes flags
+ * @NL802154_CAP_ATTR_CCA_OPTS: nl802154_cca_opts flags
+ * @NL802154_CAP_ATTR_MIN_MINBE: minimum of minbe value
+ * @NL802154_CAP_ATTR_MAX_MINBE: maximum of minbe value
+ * @NL802154_CAP_ATTR_MIN_MAXBE: minimum of maxbe value
+ * @NL802154_CAP_ATTR_MAX_MAXBE: maximum of maxbe value
+ * @NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS: minimum of csma backoff value
+ * @NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS: maximum of csma backoffs value
+ * @NL802154_CAP_ATTR_MIN_FRAME_RETRIES: minimum of frame retries value
+ * @NL802154_CAP_ATTR_MAX_FRAME_RETRIES: maximum of frame retries value
+ * @NL802154_CAP_ATTR_IFTYPES: nl802154_iftype flags
+ * @NL802154_CAP_ATTR_LBT: nl802154_supported_bool_states flags
+ * @NL802154_CAP_ATTR_MAX: highest cap attribute currently defined
+ * @__NL802154_CAP_ATTR_AFTER_LAST: internal use
+ */
+enum nl802154_wpan_phy_capability_attr {
+	__NL802154_CAP_ATTR_INVALID,
+
+	NL802154_CAP_ATTR_IFTYPES,
+
+	NL802154_CAP_ATTR_CHANNELS,
+	NL802154_CAP_ATTR_TX_POWERS,
+
+	NL802154_CAP_ATTR_CCA_ED_LEVELS,
+	NL802154_CAP_ATTR_CCA_MODES,
+	NL802154_CAP_ATTR_CCA_OPTS,
+
+	NL802154_CAP_ATTR_MIN_MINBE,
+	NL802154_CAP_ATTR_MAX_MINBE,
+
+	NL802154_CAP_ATTR_MIN_MAXBE,
+	NL802154_CAP_ATTR_MAX_MAXBE,
+
+	NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS,
+	NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS,
+
+	NL802154_CAP_ATTR_MIN_FRAME_RETRIES,
+	NL802154_CAP_ATTR_MAX_FRAME_RETRIES,
+
+	NL802154_CAP_ATTR_LBT,
+
+	/* keep last */
+	__NL802154_CAP_ATTR_AFTER_LAST,
+	NL802154_CAP_ATTR_MAX = __NL802154_CAP_ATTR_AFTER_LAST - 1
+};
+
+/**
  * enum nl802154_cca_modes - cca modes
  *
  * @__NL802154_CCA_INVALID: cca mode number 0 is reserved
@@ -128,7 +187,7 @@
  * @NL802154_CCA_ENERGY_CARRIER: Carrier sense with energy above threshold
  * @NL802154_CCA_ALOHA: CCA shall always report an idle medium
  * @NL802154_CCA_UWB_SHR: UWB preamble sense based on the SHR of a frame
- * @NL802154_CCA_UWB_MULTIPEXED: UWB preamble sense based on the packet with
+ * @NL802154_CCA_UWB_MULTIPLEXED: UWB preamble sense based on the packet with
  *	the multiplexed preamble
  * @__NL802154_CCA_ATTR_AFTER_LAST: Internal
  * @NL802154_CCA_ATTR_MAX: Maximum CCA attribute number
@@ -140,7 +199,7 @@
 	NL802154_CCA_ENERGY_CARRIER,
 	NL802154_CCA_ALOHA,
 	NL802154_CCA_UWB_SHR,
-	NL802154_CCA_UWB_MULTIPEXED,
+	NL802154_CCA_UWB_MULTIPLEXED,
 
 	/* keep last */
 	__NL802154_CCA_ATTR_AFTER_LAST,
@@ -162,4 +221,26 @@
 	NL802154_CCA_OPT_ATTR_MAX = __NL802154_CCA_OPT_ATTR_AFTER_LAST - 1
 };
 
+/**
+ * enum nl802154_supported_bool_states - bool states for bool capability entry
+ *
+ * @NL802154_SUPPORTED_BOOL_FALSE: indicates to set false
+ * @NL802154_SUPPORTED_BOOL_TRUE: indicates to set true
+ * @__NL802154_SUPPORTED_BOOL_INVALD: reserved
+ * @NL802154_SUPPORTED_BOOL_BOTH: indicates to set true and false
+ * @__NL802154_SUPPORTED_BOOL_AFTER_LAST: Internal
+ * @NL802154_SUPPORTED_BOOL_MAX: highest value for bool states
+ */
+enum nl802154_supported_bool_states {
+	NL802154_SUPPORTED_BOOL_FALSE,
+	NL802154_SUPPORTED_BOOL_TRUE,
+	/* to handle them in a mask */
+	__NL802154_SUPPORTED_BOOL_INVALD,
+	NL802154_SUPPORTED_BOOL_BOTH,
+
+	/* keep last */
+	__NL802154_SUPPORTED_BOOL_AFTER_LAST,
+	NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1
+};
+
 #endif /* __NL802154_H */
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 9f4265c..87935ca 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -64,6 +64,7 @@
 	struct timer_list		rsk_timer;
 	const struct request_sock_ops	*rsk_ops;
 	struct sock			*sk;
+	u32				*saved_syn;
 	u32				secid;
 	u32				peer_secid;
 };
@@ -77,7 +78,7 @@
 		req->rsk_ops = ops;
 		sock_hold(sk_listener);
 		req->rsk_listener = sk_listener;
-
+		req->saved_syn = NULL;
 		/* Following is temporary. It is coupled with debugging
 		 * helpers in reqsk_put() & reqsk_free()
 		 */
@@ -104,6 +105,7 @@
 	req->rsk_ops->destructor(req);
 	if (req->rsk_listener)
 		sock_put(req->rsk_listener);
+	kfree(req->saved_syn);
 	kmem_cache_free(req->rsk_ops->slab, req);
 }
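
The new saved_syn pointer backs the TCP_SAVE_SYN/TCP_SAVED_SYN socket
options added elsewhere in this series: the headers of the SYN are kept on
the request sock and freed with it. A userspace sketch of the intended flow
(error handling elided; the option names may need a recent <netinet/tcp.h>
or explicit defines):

    #include <netinet/tcp.h>
    #include <sys/socket.h>

    int one = 1;
    unsigned char syn[500];
    socklen_t len = sizeof(syn);

    /* ask the listener to retain SYN headers for new connections */
    setsockopt(listen_fd, IPPROTO_TCP, TCP_SAVE_SYN, &one, sizeof(one));

    int conn_fd = accept(listen_fd, NULL, NULL);

    /* fetch the saved network+TCP headers once, e.g. for fingerprinting */
    getsockopt(conn_fd, IPPROTO_TCP, TCP_SAVED_SYN, syn, &len);
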
 
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 6d778ef..2738f6f 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -501,12 +501,6 @@
 	return sch->enqueue(skb, sch);
 }
 
-static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
-{
-	qdisc_skb_cb(skb)->pkt_len = skb->len;
-	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
-}
-
 static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
 {
 	return q->flags & TCQ_F_CPUSTATS;
@@ -745,23 +739,6 @@
 	return rtab->data[slot];
 }
 
-#ifdef CONFIG_NET_CLS_ACT
-static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
-					    int action)
-{
-	struct sk_buff *n;
-
-	n = skb_clone(skb, gfp_mask);
-
-	if (n) {
-		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
-		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
-		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
-	}
-	return n;
-}
-#endif
-
 struct psched_ratecfg {
 	u64	rate_bytes_ps; /* bytes per second */
 	u32	mult;
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 2bb2fcf5..495c87e 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -223,6 +223,10 @@
 	atomic_t pd_mode;
 	/* Receive to here while partial delivery is in effect. */
 	struct sk_buff_head pd_lobby;
+
+	/* These must be the last fields, as they will be skipped on copies,
+	 * like on accept and peeloff operations
+	 */
 	struct list_head auto_asconf_list;
 	int do_auto_asconf;
 };
diff --git a/include/net/sock.h b/include/net/sock.h
index 3a4898e..14d539c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -184,6 +184,7 @@
 	unsigned char		skc_reuse:4;
 	unsigned char		skc_reuseport:1;
 	unsigned char		skc_ipv6only:1;
+	unsigned char		skc_net_refcnt:1;
 	int			skc_bound_dev_if;
 	union {
 		struct hlist_node	skc_bind_node;
@@ -323,6 +324,7 @@
 #define sk_reuse		__sk_common.skc_reuse
 #define sk_reuseport		__sk_common.skc_reuseport
 #define sk_ipv6only		__sk_common.skc_ipv6only
+#define sk_net_refcnt		__sk_common.skc_net_refcnt
 #define sk_bound_dev_if		__sk_common.skc_bound_dev_if
 #define sk_bind_node		__sk_common.skc_bind_node
 #define sk_prot			__sk_common.skc_prot
@@ -924,7 +926,6 @@
 
 /* Networking protocol blocks we attach to sockets.
  * socket layer -> transport layer interface
- * transport -> network interface is defined by struct inet_proto
  */
 struct proto {
 	void			(*close)(struct sock *sk,
@@ -1366,7 +1367,7 @@
  * Functions for memory accounting
  */
 int __sk_mem_schedule(struct sock *sk, int size, int kind);
-void __sk_mem_reclaim(struct sock *sk);
+void __sk_mem_reclaim(struct sock *sk, int amount);
 
 #define SK_MEM_QUANTUM ((int)PAGE_SIZE)
 #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
@@ -1407,7 +1408,7 @@
 	if (!sk_has_account(sk))
 		return;
 	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
-		__sk_mem_reclaim(sk);
+		__sk_mem_reclaim(sk, sk->sk_forward_alloc);
 }
 
 static inline void sk_mem_reclaim_partial(struct sock *sk)
@@ -1415,7 +1416,7 @@
 	if (!sk_has_account(sk))
 		return;
 	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
-		__sk_mem_reclaim(sk);
+		__sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
 }
 
 static inline void sk_mem_charge(struct sock *sk, int size)
@@ -1514,9 +1515,9 @@
 
 
 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
-		      struct proto *prot);
+		      struct proto *prot, int kern);
 void sk_free(struct sock *sk);
-void sk_release_kernel(struct sock *sk);
+void sk_destruct(struct sock *sk);
 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
 
 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
@@ -2024,7 +2025,8 @@
 	}
 }
 
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
+struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+				    bool force_schedule);
 
 /**
  * sk_page_frag - return an appropriate page_frag
@@ -2192,22 +2194,6 @@
 	write_pnet(&sk->sk_net, net);
 }
 
-/*
- * Kernel sockets, f.e. rtnl or icmp_socket, are a part of a namespace.
- * They should not hold a reference to a namespace in order to allow
- * to stop it.
- * Sockets after sk_change_net should be released using sk_release_kernel
- */
-static inline void sk_change_net(struct sock *sk, struct net *net)
-{
-	struct net *current_net = sock_net(sk);
-
-	if (!net_eq(current_net, net)) {
-		put_net(current_net);
-		sock_net_set(sk, net);
-	}
-}
-
 static inline struct sock *skb_steal_sock(struct sk_buff *skb)
 {
 	if (skb->sk) {
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d2e69ee..d5671f1 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -14,154 +14,261 @@
 #include <linux/netdevice.h>
 #include <linux/notifier.h>
 
+#define SWITCHDEV_F_NO_RECURSE		BIT(0)
+
+enum switchdev_trans {
+	SWITCHDEV_TRANS_NONE,
+	SWITCHDEV_TRANS_PREPARE,
+	SWITCHDEV_TRANS_ABORT,
+	SWITCHDEV_TRANS_COMMIT,
+};
+
+enum switchdev_attr_id {
+	SWITCHDEV_ATTR_UNDEFINED,
+	SWITCHDEV_ATTR_PORT_PARENT_ID,
+	SWITCHDEV_ATTR_PORT_STP_STATE,
+	SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
+};
+
+struct switchdev_attr {
+	enum switchdev_attr_id id;
+	enum switchdev_trans trans;
+	u32 flags;
+	union {
+		struct netdev_phys_item_id ppid;	/* PORT_PARENT_ID */
+		u8 stp_state;				/* PORT_STP_STATE */
+		unsigned long brport_flags;		/* PORT_BRIDGE_FLAGS */
+	} u;
+};
+
 struct fib_info;
 
+enum switchdev_obj_id {
+	SWITCHDEV_OBJ_UNDEFINED,
+	SWITCHDEV_OBJ_PORT_VLAN,
+	SWITCHDEV_OBJ_IPV4_FIB,
+	SWITCHDEV_OBJ_PORT_FDB,
+};
+
+struct switchdev_obj {
+	enum switchdev_obj_id id;
+	enum switchdev_trans trans;
+	int (*cb)(struct net_device *dev, struct switchdev_obj *obj);
+	union {
+		struct switchdev_obj_vlan {		/* PORT_VLAN */
+			u16 flags;
+			u16 vid_begin;
+			u16 vid_end;
+		} vlan;
+		struct switchdev_obj_ipv4_fib {		/* IPV4_FIB */
+			u32 dst;
+			int dst_len;
+			struct fib_info *fi;
+			u8 tos;
+			u8 type;
+			u32 nlflags;
+			u32 tb_id;
+		} ipv4_fib;
+		struct switchdev_obj_fdb {		/* PORT_FDB */
+			const unsigned char *addr;
+			u16 vid;
+		} fdb;
+	} u;
+};
+
 /**
  * struct switchdev_ops - switchdev operations
  *
- * @swdev_parent_id_get: Called to get an ID of the switch chip this port
- *   is part of.  If driver implements this, it indicates that it
- *   represents a port of a switch chip.
+ * @switchdev_port_attr_get: Get a port attribute (see switchdev_attr).
  *
- * @swdev_port_stp_update: Called to notify switch device port of bridge
- *   port STP state change.
+ * @switchdev_port_attr_set: Set a port attribute (see switchdev_attr).
  *
- * @swdev_fib_ipv4_add: Called to add/modify IPv4 route to switch device.
+ * @switchdev_port_obj_add: Add an object to port (see switchdev_obj).
  *
- * @swdev_fib_ipv4_del: Called to delete IPv4 route from switch device.
+ * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj).
+ *
+ * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj).
  */
-struct swdev_ops {
-	int	(*swdev_parent_id_get)(struct net_device *dev,
-				       struct netdev_phys_item_id *psid);
-	int	(*swdev_port_stp_update)(struct net_device *dev, u8 state);
-	int	(*swdev_fib_ipv4_add)(struct net_device *dev, __be32 dst,
-				      int dst_len, struct fib_info *fi,
-				      u8 tos, u8 type, u32 nlflags,
-				      u32 tb_id);
-	int	(*swdev_fib_ipv4_del)(struct net_device *dev, __be32 dst,
-				      int dst_len, struct fib_info *fi,
-				      u8 tos, u8 type, u32 tb_id);
+struct switchdev_ops {
+	int	(*switchdev_port_attr_get)(struct net_device *dev,
+					   struct switchdev_attr *attr);
+	int	(*switchdev_port_attr_set)(struct net_device *dev,
+					   struct switchdev_attr *attr);
+	int	(*switchdev_port_obj_add)(struct net_device *dev,
+					  struct switchdev_obj *obj);
+	int	(*switchdev_port_obj_del)(struct net_device *dev,
+					  struct switchdev_obj *obj);
+	int	(*switchdev_port_obj_dump)(struct net_device *dev,
+					  struct switchdev_obj *obj);
 };
 
-enum netdev_switch_notifier_type {
-	NETDEV_SWITCH_FDB_ADD = 1,
-	NETDEV_SWITCH_FDB_DEL,
+enum switchdev_notifier_type {
+	SWITCHDEV_FDB_ADD = 1,
+	SWITCHDEV_FDB_DEL,
 };
 
-struct netdev_switch_notifier_info {
+struct switchdev_notifier_info {
 	struct net_device *dev;
 };
 
-struct netdev_switch_notifier_fdb_info {
-	struct netdev_switch_notifier_info info; /* must be first */
+struct switchdev_notifier_fdb_info {
+	struct switchdev_notifier_info info; /* must be first */
 	const unsigned char *addr;
 	u16 vid;
 };
 
 static inline struct net_device *
-netdev_switch_notifier_info_to_dev(const struct netdev_switch_notifier_info *info)
+switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
 {
 	return info->dev;
 }
 
 #ifdef CONFIG_NET_SWITCHDEV
 
-int netdev_switch_parent_id_get(struct net_device *dev,
-				struct netdev_phys_item_id *psid);
-int netdev_switch_port_stp_update(struct net_device *dev, u8 state);
-int register_netdev_switch_notifier(struct notifier_block *nb);
-int unregister_netdev_switch_notifier(struct notifier_block *nb);
-int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
-				 struct netdev_switch_notifier_info *info);
-int netdev_switch_port_bridge_setlink(struct net_device *dev,
-				struct nlmsghdr *nlh, u16 flags);
-int netdev_switch_port_bridge_dellink(struct net_device *dev,
-				struct nlmsghdr *nlh, u16 flags);
-int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
-					       struct nlmsghdr *nlh, u16 flags);
-int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
-					       struct nlmsghdr *nlh, u16 flags);
-int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
-			       u8 tos, u8 type, u32 nlflags, u32 tb_id);
-int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
-			       u8 tos, u8 type, u32 tb_id);
-void netdev_switch_fib_ipv4_abort(struct fib_info *fi);
+int switchdev_port_attr_get(struct net_device *dev,
+			    struct switchdev_attr *attr);
+int switchdev_port_attr_set(struct net_device *dev,
+			    struct switchdev_attr *attr);
+int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj);
+int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj);
+int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj);
+int register_switchdev_notifier(struct notifier_block *nb);
+int unregister_switchdev_notifier(struct notifier_block *nb);
+int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
+			     struct switchdev_notifier_info *info);
+int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				  struct net_device *dev, u32 filter_mask,
+				  int nlflags);
+int switchdev_port_bridge_setlink(struct net_device *dev,
+				  struct nlmsghdr *nlh, u16 flags);
+int switchdev_port_bridge_dellink(struct net_device *dev,
+				  struct nlmsghdr *nlh, u16 flags);
+int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
+			   u8 tos, u8 type, u32 nlflags, u32 tb_id);
+int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
+			   u8 tos, u8 type, u32 tb_id);
+void switchdev_fib_ipv4_abort(struct fib_info *fi);
+int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			   struct net_device *dev, const unsigned char *addr,
+			   u16 vid, u16 nlm_flags);
+int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+			   struct net_device *dev, const unsigned char *addr,
+			   u16 vid);
+int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+			    struct net_device *dev,
+			    struct net_device *filter_dev, int idx);
 
 #else
 
-static inline int netdev_switch_parent_id_get(struct net_device *dev,
-					      struct netdev_phys_item_id *psid)
+static inline int switchdev_port_attr_get(struct net_device *dev,
+					  struct switchdev_attr *attr)
 {
 	return -EOPNOTSUPP;
 }
 
-static inline int netdev_switch_port_stp_update(struct net_device *dev,
-						u8 state)
+static inline int switchdev_port_attr_set(struct net_device *dev,
+					  struct switchdev_attr *attr)
 {
 	return -EOPNOTSUPP;
 }
 
-static inline int register_netdev_switch_notifier(struct notifier_block *nb)
+static inline int switchdev_port_obj_add(struct net_device *dev,
+					 struct switchdev_obj *obj)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_obj_del(struct net_device *dev,
+					 struct switchdev_obj *obj)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_obj_dump(struct net_device *dev,
+					  struct switchdev_obj *obj)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int register_switchdev_notifier(struct notifier_block *nb)
 {
 	return 0;
 }
 
-static inline int unregister_netdev_switch_notifier(struct notifier_block *nb)
+static inline int unregister_switchdev_notifier(struct notifier_block *nb)
 {
 	return 0;
 }
 
-static inline int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
-					       struct netdev_switch_notifier_info *info)
+static inline int call_switchdev_notifiers(unsigned long val,
+					   struct net_device *dev,
+					   struct switchdev_notifier_info *info)
 {
 	return NOTIFY_DONE;
 }
 
-static inline int netdev_switch_port_bridge_setlink(struct net_device *dev,
-						    struct nlmsghdr *nlh,
-						    u16 flags)
+static inline int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid,
+					    u32 seq, struct net_device *dev,
+					    u32 filter_mask, int nlflags)
 {
 	return -EOPNOTSUPP;
 }
 
-static inline int netdev_switch_port_bridge_dellink(struct net_device *dev,
-						    struct nlmsghdr *nlh,
-						    u16 flags)
+static inline int switchdev_port_bridge_setlink(struct net_device *dev,
+						struct nlmsghdr *nlh,
+						u16 flags)
 {
 	return -EOPNOTSUPP;
 }
 
-static inline int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
-							struct nlmsghdr *nlh,
-							u16 flags)
+static inline int switchdev_port_bridge_dellink(struct net_device *dev,
+						struct nlmsghdr *nlh,
+						u16 flags)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int switchdev_fib_ipv4_add(u32 dst, int dst_len,
+					 struct fib_info *fi,
+					 u8 tos, u8 type,
+					 u32 nlflags, u32 tb_id)
 {
 	return 0;
 }
 
-static inline int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
-							struct nlmsghdr *nlh,
-							u16 flags)
+static inline int switchdev_fib_ipv4_del(u32 dst, int dst_len,
+					 struct fib_info *fi,
+					 u8 tos, u8 type, u32 tb_id)
 {
 	return 0;
 }
 
-static inline int netdev_switch_fib_ipv4_add(u32 dst, int dst_len,
-					     struct fib_info *fi,
-					     u8 tos, u8 type,
-					     u32 nlflags, u32 tb_id)
+static inline void switchdev_fib_ipv4_abort(struct fib_info *fi)
 {
-	return 0;
 }
 
-static inline int netdev_switch_fib_ipv4_del(u32 dst, int dst_len,
-					     struct fib_info *fi,
-					     u8 tos, u8 type, u32 tb_id)
+static inline int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+					 struct net_device *dev,
+					 const unsigned char *addr,
+					 u16 vid, u16 nlm_flags)
 {
-	return 0;
+	return -EOPNOTSUPP;
 }
 
-static inline void netdev_switch_fib_ipv4_abort(struct fib_info *fi)
+static inline int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+					 struct net_device *dev,
+					 const unsigned char *addr, u16 vid)
 {
+	return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
+					  struct netlink_callback *cb,
+					  struct net_device *dev,
+					  struct net_device *filter_dev,
+					  int idx)
+{
+	return -EOPNOTSUPP;
 }
 
 #endif
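
Under the new model a driver implements one attr get/set pair (plus the obj
add/del/dump trio) and multiplexes on the id, instead of one ndo per
feature. A hedged sketch of the read side for the parent ID (the acme_*
names and switch_id field are hypothetical; the PREPARE/COMMIT phases of
enum switchdev_trans matter only for setters):

    static int acme_port_attr_get(struct net_device *dev,
                                  struct switchdev_attr *attr)
    {
        struct acme_port *port = netdev_priv(dev);

        switch (attr->id) {
        case SWITCHDEV_ATTR_PORT_PARENT_ID:
            /* all ports of one chip report the same id, so the
             * bridge can tell they share an offload domain */
            attr->u.ppid.id_len = sizeof(port->switch_id);
            memcpy(attr->u.ppid.id, &port->switch_id,
                   attr->u.ppid.id_len);
            return 0;
        default:
            return -EOPNOTSUPP;
        }
    }

    static const struct switchdev_ops acme_switchdev_ops = {
        .switchdev_port_attr_get = acme_port_attr_get,
    };
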
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6d204f3..950cfec 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -286,6 +286,14 @@
 extern struct percpu_counter tcp_sockets_allocated;
 extern int tcp_memory_pressure;
 
+/* optimized version of sk_under_memory_pressure() for TCP sockets */
+static inline bool tcp_under_memory_pressure(const struct sock *sk)
+{
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+		return !!sk->sk_cgrp->memory_pressure;
+
+	return tcp_memory_pressure;
+}
 /*
  * The next routines deal with comparing 32 bit unsigned ints
  * and worry about wraparound (automatic with unsigned arithmetic).
@@ -311,6 +319,8 @@
 	return false;
 }
 
+void sk_forced_mem_schedule(struct sock *sk, int size);
+
 static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
 {
 	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
@@ -326,18 +336,6 @@
 
 bool tcp_check_oom(struct sock *sk, int shift);
 
-/* syncookies: remember time of last synqueue overflow */
-static inline void tcp_synq_overflow(struct sock *sk)
-{
-	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
-}
-
-/* syncookies: no recent synqueue overflow on this listening socket? */
-static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
-{
-	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
-	return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
-}
 
 extern struct proto tcp_prot;
 
@@ -471,6 +469,9 @@
 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
 
 /* From syncookies.c */
+struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
+				 struct request_sock *req,
+				 struct dst_entry *dst);
 int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
 		      u32 cookie);
 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
@@ -483,13 +484,35 @@
  * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
  * the counter advances immediately after a cookie is generated).
  */
-#define MAX_SYNCOOKIE_AGE 2
+#define MAX_SYNCOOKIE_AGE	2
+#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
+#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
+
+/* syncookies: remember time of last synqueue overflow
+ * But do not dirty this field too often (once per second is enough)
+ */
+static inline void tcp_synq_overflow(struct sock *sk)
+{
+	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+	unsigned long now = jiffies;
+
+	if (time_after(now, last_overflow + HZ))
+		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
+}
+
+/* syncookies: no recent synqueue overflow on this listening socket? */
+static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
+{
+	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+
+	return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
+}
 
 static inline u32 tcp_cookie_time(void)
 {
 	u64 val = get_jiffies_64();
 
-	do_div(val, 60 * HZ);
+	do_div(val, TCP_SYNCOOKIE_PERIOD);
 	return val;
 }
 
@@ -527,7 +550,7 @@
 
 void tcp_send_probe0(struct sock *);
 void tcp_send_partial(struct sock *);
-int tcp_write_wakeup(struct sock *);
+int tcp_write_wakeup(struct sock *, int mib);
 void tcp_send_fin(struct sock *sk);
 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 int tcp_send_synack(struct sock *);
@@ -692,6 +715,8 @@
 #define TCPHDR_ECE 0x40
 #define TCPHDR_CWR 0x80
 
+#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
+
 /* This is what the send packet queuing engine uses to pass
  * TCP per-packet control information to the transmission code.
  * We also store the host-order sequence numbers in here too.
@@ -705,11 +730,14 @@
 		/* Note : tcp_tw_isn is used in input path only
 		 *	  (isn chosen by tcp_timewait_state_process())
 		 *
-		 * 	  tcp_gso_segs is used in write queue only,
-		 *	  cf tcp_skb_pcount()
+		 * 	  tcp_gso_segs/size are used in write queue only,
+		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
 		 */
 		__u32		tcp_tw_isn;
-		__u32		tcp_gso_segs;
+		struct {
+			u16	tcp_gso_segs;
+			u16	tcp_gso_size;
+		};
 	};
 	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
 
@@ -765,10 +793,10 @@
 	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
 }
 
-/* This is valid iff tcp_skb_pcount() > 1. */
+/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
 static inline int tcp_skb_mss(const struct sk_buff *skb)
 {
-	return skb_shinfo(skb)->gso_size;
+	return TCP_SKB_CB(skb)->tcp_gso_size;
 }
 
 /* Events passed to congestion control interface */
@@ -1043,14 +1071,31 @@
 	return tp->is_cwnd_limited;
 }
 
+/* Something is really bad, we could not queue an additional packet,
+ * because qdisc is full or receiver sent a 0 window.
+ * We do not want to add fuel to the fire, or abort too early,
+ * so make sure the timer we arm now is at least 200ms in the future,
+ * regardless of current icsk_rto value (as it could be ~2ms)
+ */
+static inline unsigned long tcp_probe0_base(const struct sock *sk)
+{
+	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
+}
+
+/* Variant of inet_csk_rto_backoff() used for zero window probes */
+static inline unsigned long tcp_probe0_when(const struct sock *sk,
+					    unsigned long max_when)
+{
+	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
+
+	return (unsigned long)min_t(u64, when, max_when);
+}
+
 static inline void tcp_check_probe_timer(struct sock *sk)
 {
-	const struct tcp_sock *tp = tcp_sk(sk);
-	const struct inet_connection_sock *icsk = inet_csk(sk);
-
-	if (!tp->packets_out && !icsk->icsk_pending)
+	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-					  icsk->icsk_rto, TCP_RTO_MAX);
+					  tcp_probe0_base(sk), TCP_RTO_MAX);
 }
 
 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
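
With MAX_SYNCOOKIE_AGE of 2 and a 60 second period, a cookie carries the
tcp_cookie_time() bucket in which it was minted and is accepted while at
most one bucket boundary has passed, so it lives for 60..120 seconds
depending on where in its bucket it was generated. A plain-C illustration
of that window (seconds stand in for jiffies; not the kernel's actual
check, which also validates the hash):

    #define PERIOD  60
    #define MAX_AGE 2

    /* cookie minted at second t_sent, checked at second t_now */
    static int cookie_still_valid(unsigned long t_sent, unsigned long t_now)
    {
        unsigned long sent_bucket = t_sent / PERIOD;
        unsigned long now_bucket  = t_now / PERIOD;

        return now_bucket - sent_bucket < MAX_AGE;
    }
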
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index a9ebdf5..29ef6f9 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -113,6 +113,7 @@
 	BPF_MAP_TYPE_UNSPEC,
 	BPF_MAP_TYPE_HASH,
 	BPF_MAP_TYPE_ARRAY,
+	BPF_MAP_TYPE_PROG_ARRAY,
 };
 
 enum bpf_prog_type {
@@ -210,6 +211,44 @@
 	 * Return: 0 on success
 	 */
 	BPF_FUNC_l4_csum_replace,
+
+	/**
+	 * bpf_tail_call(ctx, prog_array_map, index) - jump into another BPF program
+	 * @ctx: context pointer passed to next program
+	 * @prog_array_map: pointer to a map of type BPF_MAP_TYPE_PROG_ARRAY
+	 * @index: index inside array that selects specific program to run
+	 * Return: 0 on success
+	 */
+	BPF_FUNC_tail_call,
+
+	/**
+	 * bpf_clone_redirect(skb, ifindex, flags) - redirect to another netdev
+	 * @skb: pointer to skb
+	 * @ifindex: ifindex of the net device
+	 * @flags: bit 0 - if set, redirect to ingress instead of egress
+	 *         other bits - reserved
+	 * Return: 0 on success
+	 */
+	BPF_FUNC_clone_redirect,
+
+	/**
+	 * u64 bpf_get_current_pid_tgid(void)
+	 * Return: current->tgid << 32 | current->pid
+	 */
+	BPF_FUNC_get_current_pid_tgid,
+
+	/**
+	 * u64 bpf_get_current_uid_gid(void)
+	 * Return: current_gid << 32 | current_uid
+	 */
+	BPF_FUNC_get_current_uid_gid,
+
+	/**
+	 * bpf_get_current_comm(char *buf, int size_of_buf)
+	 * stores current->comm into buf
+	 * Return: 0 on success
+	 */
+	BPF_FUNC_get_current_comm,
 	__BPF_FUNC_MAX_ID,
 };
 
@@ -226,6 +265,10 @@
 	__u32 vlan_tci;
 	__u32 vlan_proto;
 	__u32 priority;
+	__u32 ingress_ifindex;
+	__u32 ifindex;
+	__u32 tc_index;
+	__u32 cb[5];
 };
 
 #endif /* _UAPI__LINUX_BPF_H__ */
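
On the program side, a tail call replaces the running program with the one
stored in a BPF_MAP_TYPE_PROG_ARRAY slot; if the slot is empty, execution
simply falls through. A sketch in the style of samples/bpf (SEC(), struct
bpf_map_def and the bpf_tail_call() stub come from samples/bpf/bpf_helpers.h,
not from this header; the slot numbering is the loader's own convention):

    #include <uapi/linux/bpf.h>
    #include "bpf_helpers.h"    /* SEC(), bpf_map_def, bpf_tail_call() */

    struct bpf_map_def SEC("maps") jmp_table = {
        .type        = BPF_MAP_TYPE_PROG_ARRAY,
        .key_size    = sizeof(__u32),
        .value_size  = sizeof(__u32),
        .max_entries = 8,
    };

    #define PARSER_TCP 0    /* slot filled in by the loader */

    SEC("socket")
    int main_prog(struct __sk_buff *skb)
    {
        /* jump into the TCP parser; if slot 0 is empty the tail
         * call is a no-op and we fall through to the drop below */
        bpf_tail_call(skb, &jmp_table, PARSER_TCP);
        return 0;
    }
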
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index 41892f7..9692cda 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -95,11 +95,17 @@
 * @can_dlc: frame payload length in bytes (0 .. 8) aka data length code
  *           N.B. the DLC field from ISO 11898-1 Chapter 8.4.2.3 has a 1:1
  *           mapping of the 'data length code' to the real payload length
+ * @__pad:   padding
+ * @__res0:  reserved / padding
+ * @__res1:  reserved / padding
 * @data:    CAN frame payload (up to 8 bytes)
  */
 struct can_frame {
 	canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
 	__u8    can_dlc; /* frame payload length in bytes (0 .. CAN_MAX_DLEN) */
+	__u8    __pad;   /* padding */
+	__u8    __res0;  /* reserved / padding */
+	__u8    __res1;  /* reserved / padding */
 	__u8    data[CAN_MAX_DLEN] __attribute__((aligned(8)));
 };
 
diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h
index 3e6184c..5079b9d 100644
--- a/include/uapi/linux/can/gw.h
+++ b/include/uapi/linux/can/gw.h
@@ -78,6 +78,7 @@
 	CGW_FILTER,	/* specify struct can_filter on source CAN device */
 	CGW_DELETED,	/* number of deleted CAN frames (see max_hops param) */
 	CGW_LIM_HOPS,	/* limit the number of hops of this specific rule */
+	CGW_MOD_UID,	/* user defined identifier for modification updates */
 	__CGW_MAX
 };
 
@@ -162,6 +163,10 @@
  * load time of the can-gw module). This value is used to reduce the number of
 * possible hops for this gateway rule to a value smaller than max_hops.
  *
+ * CGW_MOD_UID (length 4 bytes):
+ * Optional non-zero user defined routing job identifier to alter existing
+ * modification settings at runtime.
+ *
  * CGW_CS_XOR (length 4 bytes):
  * Set a simple XOR checksum starting with an initial value into
  * data[result-idx] using data[start-idx] .. data[end-idx]
diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h
index 6497d79..3ea470f 100644
--- a/include/uapi/linux/dcbnl.h
+++ b/include/uapi/linux/dcbnl.h
@@ -207,8 +207,7 @@
 #define IEEE_8021QAZ_APP_SEL_ANY	4
 
 /* This structure contains the IEEE 802.1Qaz APP managed object. This
- * object is also used for the CEE std as well. There is no difference
- * between the objects.
+ * object is also used for the CEE std.
  *
  * @selector: protocol identifier type
  * @protocol: protocol of type indicated
@@ -216,13 +215,18 @@
  *            8-bit 802.1p user priority bitmap for CEE
  *
  * ----
- *  Selector field values
+ *  Selector field values for IEEE 802.1Qaz
  *	0	Reserved
  *	1	Ethertype
  *	2	Well known port number over TCP or SCTP
  *	3	Well known port number over UDP or DCCP
  *	4	Well known port number over TCP, SCTP, UDP, or DCCP
  *	5-7	Reserved
+ *
+ *  Selector field values for CEE
+ *	0	Ethertype
+ *	1	Well known port number over TCP or UDP
+ *	2-3	Reserved
  */
 struct dcb_app {
 	__u8	selector;
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 2e49fc8..cd67aec 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -215,6 +215,11 @@
 	ETHTOOL_ID_UNSPEC,
 	ETHTOOL_RX_COPYBREAK,
 	ETHTOOL_TX_COPYBREAK,
+	/*
+	 * Add your fresh new tunable attribute above and remember to update
+	 * tunable_strings[] in net/core/ethtool.c
+	 */
+	__ETHTOOL_TUNABLE_COUNT,
 };
 
 enum tunable_type_id {
@@ -545,6 +550,7 @@
 	ETH_SS_NTUPLE_FILTERS,
 	ETH_SS_FEATURES,
 	ETH_SS_RSS_HASH_FUNCS,
+	ETH_SS_TUNABLES,
 };
 
 /**
@@ -796,6 +802,31 @@
 	__u32		location;
 };
 
+/* How rings are laid out when accessing virtual functions or
+ * offloaded queues is device specific. To allow users to do flow
+ * steering and specify these queues, the ring cookie is partitioned
+ * into a 32-bit queue index with an 8-bit virtual function id.
+ * This also leaves 3 bytes for further specifiers. It is possible
+ * future devices may support more than 256 virtual functions if
+ * devices start supporting PCIe w/ARI. However, at the moment I
+ * do not know of any devices that support this, so I do not reserve
+ * space for it at this time. If a future patch consumes the next
+ * byte it should be aware of this possibility.
+ */
+#define ETHTOOL_RX_FLOW_SPEC_RING	0x00000000FFFFFFFFLL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF	0x000000FF00000000LL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32
+static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
+{
+	return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
+}
+
+static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
+{
+	return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
+				ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+}
+
 /**
  * struct ethtool_rxnfc - command to get or set RX flow classification rules
  * @cmd: Specific command number - %ETHTOOL_GRXFH, %ETHTOOL_SRXFH,
@@ -1264,15 +1295,19 @@
  * it was forced up into this mode or autonegotiated.
  */
 
-/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|10|20|40|56]GbE. */
+/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|5|10|20|25|40|50|56|100]GbE. */
 #define SPEED_10		10
 #define SPEED_100		100
 #define SPEED_1000		1000
 #define SPEED_2500		2500
+#define SPEED_5000		5000
 #define SPEED_10000		10000
 #define SPEED_20000		20000
+#define SPEED_25000		25000
 #define SPEED_40000		40000
+#define SPEED_50000		50000
 #define SPEED_56000		56000
+#define SPEED_100000		100000
 
 #define SPEED_UNKNOWN		-1
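
Going the other way from the two helpers above: a flow steering rule aimed
at queue 3 with a VF field of 1 builds its cookie by shifting the VF id
into the second 32-bit word (how a driver maps the VF field, e.g. whether 0
means the PF, is device specific, as the comment says):

    __u64 ring_cookie = ((__u64)1 << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) | 3;

    /* and the helpers recover the two fields */
    __u64 queue = ethtool_get_flow_spec_ring(ring_cookie);    /* -> 3 */
    __u64 vf    = ethtool_get_flow_spec_ring_vf(ring_cookie); /* -> 1 */
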
 
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index d9cd192..2c7e8e3 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -390,6 +390,17 @@
 	__be16	high;
 };
 
+/* GENEVE section */
+enum {
+	IFLA_GENEVE_UNSPEC,
+	IFLA_GENEVE_ID,
+	IFLA_GENEVE_REMOTE,
+	IFLA_GENEVE_TTL,
+	IFLA_GENEVE_TOS,
+	__IFLA_GENEVE_MAX
+};
+#define IFLA_GENEVE_MAX	(__IFLA_GENEVE_MAX - 1)
+
 /* Bonding section */
 
 enum {
@@ -417,6 +428,9 @@
 	IFLA_BOND_AD_LACP_RATE,
 	IFLA_BOND_AD_SELECT,
 	IFLA_BOND_AD_INFO,
+	IFLA_BOND_AD_ACTOR_SYS_PRIO,
+	IFLA_BOND_AD_USER_PORT_KEY,
+	IFLA_BOND_AD_ACTOR_SYSTEM,
 	__IFLA_BOND_MAX,
 };
 
@@ -442,6 +456,8 @@
 	IFLA_BOND_SLAVE_PERM_HWADDR,
 	IFLA_BOND_SLAVE_QUEUE_ID,
 	IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
+	IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE,
+	IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE,
 	__IFLA_BOND_SLAVE_MAX,
 };
 
@@ -468,6 +484,7 @@
 	IFLA_VF_RSS_QUERY_EN,	/* RSS Redirection Table and Hash Key query
 				 * on/off switch
 				 */
+	IFLA_VF_STATS,		/* network device statistics */
 	__IFLA_VF_MAX,
 };
 
@@ -517,6 +534,18 @@
 	__u32 setting;
 };
 
+enum {
+	IFLA_VF_STATS_RX_PACKETS,
+	IFLA_VF_STATS_TX_PACKETS,
+	IFLA_VF_STATS_RX_BYTES,
+	IFLA_VF_STATS_TX_BYTES,
+	IFLA_VF_STATS_BROADCAST,
+	IFLA_VF_STATS_MULTICAST,
+	__IFLA_VF_STATS_MAX,
+};
+
+#define IFLA_VF_STATS_MAX (__IFLA_VF_STATS_MAX - 1)
+
 /* VF ports management section
  *
  *	Nested layout of set/get msg is:
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index 053bd10..d3d715f8c 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -54,6 +54,7 @@
 #define PACKET_FANOUT			18
 #define PACKET_TX_HAS_OFF		19
 #define PACKET_QDISC_BYPASS		20
+#define PACKET_ROLLOVER_STATS		21
 
 #define PACKET_FANOUT_HASH		0
 #define PACKET_FANOUT_LB		1
@@ -75,6 +76,12 @@
 	unsigned int	tp_freeze_q_cnt;
 };
 
+struct tpacket_rollover_stats {
+	__aligned_u64	tp_all;
+	__aligned_u64	tp_huge;
+	__aligned_u64	tp_failed;
+};
+
 union tpacket_stats_u {
 	struct tpacket_stats stats1;
 	struct tpacket_stats_v3 stats3;
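
Like PACKET_STATISTICS, the new rollover counters are fetched with
getsockopt() on the packet socket; a sketch (fd is an AF_PACKET socket in a
fanout group, error handling elided):

    #include <linux/if_packet.h>
    #include <sys/socket.h>
    #include <stdio.h>

    struct tpacket_rollover_stats rstats;
    socklen_t len = sizeof(rstats);

    if (getsockopt(fd, SOL_PACKET, PACKET_ROLLOVER_STATS,
                   &rstats, &len) == 0)
        fprintf(stderr, "rollovers: %llu (huge: %llu, failed: %llu)\n",
                (unsigned long long)rstats.tp_all,
                (unsigned long long)rstats.tp_huge,
                (unsigned long long)rstats.tp_failed);
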
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 589ced0..83d6236 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -69,6 +69,8 @@
 #define IPPROTO_SCTP		IPPROTO_SCTP
   IPPROTO_UDPLITE = 136,	/* UDP-Lite (RFC 3828)			*/
 #define IPPROTO_UDPLITE		IPPROTO_UDPLITE
+  IPPROTO_MPLS = 137,		/* MPLS in IP (RFC 4023)		*/
+#define IPPROTO_MPLS		IPPROTO_MPLS
   IPPROTO_RAW = 255,		/* Raw IP packets			*/
 #define IPPROTO_RAW		IPPROTO_RAW
   IPPROTO_MAX
@@ -110,6 +112,7 @@
 #define IP_MINTTL       21
 #define IP_NODEFRAG     22
 #define IP_CHECKSUM	23
+#define IP_BIND_ADDRESS_NO_PORT	24
 
 /* IP_MTU_DISCOVER values */
 #define IP_PMTUDISC_DONT		0	/* Never send DF frames */
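
IP_BIND_ADDRESS_NO_PORT lets an application pin a source address without
consuming an ephemeral port at bind() time; the port is chosen only at
connect(), where the full 4-tuple allows much denser reuse. The intended
call order, sketched (error handling elided; dst filled in elsewhere):

    #include <netinet/in.h>
    #include <sys/socket.h>

    int one = 1;
    struct sockaddr_in src = { .sin_family = AF_INET, .sin_port = 0 };
    /* src.sin_addr set to the desired source address */

    setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one));
    bind(fd, (struct sockaddr *)&src, sizeof(src));    /* no port used */
    connect(fd, (struct sockaddr *)&dst, sizeof(dst)); /* port picked here */
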
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index c7093c7..68a1f71 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -111,9 +111,11 @@
 	INET_DIAG_SKMEMINFO,
 	INET_DIAG_SHUTDOWN,
 	INET_DIAG_DCTCPINFO,
+	INET_DIAG_PROTOCOL,  /* response attribute only */
+	INET_DIAG_SKV6ONLY,
 };
 
-#define INET_DIAG_MAX INET_DIAG_DCTCPINFO
+#define INET_DIAG_MAX INET_DIAG_SKV6ONLY
 
 /* INET_DIAG_MEM */
 
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index 4119594..08f894d 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -164,6 +164,7 @@
 	IPV4_DEVCONF_ROUTE_LOCALNET,
 	IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL,
 	IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL,
+	IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
 	__IPV4_DEVCONF_MAX
 };
 
diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h
index 2be7bd1..f6598d1 100644
--- a/include/uapi/linux/ipv6_route.h
+++ b/include/uapi/linux/ipv6_route.h
@@ -34,6 +34,7 @@
 #define RTF_PREF(pref)	((pref) << 27)
 #define RTF_PREF_MASK	0x18000000
 
+#define RTF_PCPU	0x40000000
 #define RTF_LOCAL	0x80000000
 
 
diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
index ef1b1f8..d93f949 100644
--- a/include/uapi/linux/netfilter.h
+++ b/include/uapi/linux/netfilter.h
@@ -4,7 +4,8 @@
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/sysctl.h>
-
+#include <linux/in.h>
+#include <linux/in6.h>
 
 /* Responses from hook functions. */
 #define NF_DROP 0
@@ -51,11 +52,17 @@
 	NF_INET_NUMHOOKS
 };
 
+enum nf_dev_hooks {
+	NF_NETDEV_INGRESS,
+	NF_NETDEV_NUMHOOKS
+};
+
 enum {
 	NFPROTO_UNSPEC =  0,
 	NFPROTO_INET   =  1,
 	NFPROTO_IPV4   =  2,
 	NFPROTO_ARP    =  3,
+	NFPROTO_NETDEV =  5,
 	NFPROTO_BRIDGE =  7,
 	NFPROTO_IPV6   = 10,
 	NFPROTO_DECNET = 12,
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 5ab4e60..63b2e34 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -15,12 +15,12 @@
 /* The protocol version */
 #define IPSET_PROTOCOL		6
 
-/* The maximum permissible comment length we will accept over netlink */
-#define IPSET_MAX_COMMENT_SIZE	255
-
 /* The max length of strings including NUL: set and type identifiers */
 #define IPSET_MAXNAMELEN	32
 
+/* The maximum permissible comment length we will accept over netlink */
+#define IPSET_MAX_COMMENT_SIZE	255
+
 /* Message types and commands */
 enum ipset_cmd {
 	IPSET_CMD_NONE,
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 5fa1cd0..a99e6a9 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -122,11 +122,13 @@
  *
  * @NFTA_HOOK_HOOKNUM: netfilter hook number (NLA_U32)
  * @NFTA_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
+ * @NFTA_HOOK_DEV: netdevice name (NLA_STRING)
  */
 enum nft_hook_attributes {
 	NFTA_HOOK_UNSPEC,
 	NFTA_HOOK_HOOKNUM,
 	NFTA_HOOK_PRIORITY,
+	NFTA_HOOK_DEV,
 	__NFTA_HOOK_MAX
 };
 #define NFTA_HOOK_MAX		(__NFTA_HOOK_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h
index 8dd819e..b67a853 100644
--- a/include/uapi/linux/netfilter/nfnetlink_queue.h
+++ b/include/uapi/linux/netfilter/nfnetlink_queue.h
@@ -49,6 +49,7 @@
 	NFQA_EXP,			/* nf_conntrack_netlink.h */
 	NFQA_UID,			/* __u32 sk uid */
 	NFQA_GID,			/* __u32 sk gid */
+	NFQA_SECCTX,			/* security context string */
 
 	__NFQA_MAX
 };
@@ -102,7 +103,8 @@
 #define NFQA_CFG_F_CONNTRACK			(1 << 1)
 #define NFQA_CFG_F_GSO				(1 << 2)
 #define NFQA_CFG_F_UID_GID			(1 << 3)
-#define NFQA_CFG_F_MAX				(1 << 4)
+#define NFQA_CFG_F_SECCTX			(1 << 4)
+#define NFQA_CFG_F_MAX				(1 << 5)
 
 /* flags for NFQA_SKB_INFO */
 /* packet appears to have wrong checksums, but they are ok */
diff --git a/include/uapi/linux/netfilter/xt_socket.h b/include/uapi/linux/netfilter/xt_socket.h
index 6315e2a..87644f8 100644
--- a/include/uapi/linux/netfilter/xt_socket.h
+++ b/include/uapi/linux/netfilter/xt_socket.h
@@ -6,6 +6,7 @@
 enum {
 	XT_SOCKET_TRANSPARENT = 1 << 0,
 	XT_SOCKET_NOWILDCARD = 1 << 1,
+	XT_SOCKET_RESTORESKMARK = 1 << 2,
 };
 
 struct xt_socket_mtinfo1 {
@@ -18,4 +19,11 @@
 };
 #define XT_SOCKET_FLAGS_V2 (XT_SOCKET_TRANSPARENT | XT_SOCKET_NOWILDCARD)
 
+struct xt_socket_mtinfo3 {
+	__u8 flags;
+};
+#define XT_SOCKET_FLAGS_V3 (XT_SOCKET_TRANSPARENT \
+			   | XT_SOCKET_NOWILDCARD \
+			   | XT_SOCKET_RESTORESKMARK)
+
 #endif /* _XT_SOCKET_H */
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 1a85940..cf6a65c 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -101,13 +101,15 @@
 	struct nlmsghdr msg;
 };
 
-#define NETLINK_ADD_MEMBERSHIP	1
-#define NETLINK_DROP_MEMBERSHIP	2
-#define NETLINK_PKTINFO		3
-#define NETLINK_BROADCAST_ERROR	4
-#define NETLINK_NO_ENOBUFS	5
-#define NETLINK_RX_RING		6
-#define NETLINK_TX_RING		7
+#define NETLINK_ADD_MEMBERSHIP		1
+#define NETLINK_DROP_MEMBERSHIP		2
+#define NETLINK_PKTINFO			3
+#define NETLINK_BROADCAST_ERROR		4
+#define NETLINK_NO_ENOBUFS		5
+#define NETLINK_RX_RING			6
+#define NETLINK_TX_RING			7
+#define NETLINK_LISTEN_ALL_NSID		8
+#define NETLINK_LIST_MEMBERSHIPS	9
 
 struct nl_pktinfo {
 	__u32	group;
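
NETLINK_LIST_MEMBERSHIPS is the read-only counterpart to ADD/DROP
membership: it copies out a bitmap of the groups the socket has joined and,
as usual for getsockopt(), updates the length argument to the size the full
bitmap needs. A sketch (the assumption here is that group n maps to bit
n - 1, matching the socket's internal groups mask):

    #include <linux/netlink.h>
    #include <sys/socket.h>

    unsigned int groups[8] = { 0 };
    socklen_t len = sizeof(groups);

    if (getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS,
                   groups, &len) == 0) {
        /* groups[] now holds one bit per joined multicast group;
         * len tells how many bytes the kernel would need in total */
    }
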
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index c1e2e63..dd3f753 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -86,6 +86,8 @@
  *	for this event is the application ID (AID).
  * @NFC_CMD_GET_SE: Dump all discovered secure elements from an NFC controller.
  * @NFC_CMD_SE_IO: Send/Receive APDUs to/from the selected secure element.
+ * @NFC_CMD_VENDOR: Vendor specific command, to be implemented directly
+ *	by the driver in order to support hardware specific operations.
  */
 enum nfc_commands {
 	NFC_CMD_UNSPEC,
@@ -117,6 +119,7 @@
 	NFC_CMD_GET_SE,
 	NFC_CMD_SE_IO,
 	NFC_CMD_ACTIVATE_TARGET,
+	NFC_CMD_VENDOR,
 /* private: internal use only */
 	__NFC_CMD_AFTER_LAST
 };
@@ -153,6 +156,10 @@
  * @NFC_ATTR_APDU: Secure element APDU
  * @NFC_ATTR_TARGET_ISO15693_DSFID: ISO 15693 Data Storage Format Identifier
  * @NFC_ATTR_TARGET_ISO15693_UID: ISO 15693 Unique Identifier
+ * @NFC_ATTR_VENDOR_ID: NFC manufacturer unique ID, typically an OUI
+ * @NFC_ATTR_VENDOR_SUBCMD: Vendor specific sub command
+ * @NFC_ATTR_VENDOR_DATA: Vendor specific data, to be optionally passed
+ *	to a vendor specific command implementation
  */
 enum nfc_attrs {
 	NFC_ATTR_UNSPEC,
@@ -184,6 +191,9 @@
 	NFC_ATTR_TARGET_ISO15693_DSFID,
 	NFC_ATTR_TARGET_ISO15693_UID,
 	NFC_ATTR_SE_PARAMS,
+	NFC_ATTR_VENDOR_ID,
+	NFC_ATTR_VENDOR_SUBCMD,
+	NFC_ATTR_VENDOR_DATA,
 /* private: internal use only */
 	__NFC_ATTR_AFTER_LAST
 };
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 241220c..c0ab6b0 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2620,16 +2620,17 @@
 *	indoor surroundings, i.e., it is connected to AC power (and not
  *	through portable DC inverters) or is under the control of a master
  *	that is acting as an AP and is connected to AC power.
- * @NL80211_FREQUENCY_ATTR_GO_CONCURRENT: GO operation is allowed on this
+ * @NL80211_FREQUENCY_ATTR_IR_CONCURRENT: IR operation is allowed on this
  *	channel if it's connected concurrently to a BSS on the same channel on
  *	the 2 GHz band or to a channel in the same UNII band (on the 5 GHz
- *	band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO on a
- *	channel that has the GO_CONCURRENT attribute set can be done when there
- *	is a clear assessment that the device is operating under the guidance of
- *	an authorized master, i.e., setting up a GO while the device is also
- *	connected to an AP with DFS and radar detection on the UNII band (it is
- *	up to user-space, i.e., wpa_supplicant to perform the required
- *	verifications)
+ *	band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO or TDLS
+ *	off-channel on a channel that has the IR_CONCURRENT attribute set can be
+ *	done when there is a clear assessment that the device is operating under
+ *	the guidance of an authorized master, i.e., setting up a GO or TDLS
+ *	off-channel while the device is also connected to an AP with DFS and
+ *	radar detection on the UNII band (it is up to user-space, i.e.,
+ *	wpa_supplicant to perform the required verifications). Using this
+ *	attribute for IR is disallowed for master interfaces (IBSS, AP).
  * @NL80211_FREQUENCY_ATTR_NO_20MHZ: 20 MHz operation is not allowed
  *	on this channel in current regulatory domain.
  * @NL80211_FREQUENCY_ATTR_NO_10MHZ: 10 MHz operation is not allowed
@@ -2641,7 +2642,7 @@
  * See https://apps.fcc.gov/eas/comments/GetPublishedDocument.html?id=327&tn=528122
  * for more information on the FCC description of the relaxations allowed
  * by NL80211_FREQUENCY_ATTR_INDOOR_ONLY and
- * NL80211_FREQUENCY_ATTR_GO_CONCURRENT.
+ * NL80211_FREQUENCY_ATTR_IR_CONCURRENT.
  */
 enum nl80211_frequency_attr {
 	__NL80211_FREQUENCY_ATTR_INVALID,
@@ -2659,7 +2660,7 @@
 	NL80211_FREQUENCY_ATTR_NO_160MHZ,
 	NL80211_FREQUENCY_ATTR_DFS_CAC_TIME,
 	NL80211_FREQUENCY_ATTR_INDOOR_ONLY,
-	NL80211_FREQUENCY_ATTR_GO_CONCURRENT,
+	NL80211_FREQUENCY_ATTR_IR_CONCURRENT,
 	NL80211_FREQUENCY_ATTR_NO_20MHZ,
 	NL80211_FREQUENCY_ATTR_NO_10MHZ,
 
@@ -2672,6 +2673,8 @@
 #define NL80211_FREQUENCY_ATTR_PASSIVE_SCAN	NL80211_FREQUENCY_ATTR_NO_IR
 #define NL80211_FREQUENCY_ATTR_NO_IBSS		NL80211_FREQUENCY_ATTR_NO_IR
 #define NL80211_FREQUENCY_ATTR_NO_IR		NL80211_FREQUENCY_ATTR_NO_IR
+#define NL80211_FREQUENCY_ATTR_GO_CONCURRENT \
+					NL80211_FREQUENCY_ATTR_IR_CONCURRENT
 
 /**
  * enum nl80211_bitrate_attr - bitrate attributes
@@ -2830,7 +2833,7 @@
  * @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated
  *	base on contiguous rules and wider channels will be allowed to cross
  *	multiple contiguous/overlapping frequency ranges.
- * @NL80211_RRF_GO_CONCURRENT: See &NL80211_FREQUENCY_ATTR_GO_CONCURRENT
+ * @NL80211_RRF_IR_CONCURRENT: See &NL80211_FREQUENCY_ATTR_IR_CONCURRENT
  * @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation
  * @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation
  * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed
@@ -2847,7 +2850,7 @@
 	NL80211_RRF_NO_IR		= 1<<7,
 	__NL80211_RRF_NO_IBSS		= 1<<8,
 	NL80211_RRF_AUTO_BW		= 1<<11,
-	NL80211_RRF_GO_CONCURRENT	= 1<<12,
+	NL80211_RRF_IR_CONCURRENT	= 1<<12,
 	NL80211_RRF_NO_HT40MINUS	= 1<<13,
 	NL80211_RRF_NO_HT40PLUS		= 1<<14,
 	NL80211_RRF_NO_80MHZ		= 1<<15,
@@ -2859,6 +2862,7 @@
 #define NL80211_RRF_NO_IR		NL80211_RRF_NO_IR
 #define NL80211_RRF_NO_HT40		(NL80211_RRF_NO_HT40MINUS |\
 					 NL80211_RRF_NO_HT40PLUS)
+#define NL80211_RRF_GO_CONCURRENT	NL80211_RRF_IR_CONCURRENT
 
 /* For backport compatibility with older userspace */
 #define NL80211_RRF_NO_IR_ALL		(NL80211_RRF_NO_IR | __NL80211_RRF_NO_IBSS)
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index bbd49a0..1dab776 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -153,6 +153,8 @@
  * flow key against the kernel's.
  * @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet.  Used
  * for %OVS_PACKET_CMD_EXECUTE.  It has nested %OVS_ACTION_ATTR_* attributes.
+ * Also used in upcalls when the %OVS_ACTION_ATTR_USERSPACE action has the
+ * optional %OVS_USERSPACE_ATTR_ACTIONS attribute.
  * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION
  * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an
  * %OVS_USERSPACE_ATTR_USERDATA attribute, with the same length and content
@@ -528,6 +530,7 @@
  * copied to the %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA.
  * @OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: If present, u32 output port to get
  * tunnel info.
+ * @OVS_USERSPACE_ATTR_ACTIONS: If present, send actions with upcall.
  */
 enum ovs_userspace_attr {
 	OVS_USERSPACE_ATTR_UNSPEC,
@@ -535,6 +538,7 @@
 	OVS_USERSPACE_ATTR_USERDATA,  /* Optional user-specified cookie. */
 	OVS_USERSPACE_ATTR_EGRESS_TUN_PORT,  /* Optional, u32 output port
 					      * to get tunnel info. */
+	OVS_USERSPACE_ATTR_ACTIONS,   /* Optional flag to get actions. */
 	__OVS_USERSPACE_ATTR_MAX
 };
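
The new attribute is a presence-only flag inside the OVS_ACTION_ATTR_USERSPACE
nest. A hedged libnl-3 sketch of a controller requesting actions in upcalls
(message setup and error handling trimmed; function name is illustrative):

    #include <errno.h>
    #include <netlink/msg.h>
    #include <netlink/attr.h>
    #include <linux/openvswitch.h>

    static int add_userspace_action(struct nl_msg *msg, unsigned int upcall_pid)
    {
            struct nlattr *nest = nla_nest_start(msg, OVS_ACTION_ATTR_USERSPACE);

            if (!nest)
                    return -ENOMEM;
            nla_put_u32(msg, OVS_USERSPACE_ATTR_PID, upcall_pid);
            nla_put_flag(msg, OVS_USERSPACE_ATTR_ACTIONS); /* zero-length flag */
            nla_nest_end(msg, nest);
            return 0;
    }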
 
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index bf08e76..4f0d1bc 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -4,6 +4,7 @@
 #include <linux/types.h>
 #include <linux/pkt_sched.h>
 
+#ifdef __KERNEL__
 /* I think I could have done better macros; for now this is stolen from
  * some arch/mips code - jhs
 */
@@ -35,20 +36,6 @@
  *
  * */
 
-#define TC_MUNGED          _TC_MAKEMASK1(0)
-#define SET_TC_MUNGED(v)   ( TC_MUNGED | (v & ~TC_MUNGED))
-#define CLR_TC_MUNGED(v)   ( v & ~TC_MUNGED)
-
-#define TC_OK2MUNGE        _TC_MAKEMASK1(1)
-#define SET_TC_OK2MUNGE(v)   ( TC_OK2MUNGE | (v & ~TC_OK2MUNGE))
-#define CLR_TC_OK2MUNGE(v)   ( v & ~TC_OK2MUNGE)
-
-#define S_TC_VERD          _TC_MAKE32(2)
-#define M_TC_VERD          _TC_MAKEMASK(4,S_TC_VERD)
-#define G_TC_VERD(x)       _TC_GETVALUE(x,S_TC_VERD,M_TC_VERD)
-#define V_TC_VERD(x)       _TC_MAKEVALUE(x,S_TC_VERD)
-#define SET_TC_VERD(v,n)   ((V_TC_VERD(n)) | (v & ~M_TC_VERD))
-
 #define S_TC_FROM          _TC_MAKE32(6)
 #define M_TC_FROM          _TC_MAKEMASK(2,S_TC_FROM)
 #define G_TC_FROM(x)       _TC_GETVALUE(x,S_TC_FROM,M_TC_FROM)
@@ -62,18 +49,16 @@
 #define SET_TC_NCLS(v)   ( TC_NCLS | (v & ~TC_NCLS))
 #define CLR_TC_NCLS(v)   ( v & ~TC_NCLS)
 
-#define S_TC_RTTL          _TC_MAKE32(9)
-#define M_TC_RTTL          _TC_MAKEMASK(3,S_TC_RTTL)
-#define G_TC_RTTL(x)       _TC_GETVALUE(x,S_TC_RTTL,M_TC_RTTL)
-#define V_TC_RTTL(x)       _TC_MAKEVALUE(x,S_TC_RTTL)
-#define SET_TC_RTTL(v,n)   ((V_TC_RTTL(n)) | (v & ~M_TC_RTTL))
-
 #define S_TC_AT          _TC_MAKE32(12)
 #define M_TC_AT          _TC_MAKEMASK(2,S_TC_AT)
 #define G_TC_AT(x)       _TC_GETVALUE(x,S_TC_AT,M_TC_AT)
 #define V_TC_AT(x)       _TC_MAKEVALUE(x,S_TC_AT)
 #define SET_TC_AT(v,n)   ((V_TC_AT(n)) | (v & ~M_TC_AT))
 
+#define MAX_REC_LOOP 4
+#define MAX_RED_LOOP 4
+#endif
+
 /* Action attributes */
 enum {
 	TCA_ACT_UNSPEC,
@@ -93,8 +78,6 @@
 #define TCA_ACT_NOUNBIND	0
 #define TCA_ACT_REPLACE		1
 #define TCA_ACT_NOREPLACE	0
-#define MAX_REC_LOOP 4
-#define MAX_RED_LOOP 4
 
 #define TC_ACT_UNSPEC	(-1)
 #define TC_ACT_OK		0
@@ -404,6 +387,36 @@
 
 #define TCA_BPF_MAX (__TCA_BPF_MAX - 1)
 
+/* Flower classifier */
+
+enum {
+	TCA_FLOWER_UNSPEC,
+	TCA_FLOWER_CLASSID,
+	TCA_FLOWER_INDEV,
+	TCA_FLOWER_ACT,
+	TCA_FLOWER_KEY_ETH_DST,		/* ETH_ALEN */
+	TCA_FLOWER_KEY_ETH_DST_MASK,	/* ETH_ALEN */
+	TCA_FLOWER_KEY_ETH_SRC,		/* ETH_ALEN */
+	TCA_FLOWER_KEY_ETH_SRC_MASK,	/* ETH_ALEN */
+	TCA_FLOWER_KEY_ETH_TYPE,	/* be16 */
+	TCA_FLOWER_KEY_IP_PROTO,	/* u8 */
+	TCA_FLOWER_KEY_IPV4_SRC,	/* be32 */
+	TCA_FLOWER_KEY_IPV4_SRC_MASK,	/* be32 */
+	TCA_FLOWER_KEY_IPV4_DST,	/* be32 */
+	TCA_FLOWER_KEY_IPV4_DST_MASK,	/* be32 */
+	TCA_FLOWER_KEY_IPV6_SRC,	/* struct in6_addr */
+	TCA_FLOWER_KEY_IPV6_SRC_MASK,	/* struct in6_addr */
+	TCA_FLOWER_KEY_IPV6_DST,	/* struct in6_addr */
+	TCA_FLOWER_KEY_IPV6_DST_MASK,	/* struct in6_addr */
+	TCA_FLOWER_KEY_TCP_SRC,		/* be16 */
+	TCA_FLOWER_KEY_TCP_DST,		/* be16 */
+	TCA_FLOWER_KEY_UDP_SRC,		/* be16 */
+	TCA_FLOWER_KEY_UDP_DST,		/* be16 */
+	__TCA_FLOWER_MAX,
+};
+
+#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
+
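
The comments give each key's wire encoding. As a rough libnl-3 sketch
(illustrative; the enclosing TCA_OPTIONS handling is assumed), matching TCP
traffic to destination port 80 fills keys like so:

    #include <netinet/in.h>
    #include <linux/if_ether.h>
    #include <netlink/msg.h>
    #include <netlink/attr.h>
    #include <linux/pkt_cls.h>

    static void flower_match_tcp_port80(struct nl_msg *msg)
    {
            nla_put_u16(msg, TCA_FLOWER_KEY_ETH_TYPE, htons(ETH_P_IP)); /* be16 */
            nla_put_u8(msg, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);      /* u8   */
            nla_put_u16(msg, TCA_FLOWER_KEY_TCP_DST, htons(80));        /* be16 */
    }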
 /* Extended Matches */
 
 struct tcf_ematch_tree_hdr {
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 534b847..8d2530d 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -268,7 +268,8 @@
        TCA_GRED_STAB,
        TCA_GRED_DPS,
        TCA_GRED_MAX_P,
-	   __TCA_GRED_MAX,
+       TCA_GRED_LIMIT,
+       __TCA_GRED_MAX,
 };
 
 #define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
@@ -679,6 +680,7 @@
 	TCA_CODEL_LIMIT,
 	TCA_CODEL_INTERVAL,
 	TCA_CODEL_ECN,
+	TCA_CODEL_CE_THRESHOLD,
 	__TCA_CODEL_MAX
 };
 
@@ -695,6 +697,7 @@
 	__u32	drop_overlimit; /* number of times the max qdisc packet limit was hit */
 	__u32	ecn_mark;  /* number of packets we ECN marked instead of dropped */
 	__u32	dropping;  /* are we in dropping state ? */
+	__u32	ce_mark;   /* number of CE marked packets because of ce_threshold */
 };
 
 /* FQ_CODEL */
@@ -707,6 +710,7 @@
 	TCA_FQ_CODEL_ECN,
 	TCA_FQ_CODEL_FLOWS,
 	TCA_FQ_CODEL_QUANTUM,
+	TCA_FQ_CODEL_CE_THRESHOLD,
 	__TCA_FQ_CODEL_MAX
 };
 
@@ -730,6 +734,7 @@
 				 */
 	__u32	new_flows_len;	/* count of flows in new list */
 	__u32	old_flows_len;	/* count of flows in old list */
+	__u32	ce_mark;	/* packets above ce_threshold */
 };
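
The ce_threshold attributes take a time value like the other codel knobs, and
the new ce_mark fields count packets CE-marked for exceeding it. A small
libnl-3 sketch (illustrative; microsecond units assumed, matching the
existing codel parameters):

    #include <netlink/msg.h>
    #include <netlink/attr.h>
    #include <linux/pkt_sched.h>

    static void fq_codel_set_ce_threshold(struct nl_msg *msg, unsigned int usec)
    {
            /* CE-mark packets whose sojourn time exceeds the threshold. */
            nla_put_u32(msg, TCA_FQ_CODEL_CE_THRESHOLD, usec);
    }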
 
 struct tc_fq_codel_cl_stats {
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index 9195095..0f9265c 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -38,6 +38,8 @@
 
 #define RDS_IB_ABI_VERSION		0x301
 
+#define	SOL_RDS		276
+
 /*
  * setsockopt/getsockopt for SOL_RDS
  */
@@ -48,6 +50,14 @@
 #define RDS_RECVERR			5
 #define RDS_CONG_MONITOR		6
 #define RDS_GET_MR_FOR_DEST		7
+#define SO_RDS_TRANSPORT		8
+
+/* supported values for SO_RDS_TRANSPORT */
+#define	RDS_TRANS_IB	0
+#define	RDS_TRANS_IWARP	1
+#define	RDS_TRANS_TCP	2
+#define RDS_TRANS_COUNT	3
+#define	RDS_TRANS_NONE	(~0)
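
With SOL_RDS exported, an application can pin the transport before binding.
A minimal sketch (assumes AF_RDS is available from the system headers; error
handling kept short):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <linux/rds.h>

    int main(void)
    {
            int t = RDS_TRANS_TCP;
            int fd = socket(AF_RDS, SOCK_SEQPACKET, 0);

            if (fd < 0) {
                    perror("socket");
                    return 1;
            }
            /* Must be set before the socket is bound to an address. */
            if (setsockopt(fd, SOL_RDS, SO_RDS_TRANSPORT, &t, sizeof(t)) < 0)
                    perror("SO_RDS_TRANSPORT");
            return 0;
    }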
 
 /*
  * Control message types for SOL_RDS.
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 17fb02f..fdd8f07 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -338,6 +338,9 @@
 #define RTNH_F_PERVASIVE	2	/* Do recursive gateway lookup	*/
 #define RTNH_F_ONLINK		4	/* Gateway is forced on link	*/
 #define RTNH_F_OFFLOAD		8	/* offloaded route */
+#define RTNH_F_LINKDOWN		16	/* carrier-down on nexthop */
+
+#define RTNH_COMPARE_MASK	(RTNH_F_DEAD | RTNH_F_LINKDOWN)
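
Userspace walking an RTA_MULTIPATH blob can now report carrier state per
nexthop; an illustrative sketch:

    #include <stdio.h>
    #include <linux/rtnetlink.h>

    static void print_nexthop(const struct rtnexthop *rtnh)
    {
            printf("ifindex %d weight %u%s\n", rtnh->rtnh_ifindex,
                   rtnh->rtnh_hops + 1, /* rtnh_hops stores weight - 1 */
                   (rtnh->rtnh_flags & RTNH_F_LINKDOWN) ? " (link down)" : "");
    }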
 
 /* Macros to handle nexthops */
 
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 6a6fb74..eee8968 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -276,6 +276,8 @@
 	LINUX_MIB_TCPACKSKIPPEDFINWAIT2,	/* TCPACKSkippedFinWait2 */
 	LINUX_MIB_TCPACKSKIPPEDTIMEWAIT,	/* TCPACKSkippedTimeWait */
 	LINUX_MIB_TCPACKSKIPPEDCHALLENGE,	/* TCPACKSkippedChallenge */
+	LINUX_MIB_TCPWINPROBE,			/* TCPWinProbe */
+	LINUX_MIB_TCPKEEPALIVE,			/* TCPKeepAlive */
 	__LINUX_MIB_MAX
 };
 
diff --git a/include/uapi/linux/sock_diag.h b/include/uapi/linux/sock_diag.h
index b00e29e..49230d3 100644
--- a/include/uapi/linux/sock_diag.h
+++ b/include/uapi/linux/sock_diag.h
@@ -23,4 +23,14 @@
 	SK_MEMINFO_VARS,
 };
 
+enum sknetlink_groups {
+	SKNLGRP_NONE,
+	SKNLGRP_INET_TCP_DESTROY,
+	SKNLGRP_INET_UDP_DESTROY,
+	SKNLGRP_INET6_TCP_DESTROY,
+	SKNLGRP_INET6_UDP_DESTROY,
+	__SKNLGRP_MAX,
+};
+#define SKNLGRP_MAX	(__SKNLGRP_MAX - 1)
+
 #endif /* _UAPI__SOCK_DIAG_H__ */
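
These are multicast groups on the sock_diag netlink family, so subscribing is
a plain NETLINK_ADD_MEMBERSHIP. Minimal sketch (assumes SOL_NETLINK from the
system headers):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/sock_diag.h>

    int main(void)
    {
            int grp = SKNLGRP_INET_TCP_DESTROY;
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

            if (fd < 0) {
                    perror("socket");
                    return 1;
            }
            /* Takes a group id, not a bitmask. */
            if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                           &grp, sizeof(grp)) < 0)
                    perror("NETLINK_ADD_MEMBERSHIP");
            return 0;
    }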
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index faa72f4..65a77b0 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -113,6 +113,8 @@
 #define TCP_TIMESTAMP		24
 #define TCP_NOTSENT_LOWAT	25	/* limit number of unsent bytes in write queue */
 #define TCP_CC_INFO		26	/* Get Congestion Control (optional) info */
+#define TCP_SAVE_SYN		27	/* Record SYN headers for new connections */
+#define TCP_SAVED_SYN		28	/* Get SYN headers recorded for connection */
 
 struct tcp_repair_opt {
 	__u32	opt_code;
@@ -190,8 +192,10 @@
 
 	__u64	tcpi_pacing_rate;
 	__u64	tcpi_max_pacing_rate;
-	__u64	tcpi_bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked */
+	__u64	tcpi_bytes_acked;    /* RFC4898 tcpEStatsAppHCThruOctetsAcked */
 	__u64	tcpi_bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived */
+	__u32	tcpi_segs_out;	     /* RFC4898 tcpEStatsPerfSegsOut */
+	__u32	tcpi_segs_in;	     /* RFC4898 tcpEStatsPerfSegsIn */
 };
 
 /* for TCP_MD5SIG socket option */
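
TCP_SAVE_SYN is enabled on the listener; the recorded headers are then read
back per accepted socket via TCP_SAVED_SYN. Rough sketch (error handling
trimmed; the fallback defines match this header in case libc lags):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>

    #ifndef TCP_SAVE_SYN
    #define TCP_SAVE_SYN	27
    #define TCP_SAVED_SYN	28
    #endif

    int main(void)
    {
            int one = 1, cfd, lfd = socket(AF_INET, SOCK_STREAM, 0);
            struct sockaddr_in sa = { .sin_family = AF_INET,
                                      .sin_port = htons(8080) };
            unsigned char syn[512];
            socklen_t len = sizeof(syn);

            setsockopt(lfd, IPPROTO_TCP, TCP_SAVE_SYN, &one, sizeof(one));
            bind(lfd, (struct sockaddr *)&sa, sizeof(sa));
            listen(lfd, 1);
            cfd = accept(lfd, NULL, NULL);
            /* Returns the raw network + TCP headers of the peer's SYN. */
            if (cfd >= 0 &&
                getsockopt(cfd, IPPROTO_TCP, TCP_SAVED_SYN, syn, &len) == 0)
                    printf("saved %u bytes of SYN headers\n", (unsigned)len);
            return 0;
    }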
diff --git a/include/uapi/linux/tty.h b/include/uapi/linux/tty.h
index dac199a..01c4410 100644
--- a/include/uapi/linux/tty.h
+++ b/include/uapi/linux/tty.h
@@ -34,5 +34,6 @@
 #define N_TI_WL		22	/* for TI's WL BT, FM, GPS combo chips */
 #define N_TRACESINK	23	/* Trace data routing for MIPI P1149.7 */
 #define N_TRACEROUTER	24	/* Trace data routing for MIPI P1149.7 */
+#define N_NCI		25	/* NFC NCI UART */
 
 #endif /* _UAPI_LINUX_TTY_H */
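
Attaching the new line discipline to a UART-connected NFC controller is the
usual TIOCSETD dance; sketch (the device path is only an example):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    #ifndef N_NCI
    #define N_NCI	25	/* from the updated linux/tty.h */
    #endif

    int main(void)
    {
            int ldisc = N_NCI;
            int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(fd, TIOCSETD, &ldisc) < 0)
                    perror("TIOCSETD");
            close(fd);
            return 0;
    }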
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 8a66165..cb31229 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -14,12 +14,7 @@
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
-
-struct bpf_array {
-	struct bpf_map map;
-	u32 elem_size;
-	char value[0] __aligned(8);
-};
+#include <linux/filter.h>
 
 /* Called from syscall */
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
@@ -154,3 +149,109 @@
 	return 0;
 }
 late_initcall(register_array_map);
+
+static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
+{
+	/* only bpf_prog file descriptors can be stored in prog_array map */
+	if (attr->value_size != sizeof(u32))
+		return ERR_PTR(-EINVAL);
+	return array_map_alloc(attr);
+}
+
+static void prog_array_map_free(struct bpf_map *map)
+{
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	int i;
+
+	synchronize_rcu();
+
+	/* make sure it's empty */
+	for (i = 0; i < array->map.max_entries; i++)
+		BUG_ON(array->prog[i] != NULL);
+	kvfree(array);
+}
+
+static void *prog_array_map_lookup_elem(struct bpf_map *map, void *key)
+{
+	return NULL;
+}
+
+/* only called from syscall */
+static int prog_array_map_update_elem(struct bpf_map *map, void *key,
+				      void *value, u64 map_flags)
+{
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	struct bpf_prog *prog, *old_prog;
+	u32 index = *(u32 *)key, ufd;
+
+	if (map_flags != BPF_ANY)
+		return -EINVAL;
+
+	if (index >= array->map.max_entries)
+		return -E2BIG;
+
+	ufd = *(u32 *)value;
+	prog = bpf_prog_get(ufd);
+	if (IS_ERR(prog))
+		return PTR_ERR(prog);
+
+	if (!bpf_prog_array_compatible(array, prog)) {
+		bpf_prog_put(prog);
+		return -EINVAL;
+	}
+
+	old_prog = xchg(array->prog + index, prog);
+	if (old_prog)
+		bpf_prog_put_rcu(old_prog);
+
+	return 0;
+}
+
+static int prog_array_map_delete_elem(struct bpf_map *map, void *key)
+{
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	struct bpf_prog *old_prog;
+	u32 index = *(u32 *)key;
+
+	if (index >= array->map.max_entries)
+		return -E2BIG;
+
+	old_prog = xchg(array->prog + index, NULL);
+	if (old_prog) {
+		bpf_prog_put_rcu(old_prog);
+		return 0;
+	} else {
+		return -ENOENT;
+	}
+}
+
+/* decrement refcnt of all bpf_progs that are stored in this map */
+void bpf_prog_array_map_clear(struct bpf_map *map)
+{
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	int i;
+
+	for (i = 0; i < array->map.max_entries; i++)
+		prog_array_map_delete_elem(map, &i);
+}
+
+static const struct bpf_map_ops prog_array_ops = {
+	.map_alloc = prog_array_map_alloc,
+	.map_free = prog_array_map_free,
+	.map_get_next_key = array_map_get_next_key,
+	.map_lookup_elem = prog_array_map_lookup_elem,
+	.map_update_elem = prog_array_map_update_elem,
+	.map_delete_elem = prog_array_map_delete_elem,
+};
+
+static struct bpf_map_type_list prog_array_type __read_mostly = {
+	.ops = &prog_array_ops,
+	.type = BPF_MAP_TYPE_PROG_ARRAY,
+};
+
+static int __init register_prog_array_map(void)
+{
+	bpf_register_map_type(&prog_array_type);
+	return 0;
+}
+late_initcall(register_prog_array_map);
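
From userspace a prog_array behaves like any other map, except that values
written through the syscall are bpf_prog file descriptors (hence the
value_size check above) and only BPF_ANY updates are accepted. Hedged sketch
with a raw syscall wrapper:

    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    static long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
    {
            return syscall(__NR_bpf, cmd, attr, size);
    }

    static int prog_array_set_slot0(int prog_fd)
    {
            union bpf_attr attr;
            uint32_t key = 0, value = prog_fd;
            int map_fd;

            memset(&attr, 0, sizeof(attr));
            attr.map_type = BPF_MAP_TYPE_PROG_ARRAY;
            attr.key_size = sizeof(uint32_t);
            attr.value_size = sizeof(uint32_t);	/* holds a prog fd */
            attr.max_entries = 4;
            map_fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
            if (map_fd < 0)
                    return -1;

            memset(&attr, 0, sizeof(attr));
            attr.map_fd = map_fd;
            attr.key = (uint64_t)(unsigned long)&key;
            attr.value = (uint64_t)(unsigned long)&value;
            attr.flags = BPF_ANY;
            return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
    }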
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 54f0e7f..c5bedc8 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -26,9 +26,10 @@
 #include <linux/vmalloc.h>
 #include <linux/random.h>
 #include <linux/moduleloader.h>
-#include <asm/unaligned.h>
 #include <linux/bpf.h>
 
+#include <asm/unaligned.h>
+
 /* Registers */
 #define BPF_R0	regs[BPF_REG_0]
 #define BPF_R1	regs[BPF_REG_1]
@@ -62,6 +63,7 @@
 		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
 	else if (k >= SKF_LL_OFF)
 		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
+
 	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
 		return ptr;
 
@@ -244,6 +246,7 @@
 		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
 		/* Call instruction */
 		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
+		[BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
 		/* Jumps */
 		[BPF_JMP | BPF_JA] = &&JMP_JA,
 		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
@@ -286,6 +289,7 @@
 		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
 		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
 	};
+	u32 tail_call_cnt = 0;
 	void *ptr;
 	int off;
 
@@ -431,6 +435,30 @@
 						       BPF_R4, BPF_R5);
 		CONT;
 
+	JMP_TAIL_CALL: {
+		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
+		struct bpf_array *array = container_of(map, struct bpf_array, map);
+		struct bpf_prog *prog;
+		u64 index = BPF_R3;
+
+		if (unlikely(index >= array->map.max_entries))
+			goto out;
+
+		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
+			goto out;
+
+		tail_call_cnt++;
+
+		prog = READ_ONCE(array->prog[index]);
+		if (unlikely(!prog))
+			goto out;
+
+		ARG1 = BPF_R1;
+		insn = prog->insnsi;
+		goto select_insn;
+out:
+		CONT;
+	}
 	/* JMP */
 	JMP_JA:
 		insn += insn->off;
@@ -615,25 +643,63 @@
 		return 0;
 }
 
-void __weak bpf_int_jit_compile(struct bpf_prog *prog)
+bool bpf_prog_array_compatible(struct bpf_array *array,
+			       const struct bpf_prog *fp)
 {
+	if (!array->owner_prog_type) {
+		/* There's no owner yet where we could check for
+		 * compatibility.
+		 */
+		array->owner_prog_type = fp->type;
+		array->owner_jited = fp->jited;
+
+		return true;
+	}
+
+	return array->owner_prog_type == fp->type &&
+	       array->owner_jited == fp->jited;
+}
+
+static int bpf_check_tail_call(const struct bpf_prog *fp)
+{
+	struct bpf_prog_aux *aux = fp->aux;
+	int i;
+
+	for (i = 0; i < aux->used_map_cnt; i++) {
+		struct bpf_map *map = aux->used_maps[i];
+		struct bpf_array *array;
+
+		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+			continue;
+
+		array = container_of(map, struct bpf_array, map);
+		if (!bpf_prog_array_compatible(array, fp))
+			return -EINVAL;
+	}
+
+	return 0;
 }
 
 /**
- *	bpf_prog_select_runtime - select execution runtime for BPF program
+ *	bpf_prog_select_runtime - select exec runtime for BPF program
  *	@fp: bpf_prog populated with internal BPF program
  *
- * try to JIT internal BPF program, if JIT is not available select interpreter
- * BPF program will be executed via BPF_PROG_RUN() macro
+ * Try to JIT eBPF program, if JIT is not available, use interpreter.
+ * The BPF program will be executed via BPF_PROG_RUN() macro.
  */
-void bpf_prog_select_runtime(struct bpf_prog *fp)
+int bpf_prog_select_runtime(struct bpf_prog *fp)
 {
 	fp->bpf_func = (void *) __bpf_prog_run;
 
-	/* Probe if internal BPF can be JITed */
 	bpf_int_jit_compile(fp);
-	/* Lock whole bpf_prog as read-only */
 	bpf_prog_lock_ro(fp);
+
+	/* The tail call compatibility check can only be done at
+	 * this late stage as we need to determine, if we deal
+	 * with JITed or non JITed program concatenations and not
+	 * all eBPF JITs might immediately support all features.
+	 */
+	return bpf_check_tail_call(fp);
 }
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
 
@@ -663,6 +729,29 @@
 
 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
+const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
+const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
+const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
+const struct bpf_func_proto bpf_get_current_comm_proto __weak;
+const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
+{
+	return NULL;
+}
+
+/* Always built-in helper functions. */
+const struct bpf_func_proto bpf_tail_call_proto = {
+	.func		= NULL,
+	.gpl_only	= false,
+	.ret_type	= RET_VOID,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_CONST_MAP_PTR,
+	.arg3_type	= ARG_ANYTHING,
+};
+
+/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
+void __weak bpf_int_jit_compile(struct bpf_prog *prog)
+{
+}
 
 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
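
From the program side the JMP_TAIL_CALL sequence above is reached through the
bpf_tail_call() helper: on success control never returns, otherwise (empty
slot, out-of-range index, or more than MAX_TAIL_CALL_CNT chained calls)
execution falls through. Restricted-C sketch in the style of samples/bpf
(map, section and program names are illustrative):

    #include <uapi/linux/bpf.h>
    #include <uapi/linux/ptrace.h>
    #include "bpf_helpers.h"	/* samples/bpf helper declarations */

    struct bpf_map_def SEC("maps") jmp_table = {
            .type = BPF_MAP_TYPE_PROG_ARRAY,
            .key_size = sizeof(__u32),
            .value_size = sizeof(__u32),
            .max_entries = 8,
    };

    SEC("kprobe/sys_write")
    int dispatch(struct pt_regs *ctx)
    {
            bpf_tail_call(ctx, &jmp_table, 2); /* jump into slot 2, if set */
            /* Reached only when the tail call did not take place. */
            return 0;
    }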
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index bd7f598..1447ec0 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -13,6 +13,9 @@
 #include <linux/rcupdate.h>
 #include <linux/random.h>
 #include <linux/smp.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
+#include <linux/uidgid.h>
 
 /* If kernel subsystem is allowing eBPF programs to call this function,
  * inside its own verifier_ops->get_func_proto() callback it should return
@@ -44,11 +47,11 @@
 }
 
 const struct bpf_func_proto bpf_map_lookup_elem_proto = {
-	.func = bpf_map_lookup_elem,
-	.gpl_only = false,
-	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
-	.arg1_type = ARG_CONST_MAP_PTR,
-	.arg2_type = ARG_PTR_TO_MAP_KEY,
+	.func		= bpf_map_lookup_elem,
+	.gpl_only	= false,
+	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_PTR_TO_MAP_KEY,
 };
 
 static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
@@ -63,13 +66,13 @@
 }
 
 const struct bpf_func_proto bpf_map_update_elem_proto = {
-	.func = bpf_map_update_elem,
-	.gpl_only = false,
-	.ret_type = RET_INTEGER,
-	.arg1_type = ARG_CONST_MAP_PTR,
-	.arg2_type = ARG_PTR_TO_MAP_KEY,
-	.arg3_type = ARG_PTR_TO_MAP_VALUE,
-	.arg4_type = ARG_ANYTHING,
+	.func		= bpf_map_update_elem,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_PTR_TO_MAP_KEY,
+	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
+	.arg4_type	= ARG_ANYTHING,
 };
 
 static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
@@ -83,11 +86,11 @@
 }
 
 const struct bpf_func_proto bpf_map_delete_elem_proto = {
-	.func = bpf_map_delete_elem,
-	.gpl_only = false,
-	.ret_type = RET_INTEGER,
-	.arg1_type = ARG_CONST_MAP_PTR,
-	.arg2_type = ARG_PTR_TO_MAP_KEY,
+	.func		= bpf_map_delete_elem,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_PTR_TO_MAP_KEY,
 };
 
 static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
@@ -111,3 +114,71 @@
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 };
+
+static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	/* NMI safe access to clock monotonic */
+	return ktime_get_mono_fast_ns();
+}
+
+const struct bpf_func_proto bpf_ktime_get_ns_proto = {
+	.func		= bpf_ktime_get_ns,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+};
+
+static u64 bpf_get_current_pid_tgid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	struct task_struct *task = current;
+
+	if (!task)
+		return -EINVAL;
+
+	return (u64) task->tgid << 32 | task->pid;
+}
+
+const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
+	.func		= bpf_get_current_pid_tgid,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+};
+
+static u64 bpf_get_current_uid_gid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	struct task_struct *task = current;
+	kuid_t uid;
+	kgid_t gid;
+
+	if (!task)
+		return -EINVAL;
+
+	current_uid_gid(&uid, &gid);
+	return (u64) from_kgid(&init_user_ns, gid) << 32 |
+		from_kuid(&init_user_ns, uid);
+}
+
+const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
+	.func		= bpf_get_current_uid_gid,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+};
+
+static u64 bpf_get_current_comm(u64 r1, u64 size, u64 r3, u64 r4, u64 r5)
+{
+	struct task_struct *task = current;
+	char *buf = (char *) (long) r1;
+
+	if (!task)
+		return -EINVAL;
+
+	memcpy(buf, task->comm, min_t(size_t, size, sizeof(task->comm)));
+	return 0;
+}
+
+const struct bpf_func_proto bpf_get_current_comm_proto = {
+	.func		= bpf_get_current_comm,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_STACK,
+	.arg2_type	= ARG_CONST_STACK_SIZE,
+};
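
Restricted-C sketch of the new helpers (samples/bpf style, assuming the
helper declarations there; note the packing above puts tgid in the upper 32
bits, and bpf_trace_printk() only accepts the integer/pointer specifiers
listed in kernel/trace/bpf_trace.c):

    #include <uapi/linux/bpf.h>
    #include <uapi/linux/ptrace.h>
    #include "bpf_helpers.h"

    SEC("kprobe/sys_open")
    int who_opens(struct pt_regs *ctx)
    {
            char fmt[] = "open: tgid %d uid %d\n";
            char comm[16];	/* TASK_COMM_LEN */
            __u32 tgid = bpf_get_current_pid_tgid() >> 32;
            __u32 uid = (__u32)bpf_get_current_uid_gid();

            bpf_get_current_comm(comm, sizeof(comm)); /* e.g. for a map key */
            bpf_trace_printk(fmt, sizeof(fmt), tgid, uid);
            return 0;
    }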
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 3bae6c5..a1b14d1 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -68,6 +68,12 @@
 {
 	struct bpf_map *map = filp->private_data;
 
+	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
+		/* prog_array stores refcnt-ed bpf_prog pointers
+		 * release them all when user space closes prog_array_fd
+		 */
+		bpf_prog_array_map_clear(map);
+
 	bpf_map_put(map);
 	return 0;
 }
@@ -392,6 +398,19 @@
 			 */
 			BUG_ON(!prog->aux->ops->get_func_proto);
 
+			if (insn->imm == BPF_FUNC_tail_call) {
+				/* mark bpf_tail_call as different opcode
+				 * to avoid conditional branch in
+				 * interpreter for every normal call
+				 * and to prevent accidental JITing by
+				 * JIT compiler that doesn't support
+				 * bpf_tail_call yet
+				 */
+				insn->imm = 0;
+				insn->code |= BPF_X;
+				continue;
+			}
+
 			fn = prog->aux->ops->get_func_proto(insn->imm);
 			/* all functions that have prototype and verifier allowed
 			 * programs to call them, must be real in-kernel functions
@@ -413,6 +432,23 @@
 	kfree(aux->used_maps);
 }
 
+static void __prog_put_rcu(struct rcu_head *rcu)
+{
+	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
+
+	free_used_maps(aux);
+	bpf_prog_free(aux->prog);
+}
+
+/* version of bpf_prog_put() that is called after a grace period */
+void bpf_prog_put_rcu(struct bpf_prog *prog)
+{
+	if (atomic_dec_and_test(&prog->aux->refcnt)) {
+		prog->aux->prog = prog;
+		call_rcu(&prog->aux->rcu, __prog_put_rcu);
+	}
+}
+
 void bpf_prog_put(struct bpf_prog *prog)
 {
 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
@@ -426,7 +462,7 @@
 {
 	struct bpf_prog *prog = filp->private_data;
 
-	bpf_prog_put(prog);
+	bpf_prog_put_rcu(prog);
 	return 0;
 }
 
@@ -532,7 +568,9 @@
 	fixup_bpf_calls(prog);
 
 	/* eBPF program is ready to be JITed */
-	bpf_prog_select_runtime(prog);
+	err = bpf_prog_select_runtime(prog);
+	if (err < 0)
+		goto free_used_maps;
 
 	err = anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, O_RDWR | O_CLOEXEC);
 	if (err < 0)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 47dcd3a..039d866 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -907,6 +907,23 @@
 			fn->ret_type, func_id);
 		return -EINVAL;
 	}
+
+	if (map && map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
+	    func_id != BPF_FUNC_tail_call)
+		/* prog_array map type needs extra care:
+		 * only allow passing it into bpf_tail_call() for now.
+		 * bpf_map_delete_elem() can be allowed in the future,
+		 * while bpf_map_update_elem() must only be done via syscall
+		 */
+		return -EINVAL;
+
+	if (func_id == BPF_FUNC_tail_call &&
+	    map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+		/* don't allow any other map type to be passed into
+		 * bpf_tail_call()
+		 */
+		return -EINVAL;
+
 	return 0;
 }
 
@@ -1675,6 +1692,8 @@
 			}
 
 		} else if (class == BPF_STX) {
+			enum bpf_reg_type dst_reg_type;
+
 			if (BPF_MODE(insn->code) == BPF_XADD) {
 				err = check_xadd(env, insn);
 				if (err)
@@ -1683,11 +1702,6 @@
 				continue;
 			}
 
-			if (BPF_MODE(insn->code) != BPF_MEM ||
-			    insn->imm != 0) {
-				verbose("BPF_STX uses reserved fields\n");
-				return -EINVAL;
-			}
 			/* check src1 operand */
 			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
 			if (err)
@@ -1697,6 +1711,8 @@
 			if (err)
 				return err;
 
+			dst_reg_type = regs[insn->dst_reg].type;
+
 			/* check that memory (dst_reg + off) is writeable */
 			err = check_mem_access(env, insn->dst_reg, insn->off,
 					       BPF_SIZE(insn->code), BPF_WRITE,
@@ -1704,6 +1720,15 @@
 			if (err)
 				return err;
 
+			if (insn->imm == 0) {
+				insn->imm = dst_reg_type;
+			} else if (dst_reg_type != insn->imm &&
+				   (dst_reg_type == PTR_TO_CTX ||
+				    insn->imm == PTR_TO_CTX)) {
+				verbose("same insn cannot be used with different pointers\n");
+				return -EINVAL;
+			}
+
 		} else if (class == BPF_ST) {
 			if (BPF_MODE(insn->code) != BPF_MEM ||
 			    insn->src_reg != BPF_REG_0) {
@@ -1822,12 +1847,18 @@
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
 		if (BPF_CLASS(insn->code) == BPF_LDX &&
-		    (BPF_MODE(insn->code) != BPF_MEM ||
-		     insn->imm != 0)) {
+		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
 			verbose("BPF_LDX uses reserved fields\n");
 			return -EINVAL;
 		}
 
+		if (BPF_CLASS(insn->code) == BPF_STX &&
+		    ((BPF_MODE(insn->code) != BPF_MEM &&
+		      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
+			verbose("BPF_STX uses reserved fields\n");
+			return -EINVAL;
+		}
+
 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
 			struct bpf_map *map;
 			struct fd f;
@@ -1950,12 +1981,17 @@
 	struct bpf_prog *new_prog;
 	u32 cnt;
 	int i;
+	enum bpf_access_type type;
 
 	if (!env->prog->aux->ops->convert_ctx_access)
 		return 0;
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
-		if (insn->code != (BPF_LDX | BPF_MEM | BPF_W))
+		if (insn->code == (BPF_LDX | BPF_MEM | BPF_W))
+			type = BPF_READ;
+		else if (insn->code == (BPF_STX | BPF_MEM | BPF_W))
+			type = BPF_WRITE;
+		else
 			continue;
 
 		if (insn->imm != PTR_TO_CTX) {
@@ -1965,7 +2001,7 @@
 		}
 
 		cnt = env->prog->aux->ops->
-			convert_ctx_access(insn->dst_reg, insn->src_reg,
+			convert_ctx_access(type, insn->dst_reg, insn->src_reg,
 					   insn->off, insn_buf);
 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
 			verbose("bpf verifier is misconfigured\n");
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 4f44028..245df6b 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -346,16 +346,13 @@
  */
 static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
 {
-	struct seccomp_filter *filter;
-	unsigned long fp_size;
-	struct sock_filter *fp;
-	int new_len;
-	long ret;
+	struct seccomp_filter *sfilter;
+	int ret;
 
 	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
 		return ERR_PTR(-EINVAL);
+
 	BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));
-	fp_size = fprog->len * sizeof(struct sock_filter);
 
 	/*
 	 * Installing a seccomp filter requires that the task has
@@ -368,60 +365,21 @@
 				     CAP_SYS_ADMIN) != 0)
 		return ERR_PTR(-EACCES);
 
-	fp = kzalloc(fp_size, GFP_KERNEL|__GFP_NOWARN);
-	if (!fp)
+	/* Allocate a new seccomp_filter */
+	sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
+	if (!sfilter)
 		return ERR_PTR(-ENOMEM);
 
-	/* Copy the instructions from fprog. */
-	ret = -EFAULT;
-	if (copy_from_user(fp, fprog->filter, fp_size))
-		goto free_prog;
+	ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
+					seccomp_check_filter);
+	if (ret < 0) {
+		kfree(sfilter);
+		return ERR_PTR(ret);
+	}
 
-	/* Check and rewrite the fprog via the skb checker */
-	ret = bpf_check_classic(fp, fprog->len);
-	if (ret)
-		goto free_prog;
+	atomic_set(&sfilter->usage, 1);
 
-	/* Check and rewrite the fprog for seccomp use */
-	ret = seccomp_check_filter(fp, fprog->len);
-	if (ret)
-		goto free_prog;
-
-	/* Convert 'sock_filter' insns to 'bpf_insn' insns */
-	ret = bpf_convert_filter(fp, fprog->len, NULL, &new_len);
-	if (ret)
-		goto free_prog;
-
-	/* Allocate a new seccomp_filter */
-	ret = -ENOMEM;
-	filter = kzalloc(sizeof(struct seccomp_filter),
-			 GFP_KERNEL|__GFP_NOWARN);
-	if (!filter)
-		goto free_prog;
-
-	filter->prog = bpf_prog_alloc(bpf_prog_size(new_len), __GFP_NOWARN);
-	if (!filter->prog)
-		goto free_filter;
-
-	ret = bpf_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
-	if (ret)
-		goto free_filter_prog;
-
-	kfree(fp);
-	atomic_set(&filter->usage, 1);
-	filter->prog->len = new_len;
-
-	bpf_prog_select_runtime(filter->prog);
-
-	return filter;
-
-free_filter_prog:
-	__bpf_prog_free(filter->prog);
-free_filter:
-	kfree(filter);
-free_prog:
-	kfree(fp);
-	return ERR_PTR(ret);
+	return sfilter;
 }
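
The classic-BPF filter userspace supplies is unchanged by this refactor; it
still arrives as a sock_fprog and is now converted and JITed through
bpf_prog_create_from_user(). For reference, a minimal install sketch
(illustrative; denies chroot(2) with EPERM):

    #include <errno.h>
    #include <stddef.h>
    #include <sys/prctl.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>
    #include <linux/unistd.h>

    static int install_filter(void)
    {
            struct sock_filter insns[] = {
                    BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                             offsetof(struct seccomp_data, nr)),
                    BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_chroot, 0, 1),
                    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
                    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
            };
            struct sock_fprog prog = {
                    .len = sizeof(insns) / sizeof(insns[0]),
                    .filter = insns,
            };

            if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                    return -1;
            return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
    }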
 
 /**
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 2d56ce5..88a041a 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -79,18 +79,6 @@
 	.arg3_type	= ARG_ANYTHING,
 };
 
-static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
-{
-	/* NMI safe access to clock monotonic */
-	return ktime_get_mono_fast_ns();
-}
-
-static const struct bpf_func_proto bpf_ktime_get_ns_proto = {
-	.func		= bpf_ktime_get_ns,
-	.gpl_only	= true,
-	.ret_type	= RET_INTEGER,
-};
-
 /*
  * limited trace_printk()
  * only %d %u %x %ld %lu %lx %lld %llu %llx %p conversion specifiers allowed
@@ -159,6 +147,17 @@
 	.arg2_type	= ARG_CONST_STACK_SIZE,
 };
 
+const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
+{
+	/*
+	 * this program might be calling bpf_trace_printk,
+	 * so allocate per-cpu printk buffers
+	 */
+	trace_printk_init_buffers();
+
+	return &bpf_trace_printk_proto;
+}
+
 static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
 {
 	switch (func_id) {
@@ -172,15 +171,18 @@
 		return &bpf_probe_read_proto;
 	case BPF_FUNC_ktime_get_ns:
 		return &bpf_ktime_get_ns_proto;
-
+	case BPF_FUNC_tail_call:
+		return &bpf_tail_call_proto;
+	case BPF_FUNC_get_current_pid_tgid:
+		return &bpf_get_current_pid_tgid_proto;
+	case BPF_FUNC_get_current_uid_gid:
+		return &bpf_get_current_uid_gid_proto;
+	case BPF_FUNC_get_current_comm:
+		return &bpf_get_current_comm_proto;
 	case BPF_FUNC_trace_printk:
-		/*
-		 * this program might be calling bpf_trace_printk,
-		 * so allocate per-cpu printk buffers
-		 */
-		trace_printk_init_buffers();
-
-		return &bpf_trace_printk_proto;
+		return bpf_get_trace_printk_proto();
+	case BPF_FUNC_get_smp_processor_id:
+		return &bpf_get_smp_processor_id_proto;
 	default:
 		return NULL;
 	}
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 8609378..a60a6d3 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -585,7 +585,6 @@
 	struct bucket_table *tbl = iter->walker->tbl;
 	struct rhashtable *ht = iter->ht;
 	struct rhash_head *p = iter->p;
-	void *obj = NULL;
 
 	if (p) {
 		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
@@ -605,8 +604,7 @@
 		if (!rht_is_a_nulls(p)) {
 			iter->skip++;
 			iter->p = p;
-			obj = rht_obj(ht, p);
-			goto out;
+			return rht_obj(ht, p);
 		}
 
 		iter->skip = 0;
@@ -624,9 +622,7 @@
 
 	iter->p = NULL;
 
-out:
-
-	return obj;
+	return NULL;
 }
 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
 
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 80d78c5..7f58c73 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -21,6 +21,7 @@
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
+#include <linux/random.h>
 
 /* General test specific settings */
 #define MAX_SUBTESTS	3
@@ -67,6 +68,10 @@
 	union {
 		struct sock_filter insns[MAX_INSNS];
 		struct bpf_insn insns_int[MAX_INSNS];
+		struct {
+			void *insns;
+			unsigned int len;
+		} ptr;
 	} u;
 	__u8 aux;
 	__u8 data[MAX_DATA];
@@ -74,8 +79,282 @@
 		int data_size;
 		__u32 result;
 	} test[MAX_SUBTESTS];
+	int (*fill_helper)(struct bpf_test *self);
 };
 
+/* Large test cases need separate allocation and fill handler. */
+
+static int bpf_fill_maxinsns1(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS;
+	struct sock_filter *insn;
+	__u32 k = ~0;
+	int i;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	for (i = 0; i < len; i++, k--)
+		insn[i] = __BPF_STMT(BPF_RET | BPF_K, k);
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
+static int bpf_fill_maxinsns2(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS;
+	struct sock_filter *insn;
+	int i;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	for (i = 0; i < len; i++)
+		insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
+static int bpf_fill_maxinsns3(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS;
+	struct sock_filter *insn;
+	struct rnd_state rnd;
+	int i;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	prandom_seed_state(&rnd, 3141592653589793238ULL);
+
+	for (i = 0; i < len - 1; i++) {
+		__u32 k = prandom_u32_state(&rnd);
+
+		insn[i] = __BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, k);
+	}
+
+	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
+static int bpf_fill_maxinsns4(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS + 1;
+	struct sock_filter *insn;
+	int i;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	for (i = 0; i < len; i++)
+		insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
+static int bpf_fill_maxinsns5(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS;
+	struct sock_filter *insn;
+	int i;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0);
+
+	for (i = 1; i < len - 1; i++)
+		insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);
+
+	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab);
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
+static int bpf_fill_maxinsns6(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS;
+	struct sock_filter *insn;
+	int i;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	for (i = 0; i < len - 1; i++)
+		insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
+				     SKF_AD_VLAN_TAG_PRESENT);
+
+	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
+static int bpf_fill_maxinsns7(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS;
+	struct sock_filter *insn;
+	int i;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	for (i = 0; i < len - 4; i++)
+		insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
+				     SKF_AD_CPU);
+
+	insn[len - 4] = __BPF_STMT(BPF_MISC | BPF_TAX, 0);
+	insn[len - 3] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
+				   SKF_AD_CPU);
+	insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0);
+	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
+static int bpf_fill_maxinsns8(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS;
+	struct sock_filter *insn;
+	int i, jmp_off = len - 3;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	insn[0] = __BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff);
+
+	for (i = 1; i < len - 1; i++)
+		insn[i] = __BPF_JUMP(BPF_JMP | BPF_JGT, 0xffffffff, jmp_off--, 0);
+
+	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
+static int bpf_fill_maxinsns9(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS;
+	struct bpf_insn *insn;
+	int i;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	insn[0] = BPF_JMP_IMM(BPF_JA, 0, 0, len - 2);
+	insn[1] = BPF_ALU32_IMM(BPF_MOV, R0, 0xcbababab);
+	insn[2] = BPF_EXIT_INSN();
+
+	for (i = 3; i < len - 2; i++)
+		insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xfefefefe);
+
+	insn[len - 2] = BPF_EXIT_INSN();
+	insn[len - 1] = BPF_JMP_IMM(BPF_JA, 0, 0, -(len - 1));
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
+static int bpf_fill_maxinsns10(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS, hlen = len - 2;
+	struct bpf_insn *insn;
+	int i;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	for (i = 0; i < hlen / 2; i++)
+		insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 2 - 2 * i);
+	for (i = hlen - 1; i > hlen / 2; i--)
+		insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 1 - 2 * i);
+
+	insn[hlen / 2] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen / 2 - 1);
+	insn[hlen]     = BPF_ALU32_IMM(BPF_MOV, R0, 0xabababac);
+	insn[hlen + 1] = BPF_EXIT_INSN();
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
+static int __bpf_fill_ja(struct bpf_test *self, unsigned int len,
+			 unsigned int plen)
+{
+	struct sock_filter *insn;
+	unsigned int rlen;
+	int i, j;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	rlen = (len % plen) - 1;
+
+	for (i = 0; i + plen < len; i += plen)
+		for (j = 0; j < plen; j++)
+			insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA,
+						 plen - 1 - j, 0, 0);
+	for (j = 0; j < rlen; j++)
+		insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA, rlen - 1 - j,
+					 0, 0);
+
+	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xababcbac);
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
+static int bpf_fill_maxinsns11(struct bpf_test *self)
+{
+	/* Hits 70 passes on x86_64, so cannot get JITed there. */
+	return __bpf_fill_ja(self, BPF_MAXINSNS, 68);
+}
+
+static int bpf_fill_ja(struct bpf_test *self)
+{
+	/* Hits exactly 11 passes on x86_64 JIT. */
+	return __bpf_fill_ja(self, 12, 9);
+}
+
 static struct bpf_test tests[] = {
 	{
 		"TAX",
@@ -1755,7 +2034,8 @@
 			BPF_EXIT_INSN(),
 			BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1),
 			BPF_EXIT_INSN(),
-			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_LD_IMM64(R0, 0x1ffffffffLL),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32), /* R0 = 1 */
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
@@ -1805,6 +2085,2313 @@
 		  0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6},
 		{ { 38, 256 } }
 	},
+	/* BPF_ALU | BPF_MOV | BPF_X */
+	{
+		"ALU_MOV_X: dst = 2",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU32_REG(BPF_MOV, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU_MOV_X: dst = 4294967295",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
+			BPF_ALU32_REG(BPF_MOV, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 4294967295U } },
+	},
+	{
+		"ALU64_MOV_X: dst = 2",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU64_REG(BPF_MOV, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_MOV_X: dst = 4294967295",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
+			BPF_ALU64_REG(BPF_MOV, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 4294967295U } },
+	},
+	/* BPF_ALU | BPF_MOV | BPF_K */
+	{
+		"ALU_MOV_K: dst = 2",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU_MOV_K: dst = 4294967295",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 4294967295U),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 4294967295U } },
+	},
+	{
+		"ALU_MOV_K: 0x0000ffffffff0000 = 0x00000000ffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+			BPF_LD_IMM64(R3, 0x00000000ffffffffLL),
+			BPF_ALU32_IMM(BPF_MOV, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_MOV_K: dst = 2",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_MOV_K: dst = 2147483647",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 2147483647),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2147483647 } },
+	},
+	{
+		"ALU64_MOV_K: dst = 0x0",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+			BPF_LD_IMM64(R3, 0x0),
+			BPF_ALU64_IMM(BPF_MOV, R2, 0x0),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_MOV_K: dst = -1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+			BPF_ALU64_IMM(BPF_MOV, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	/* BPF_ALU | BPF_ADD | BPF_X */
+	{
+		"ALU_ADD_X: 1 + 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU32_REG(BPF_ADD, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_ADD_X: 1 + 4294967294 = 4294967295",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+			BPF_ALU32_REG(BPF_ADD, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 4294967295U } },
+	},
+	{
+		"ALU64_ADD_X: 1 + 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_ADD_X: 1 + 4294967294 = 4294967295",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 4294967295U } },
+	},
+	/* BPF_ALU | BPF_ADD | BPF_K */
+	{
+		"ALU_ADD_K: 1 + 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_ADD, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_ADD_K: 3 + 0 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_ADD, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_ADD_K: 1 + 4294967294 = 4294967295",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_ADD, R0, 4294967294U),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 4294967295U } },
+	},
+	{
+		"ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0x00000000ffffffff),
+			BPF_ALU32_IMM(BPF_ADD, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_ADD_K: 1 + 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU64_IMM(BPF_ADD, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_ADD_K: 3 + 0 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU64_IMM(BPF_ADD, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_ADD_K: 1 + 2147483646 = 2147483647",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU64_IMM(BPF_ADD, R0, 2147483646),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2147483647 } },
+	},
+	{
+		"ALU64_ADD_K: 2147483646 + -2147483647 = -1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2147483646),
+			BPF_ALU64_IMM(BPF_ADD, R0, -2147483647),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -1 } },
+	},
+	{
+		"ALU64_ADD_K: 1 + 0 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x1),
+			BPF_LD_IMM64(R3, 0x1),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0x0),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_ADD_K: 0 + (-1) = 0xffffffffffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	/* BPF_ALU | BPF_SUB | BPF_X */
+	{
+		"ALU_SUB_X: 3 - 1 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_MOV, R1, 1),
+			BPF_ALU32_REG(BPF_SUB, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU_SUB_X: 4294967295 - 4294967294 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967295U),
+			BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+			BPF_ALU32_REG(BPF_SUB, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_SUB_X: 3 - 1 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_MOV, R1, 1),
+			BPF_ALU64_REG(BPF_SUB, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_SUB_X: 4294967295 - 4294967294 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967295U),
+			BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+			BPF_ALU64_REG(BPF_SUB, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_ALU | BPF_SUB | BPF_K */
+	{
+		"ALU_SUB_K: 3 - 1 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_SUB, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU_SUB_K: 3 - 0 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_SUB, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_SUB_K: 4294967295 - 4294967294 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967295U),
+			BPF_ALU32_IMM(BPF_SUB, R0, 4294967294U),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_SUB_K: 3 - 1 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU64_IMM(BPF_SUB, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_SUB_K: 3 - 0 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU64_IMM(BPF_SUB, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_SUB_K: 4294967294 - 4294967295 = -1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967294U),
+			BPF_ALU64_IMM(BPF_SUB, R0, 4294967295U),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -1 } },
+	},
+	{
+		"ALU64_SUB_K: 2147483646 - 2147483647 = -1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2147483646),
+			BPF_ALU64_IMM(BPF_SUB, R0, 2147483647),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -1 } },
+	},
+	/* BPF_ALU | BPF_MUL | BPF_X */
+	{
+		"ALU_MUL_X: 2 * 3 = 6",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, 3),
+			BPF_ALU32_REG(BPF_MUL, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 6 } },
+	},
+	{
+		"ALU_MUL_X: 2 * 0x7FFFFFF8 = 0xFFFFFFF0",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0x7FFFFFF8),
+			BPF_ALU32_REG(BPF_MUL, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xFFFFFFF0 } },
+	},
+	{
+		"ALU_MUL_X: -1 * -1 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, -1),
+			BPF_ALU32_IMM(BPF_MOV, R1, -1),
+			BPF_ALU32_REG(BPF_MUL, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_MUL_X: 2 * 3 = 6",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, 3),
+			BPF_ALU64_REG(BPF_MUL, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 6 } },
+	},
+	{
+		"ALU64_MUL_X: 1 * 2147483647 = 2147483647",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2147483647),
+			BPF_ALU64_REG(BPF_MUL, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2147483647 } },
+	},
+	/* BPF_ALU | BPF_MUL | BPF_K */
+	{
+		"ALU_MUL_K: 2 * 3 = 6",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU32_IMM(BPF_MUL, R0, 3),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 6 } },
+	},
+	{
+		"ALU_MUL_K: 3 * 1 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_MUL, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_MUL_K: 2 * 0x7FFFFFF8 = 0xFFFFFFF0",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU32_IMM(BPF_MUL, R0, 0x7FFFFFF8),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xFFFFFFF0 } },
+	},
+	{
+		"ALU_MUL_K: 1 * (-1) = 0x00000000ffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x1),
+			BPF_LD_IMM64(R3, 0x00000000ffffffff),
+			BPF_ALU32_IMM(BPF_MUL, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_MUL_K: 2 * 3 = 6",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU64_IMM(BPF_MUL, R0, 3),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 6 } },
+	},
+	{
+		"ALU64_MUL_K: 3 * 1 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU64_IMM(BPF_MUL, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_MUL_K: 1 * 2147483647 = 2147483647",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU64_IMM(BPF_MUL, R0, 2147483647),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2147483647 } },
+	},
+	{
+		"ALU64_MUL_K: 1 * -2147483647 = -2147483647",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU64_IMM(BPF_MUL, R0, -2147483647),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -2147483647 } },
+	},
+	{
+		"ALU64_MUL_K: 1 * (-1) = 0xffffffffffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x1),
+			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+			BPF_ALU64_IMM(BPF_MUL, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	/* BPF_ALU | BPF_DIV | BPF_X */
+	{
+		"ALU_DIV_X: 6 / 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 6),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU32_REG(BPF_DIV, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_DIV_X: 4294967295 / 4294967295 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967295U),
+			BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
+			BPF_ALU32_REG(BPF_DIV, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_DIV_X: 6 / 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 6),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU64_REG(BPF_DIV, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_DIV_X: 2147483647 / 2147483647 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2147483647),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2147483647),
+			BPF_ALU64_REG(BPF_DIV, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_DIV_X: 0xffffffffffffffff / (-1) = 0x0000000000000001",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+			BPF_LD_IMM64(R4, 0xffffffffffffffffLL),
+			BPF_LD_IMM64(R3, 0x0000000000000001LL),
+			BPF_ALU64_REG(BPF_DIV, R2, R4),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	/* BPF_ALU | BPF_DIV | BPF_K */
+	{
+		"ALU_DIV_K: 6 / 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 6),
+			BPF_ALU32_IMM(BPF_DIV, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_DIV_K: 3 / 1 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_DIV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_DIV_K: 4294967295 / 4294967295 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967295U),
+			BPF_ALU32_IMM(BPF_DIV, R0, 4294967295U),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU_DIV_K: 0xffffffffffffffff / (-1) = 0x1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+			BPF_LD_IMM64(R3, 0x1UL),
+			BPF_ALU32_IMM(BPF_DIV, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_DIV_K: 6 / 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 6),
+			BPF_ALU64_IMM(BPF_DIV, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_DIV_K: 3 / 1 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU64_IMM(BPF_DIV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_DIV_K: 2147483647 / 2147483647 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2147483647),
+			BPF_ALU64_IMM(BPF_DIV, R0, 2147483647),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_DIV_K: 0xffffffffffffffff / (-1) = 0x0000000000000001",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+			BPF_LD_IMM64(R3, 0x0000000000000001LL),
+			BPF_ALU64_IMM(BPF_DIV, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	/* BPF_ALU | BPF_MOD | BPF_X */
+	{
+		"ALU_MOD_X: 3 % 2 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU32_REG(BPF_MOD, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU_MOD_X: 4294967295 % 4294967293 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967295U),
+			BPF_ALU32_IMM(BPF_MOV, R1, 4294967293U),
+			BPF_ALU32_REG(BPF_MOD, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_MOD_X: 3 % 2 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU64_REG(BPF_MOD, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_MOD_X: 2147483647 % 2147483645 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2147483647),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2147483645),
+			BPF_ALU64_REG(BPF_MOD, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	/* BPF_ALU | BPF_MOD | BPF_K */
+	{
+		"ALU_MOD_K: 3 % 2 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_MOD, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU_MOD_K: 3 % 1 = 0",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_MOD, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0 } },
+	},
+	{
+		"ALU_MOD_K: 4294967295 % 4294967293 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967295U),
+			BPF_ALU32_IMM(BPF_MOD, R0, 4294967293U),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_MOD_K: 3 % 2 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU64_IMM(BPF_MOD, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_MOD_K: 3 % 1 = 0",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU64_IMM(BPF_MOD, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0 } },
+	},
+	{
+		"ALU64_MOD_K: 2147483647 % 2147483645 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2147483647),
+			BPF_ALU64_IMM(BPF_MOD, R0, 2147483645),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	/* BPF_ALU | BPF_AND | BPF_X */
+	{
+		"ALU_AND_X: 3 & 2 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU32_REG(BPF_AND, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU_AND_X: 0xffffffff & 0xffffffff = 0xffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xffffffff),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+			BPF_ALU32_REG(BPF_AND, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	{
+		"ALU64_AND_X: 3 & 2 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU64_REG(BPF_AND, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_AND_X: 0xffffffff & 0xffffffff = 0xffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xffffffff),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+			BPF_ALU64_REG(BPF_AND, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	/* BPF_ALU | BPF_AND | BPF_K */
+	{
+		"ALU_AND_K: 3 & 2 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_AND, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU_AND_K: 0xffffffff & 0xffffffff = 0xffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xffffffff),
+			BPF_ALU32_IMM(BPF_AND, R0, 0xffffffff),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	{
+		"ALU64_AND_K: 3 & 2 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU64_IMM(BPF_AND, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_AND_K: 0xffffffff & 0xffffffff = 0xffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xffffffff),
+			BPF_ALU64_IMM(BPF_AND, R0, 0xffffffff),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	{
+		"ALU64_AND_K: 0x0000ffffffff0000 & 0x0 = 0x0000ffff00000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+			BPF_LD_IMM64(R3, 0x0000000000000000LL),
+			BPF_ALU64_IMM(BPF_AND, R2, 0x0),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_AND_K: 0x0000ffffffff0000 & -1 = 0x0000ffffffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+			BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
+			BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_AND_K: 0xffffffffffffffff & -1 = 0xffffffffffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+			BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
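+	/* Note: the K immediate is only 32 bits wide and is sign-
+	 * extended for ALU64 operations, which is why the "-1" cases
+	 * in this table encode the operand as 0xffffffff.
+	 */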
+	/* BPF_ALU | BPF_OR | BPF_X */
+	{
+		"ALU_OR_X: 1 | 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU32_REG(BPF_OR, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_OR_X: 0x0 | 0xffffffff = 0xffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+			BPF_ALU32_REG(BPF_OR, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	{
+		"ALU64_OR_X: 1 | 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU64_REG(BPF_OR, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_OR_X: 0 | 0xffffffff = 0xffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+			BPF_ALU64_REG(BPF_OR, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	/* BPF_ALU | BPF_OR | BPF_K */
+	{
+		"ALU_OR_K: 1 | 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_OR, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_OR_K: 0 & 0xffffffff = 0xffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0),
+			BPF_ALU32_IMM(BPF_OR, R0, 0xffffffff),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	{
+		"ALU64_OR_K: 1 | 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU64_IMM(BPF_OR, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_OR_K: 0 & 0xffffffff = 0xffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0),
+			BPF_ALU64_IMM(BPF_OR, R0, 0xffffffff),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	{
+		"ALU64_OR_K: 0x0000ffffffff0000 | 0x0 = 0x0000ffff00000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+			BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
+			BPF_ALU64_IMM(BPF_OR, R2, 0x0),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_OR_K: 0x0000ffffffff0000 | -1 = 0xffffffffffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+			BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_OR_K: 0x000000000000000 | -1 = 0xffffffffffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000000000000000LL),
+			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+			BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	/* BPF_ALU | BPF_XOR | BPF_X */
+	{
+		"ALU_XOR_X: 5 ^ 6 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 5),
+			BPF_ALU32_IMM(BPF_MOV, R1, 6),
+			BPF_ALU32_REG(BPF_XOR, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_XOR_X: 0x1 ^ 0xffffffff = 0xfffffffe",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+			BPF_ALU32_REG(BPF_XOR, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xfffffffe } },
+	},
+	{
+		"ALU64_XOR_X: 5 ^ 6 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 5),
+			BPF_ALU32_IMM(BPF_MOV, R1, 6),
+			BPF_ALU64_REG(BPF_XOR, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_XOR_X: 1 ^ 0xffffffff = 0xfffffffe",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+			BPF_ALU64_REG(BPF_XOR, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xfffffffe } },
+	},
+	/* BPF_ALU | BPF_XOR | BPF_K */
+	{
+		"ALU_XOR_K: 5 ^ 6 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 5),
+			BPF_ALU32_IMM(BPF_XOR, R0, 6),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_XOR_K: 1 ^ 0xffffffff = 0xfffffffe",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_XOR, R0, 0xffffffff),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xfffffffe } },
+	},
+	{
+		"ALU64_XOR_K: 5 ^ 6 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 5),
+			BPF_ALU64_IMM(BPF_XOR, R0, 6),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_XOR_K: 1 & 0xffffffff = 0xfffffffe",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU64_IMM(BPF_XOR, R0, 0xffffffff),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xfffffffe } },
+	},
+	{
+		"ALU64_XOR_K: 0x0000ffffffff0000 ^ 0x0 = 0x0000ffffffff0000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+			BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
+			BPF_ALU64_IMM(BPF_XOR, R2, 0x0),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_XOR_K: 0x0000ffffffff0000 ^ -1 = 0xffff00000000ffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+			BPF_LD_IMM64(R3, 0xffff00000000ffffLL),
+			BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_XOR_K: 0x000000000000000 ^ -1 = 0xffffffffffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000000000000000LL),
+			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+			BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	/* BPF_ALU | BPF_LSH | BPF_X */
+	{
+		"ALU_LSH_X: 1 << 1 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 1),
+			BPF_ALU32_REG(BPF_LSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU_LSH_X: 1 << 31 = 0x80000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 31),
+			BPF_ALU32_REG(BPF_LSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x80000000 } },
+	},
+	{
+		"ALU64_LSH_X: 1 << 1 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 1),
+			BPF_ALU64_REG(BPF_LSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_LSH_X: 1 << 31 = 0x80000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 31),
+			BPF_ALU64_REG(BPF_LSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x80000000 } },
+	},
+	/* BPF_ALU | BPF_LSH | BPF_K */
+	{
+		"ALU_LSH_K: 1 << 1 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_LSH, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU_LSH_K: 1 << 31 = 0x80000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_LSH, R0, 31),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x80000000 } },
+	},
+	{
+		"ALU64_LSH_K: 1 << 1 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU64_IMM(BPF_LSH, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_LSH_K: 1 << 31 = 0x80000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU64_IMM(BPF_LSH, R0, 31),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x80000000 } },
+	},
+	/* BPF_ALU | BPF_RSH | BPF_X */
+	{
+		"ALU_RSH_X: 2 >> 1 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, 1),
+			BPF_ALU32_REG(BPF_RSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU_RSH_X: 0x80000000 >> 31 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x80000000),
+			BPF_ALU32_IMM(BPF_MOV, R1, 31),
+			BPF_ALU32_REG(BPF_RSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_RSH_X: 2 >> 1 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, 1),
+			BPF_ALU64_REG(BPF_RSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_RSH_X: 0x80000000 >> 31 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x80000000),
+			BPF_ALU32_IMM(BPF_MOV, R1, 31),
+			BPF_ALU64_REG(BPF_RSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_ALU | BPF_RSH | BPF_K */
+	{
+		"ALU_RSH_K: 2 >> 1 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU32_IMM(BPF_RSH, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU_RSH_K: 0x80000000 >> 31 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x80000000),
+			BPF_ALU32_IMM(BPF_RSH, R0, 31),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_RSH_K: 2 >> 1 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU64_IMM(BPF_RSH, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_RSH_K: 0x80000000 >> 31 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x80000000),
+			BPF_ALU64_IMM(BPF_RSH, R0, 31),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_ALU | BPF_ARSH | BPF_X */
+	{
+		"ALU_ARSH_X: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 40),
+			BPF_ALU64_REG(BPF_ARSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffff00ff } },
+	},
+	/* BPF_ALU | BPF_ARSH | BPF_K */
+	{
+		"ALU_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
+			BPF_ALU64_IMM(BPF_ARSH, R0, 40),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffff00ff } },
+	},
+	/* BPF_ALU | BPF_NEG */
+	{
+		"ALU_NEG: -(3) = -3",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 3),
+			BPF_ALU32_IMM(BPF_NEG, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -3 } },
+	},
+	{
+		"ALU_NEG: -(-3) = 3",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, -3),
+			BPF_ALU32_IMM(BPF_NEG, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_NEG: -(3) = -3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU64_IMM(BPF_NEG, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -3 } },
+	},
+	{
+		"ALU64_NEG: -(-3) = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, -3),
+			BPF_ALU64_IMM(BPF_NEG, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	/* BPF_ALU | BPF_END | BPF_FROM_BE */
+	{
+		"ALU_END_FROM_BE 16: 0x0123456789abcdef -> 0xcdef",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ENDIAN(BPF_FROM_BE, R0, 16),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0,  cpu_to_be16(0xcdef) } },
+	},
+	{
+		"ALU_END_FROM_BE 32: 0x0123456789abcdef -> 0x89abcdef",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ENDIAN(BPF_FROM_BE, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, cpu_to_be32(0x89abcdef) } },
+	},
+	{
+		"ALU_END_FROM_BE 64: 0x0123456789abcdef -> 0x89abcdef",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ENDIAN(BPF_FROM_BE, R0, 64),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, (u32) cpu_to_be64(0x0123456789abcdefLL) } },
+	},
+	/* BPF_ALU | BPF_END | BPF_FROM_LE */
+	{
+		"ALU_END_FROM_LE 16: 0x0123456789abcdef -> 0xefcd",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ENDIAN(BPF_FROM_LE, R0, 16),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, cpu_to_le16(0xcdef) } },
+	},
+	{
+		"ALU_END_FROM_LE 32: 0x0123456789abcdef -> 0xefcdab89",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ENDIAN(BPF_FROM_LE, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, cpu_to_le32(0x89abcdef) } },
+	},
+	{
+		"ALU_END_FROM_LE 64: 0x0123456789abcdef -> 0x67452301",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ENDIAN(BPF_FROM_LE, R0, 64),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, (u32) cpu_to_le64(0x0123456789abcdefLL) } },
+	},
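+	/* The expected values for the BPF_END tests are wrapped in
+	 * cpu_to_be*()/cpu_to_le*() so that the same table passes on
+	 * both big- and little-endian hosts.
+	 */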
+	/* BPF_ST(X) | BPF_MEM | BPF_B/H/W/DW */
+	{
+		"ST_MEM_B: Store/Load byte: max negative",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_ST_MEM(BPF_B, R10, -40, 0xff),
+			BPF_LDX_MEM(BPF_B, R0, R10, -40),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xff } },
+	},
+	{
+		"ST_MEM_B: Store/Load byte: max positive",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_ST_MEM(BPF_B, R10, -40, 0x7f),
+			BPF_LDX_MEM(BPF_B, R0, R10, -40),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x7f } },
+	},
+	{
+		"STX_MEM_B: Store/Load byte: max negative",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0),
+			BPF_LD_IMM64(R1, 0xffLL),
+			BPF_STX_MEM(BPF_B, R10, R1, -40),
+			BPF_LDX_MEM(BPF_B, R0, R10, -40),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xff } },
+	},
+	{
+		"ST_MEM_H: Store/Load half word: max negative",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_ST_MEM(BPF_H, R10, -40, 0xffff),
+			BPF_LDX_MEM(BPF_H, R0, R10, -40),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffff } },
+	},
+	{
+		"ST_MEM_H: Store/Load half word: max positive",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_ST_MEM(BPF_H, R10, -40, 0x7fff),
+			BPF_LDX_MEM(BPF_H, R0, R10, -40),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x7fff } },
+	},
+	{
+		"STX_MEM_H: Store/Load half word: max negative",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0),
+			BPF_LD_IMM64(R1, 0xffffLL),
+			BPF_STX_MEM(BPF_H, R10, R1, -40),
+			BPF_LDX_MEM(BPF_H, R0, R10, -40),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffff } },
+	},
+	{
+		"ST_MEM_W: Store/Load word: max negative",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_ST_MEM(BPF_W, R10, -40, 0xffffffff),
+			BPF_LDX_MEM(BPF_W, R0, R10, -40),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	{
+		"ST_MEM_W: Store/Load word: max positive",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_ST_MEM(BPF_W, R10, -40, 0x7fffffff),
+			BPF_LDX_MEM(BPF_W, R0, R10, -40),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x7fffffff } },
+	},
+	{
+		"STX_MEM_W: Store/Load word: max negative",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0),
+			BPF_LD_IMM64(R1, 0xffffffffLL),
+			BPF_STX_MEM(BPF_W, R10, R1, -40),
+			BPF_LDX_MEM(BPF_W, R0, R10, -40),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	{
+		"ST_MEM_DW: Store/Load double word: max negative",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff),
+			BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	{
+		"ST_MEM_DW: Store/Load double word: max negative 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0xffff00000000ffffLL),
+			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+			BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff),
+			BPF_LDX_MEM(BPF_DW, R2, R10, -40),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ST_MEM_DW: Store/Load double word: max positive",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_ST_MEM(BPF_DW, R10, -40, 0x7fffffff),
+			BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x7fffffff } },
+	},
+	{
+		"STX_MEM_DW: Store/Load double word: max negative",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0),
+			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+			BPF_STX_MEM(BPF_DW, R10, R1, -40),
+			BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	/* BPF_STX | BPF_XADD | BPF_W/DW */
+	{
+		"STX_XADD_W: Test: 0x12 + 0x10 = 0x22",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
+			BPF_ST_MEM(BPF_W, R10, -40, 0x10),
+			BPF_STX_XADD(BPF_W, R10, R0, -40),
+			BPF_LDX_MEM(BPF_W, R0, R10, -40),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x22 } },
+	},
+	{
+		"STX_XADD_DW: Test: 0x12 + 0x10 = 0x22",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
+			BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
+			BPF_STX_XADD(BPF_DW, R10, R0, -40),
+			BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x22 } },
+	},
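+	/* BPF_STX | BPF_XADD atomically adds the source register into
+	 * the word/doubleword at dst + off; the tests above read the
+	 * location back to check the accumulated sum.
+	 */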
+	/* BPF_JMP | BPF_EXIT */
+	{
+		"JMP_EXIT",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x4711),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x4712),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x4711 } },
+	},
+	/* BPF_JMP | BPF_JA */
+	{
+		"JMP_JA: Unconditional jump: if (true) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_JMP | BPF_JSGT | BPF_K */
+	{
+		"JMP_JSGT_K: Signed jump: if (-1 > -2) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+			BPF_JMP_IMM(BPF_JSGT, R1, -2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"JMP_JSGT_K: Signed jump: if (-1 > -1) return 0",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+			BPF_JMP_IMM(BPF_JSGT, R1, -1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_JMP | BPF_JSGE | BPF_K */
+	{
+		"JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+			BPF_JMP_IMM(BPF_JSGE, R1, -2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"JMP_JSGE_K: Signed jump: if (-1 >= -1) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+			BPF_JMP_IMM(BPF_JSGE, R1, -1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_JMP | BPF_JGT | BPF_K */
+	{
+		"JMP_JGT_K: if (3 > 2) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_JMP_IMM(BPF_JGT, R1, 2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_JMP | BPF_JGE | BPF_K */
+	{
+		"JMP_JGE_K: if (3 >= 2) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_JMP_IMM(BPF_JGE, R1, 2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_JMP | BPF_JGT | BPF_K jump backwards */
+	{
+		"JMP_JGT_K: if (3 > 2) return 1 (jump backwards)",
+		.u.insns_int = {
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */
+			BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */
+			BPF_LD_IMM64(R1, 3), /* note: this takes 2 insns */
+			BPF_JMP_IMM(BPF_JGT, R1, 2, -6), /* goto out */
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"JMP_JGE_K: if (3 >= 3) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_JMP_IMM(BPF_JGE, R1, 3, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_JMP | BPF_JNE | BPF_K */
+	{
+		"JMP_JNE_K: if (3 != 2) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_JMP_IMM(BPF_JNE, R1, 2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_JMP | BPF_JEQ | BPF_K */
+	{
+		"JMP_JEQ_K: if (3 == 3) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_JMP_IMM(BPF_JEQ, R1, 3, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_JMP | BPF_JSET | BPF_K */
+	{
+		"JMP_JSET_K: if (0x3 & 0x2) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_JMP_IMM(BPF_JSET, R1, 2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"JMP_JSET_K: if (0x3 & 0xffffffff) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_JMP_IMM(BPF_JSET, R1, 0xffffffff, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_JMP | BPF_JSGT | BPF_X */
+	{
+		"JMP_JSGT_X: Signed jump: if (-1 > -2) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -1),
+			BPF_LD_IMM64(R2, -2),
+			BPF_JMP_REG(BPF_JSGT, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"JMP_JSGT_X: Signed jump: if (-1 > -1) return 0",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_LD_IMM64(R1, -1),
+			BPF_LD_IMM64(R2, -1),
+			BPF_JMP_REG(BPF_JSGT, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_JMP | BPF_JSGE | BPF_X */
+	{
+		"JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -1),
+			BPF_LD_IMM64(R2, -2),
+			BPF_JMP_REG(BPF_JSGE, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"JMP_JSGE_X: Signed jump: if (-1 >= -1) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -1),
+			BPF_LD_IMM64(R2, -1),
+			BPF_JMP_REG(BPF_JSGE, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_JMP | BPF_JGT | BPF_X */
+	{
+		"JMP_JGT_X: if (3 > 2) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JGT, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_JMP | BPF_JGE | BPF_X */
+	{
+		"JMP_JGE_X: if (3 >= 2) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JGE, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"JMP_JGE_X: if (3 >= 3) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 3),
+			BPF_JMP_REG(BPF_JGE, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_JMP | BPF_JNE | BPF_X */
+	{
+		"JMP_JNE_X: if (3 != 2) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JNE, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_JMP | BPF_JEQ | BPF_X */
+	{
+		"JMP_JEQ_X: if (3 == 3) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 3),
+			BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_JMP | BPF_JSET | BPF_X */
+	{
+		"JMP_JSET_X: if (0x3 & 0x2) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JSET, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"JMP_JSET_X: if (0x3 & 0xffffffff) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JSET, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"JMP_JA: Jump, gap, jump, ...",
+		{ },
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0xababcbac } },
+		.fill_helper = bpf_fill_ja,
+	},
+	{	/* Mainly checking JIT here. */
+		"BPF_MAXINSNS: Maximum possible literals",
+		{ },
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0xffffffff } },
+		.fill_helper = bpf_fill_maxinsns1,
+	},
+	{	/* Mainly checking JIT here. */
+		"BPF_MAXINSNS: Single literal",
+		{ },
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0xfefefefe } },
+		.fill_helper = bpf_fill_maxinsns2,
+	},
+	{	/* Mainly checking JIT here. */
+		"BPF_MAXINSNS: Run/add until end",
+		{ },
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0x947bf368 } },
+		.fill_helper = bpf_fill_maxinsns3,
+	},
+	{
+		"BPF_MAXINSNS: Too many instructions",
+		{ },
+		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+		{ },
+		{ },
+		.fill_helper = bpf_fill_maxinsns4,
+	},
+	{	/* Mainly checking JIT here. */
+		"BPF_MAXINSNS: Very long jump",
+		{ },
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0xabababab } },
+		.fill_helper = bpf_fill_maxinsns5,
+	},
+	{	/* Mainly checking JIT here. */
+		"BPF_MAXINSNS: Ctx heavy transformations",
+		{ },
+		CLASSIC,
+		{ },
+		{
+			{  1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
+			{ 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
+		},
+		.fill_helper = bpf_fill_maxinsns6,
+	},
+	{	/* Mainly checking JIT here. */
+		"BPF_MAXINSNS: Call heavy transformations",
+		{ },
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 1, 0 }, { 10, 0 } },
+		.fill_helper = bpf_fill_maxinsns7,
+	},
+	{	/* Mainly checking JIT here. */
+		"BPF_MAXINSNS: Jump heavy test",
+		{ },
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0xffffffff } },
+		.fill_helper = bpf_fill_maxinsns8,
+	},
+	{	/* Mainly checking JIT here. */
+		"BPF_MAXINSNS: Very long jump backwards",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0xcbababab } },
+		.fill_helper = bpf_fill_maxinsns9,
+	},
+	{	/* Mainly checking JIT here. */
+		"BPF_MAXINSNS: Edge hopping nuthouse",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0xabababac } },
+		.fill_helper = bpf_fill_maxinsns10,
+	},
+	{
+		"BPF_MAXINSNS: Jump, gap, jump, ...",
+		{ },
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0xababcbac } },
+		.fill_helper = bpf_fill_maxinsns11,
+	},
 };
 
 static struct net_device dev;
@@ -1858,10 +4445,15 @@
 	kfree_skb(data);
 }
 
-static int probe_filter_length(struct sock_filter *fp)
+static int filter_length(int which)
 {
-	int len = 0;
+	struct sock_filter *fp;
+	int len;
 
+	if (tests[which].fill_helper)
+		return tests[which].u.ptr.len;
+
+	fp = tests[which].u.insns;
 	for (len = MAX_INSNS - 1; len > 0; --len)
 		if (fp[len].code != 0 || fp[len].k != 0)
 			break;
@@ -1869,16 +4461,25 @@
 	return len + 1;
 }
 
+static void *filter_pointer(int which)
+{
+	if (tests[which].fill_helper)
+		return tests[which].u.ptr.insns;
+	else
+		return tests[which].u.insns;
+}
+
 static struct bpf_prog *generate_filter(int which, int *err)
 {
-	struct bpf_prog *fp;
-	struct sock_fprog_kern fprog;
-	unsigned int flen = probe_filter_length(tests[which].u.insns);
 	__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
+	unsigned int flen = filter_length(which);
+	void *fptr = filter_pointer(which);
+	struct sock_fprog_kern fprog;
+	struct bpf_prog *fp;
 
 	switch (test_type) {
 	case CLASSIC:
-		fprog.filter = tests[which].u.insns;
+		fprog.filter = fptr;
 		fprog.len = flen;
 
 		*err = bpf_prog_create(&fp, &fprog);
@@ -1914,8 +4515,7 @@
 		}
 
 		fp->len = flen;
-		memcpy(fp->insnsi, tests[which].u.insns_int,
-		       fp->len * sizeof(struct bpf_insn));
+		memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
 
 		bpf_prog_select_runtime(fp);
 		break;
@@ -1987,9 +4587,33 @@
 	return err_cnt;
 }
 
+static __init int prepare_bpf_tests(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tests); i++) {
+		if (tests[i].fill_helper &&
+		    tests[i].fill_helper(&tests[i]) < 0)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static __init void destroy_bpf_tests(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tests); i++) {
+		if (tests[i].fill_helper)
+			kfree(tests[i].u.ptr.insns);
+	}
+}
+
 static __init int test_bpf(void)
 {
 	int i, err_cnt = 0, pass_cnt = 0;
+	int jit_cnt = 0, run_cnt = 0;
 
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
 		struct bpf_prog *fp;
@@ -2006,6 +4630,13 @@
 
 			return err;
 		}
+
+		pr_cont("jited:%u ", fp->jited);
+
+		run_cnt++;
+		if (fp->jited)
+			jit_cnt++;
+
 		err = run_one(fp, &tests[i]);
 		release_filter(fp, i);
 
@@ -2018,13 +4649,24 @@
 		}
 	}
 
-	pr_info("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
+	pr_info("Summary: %d PASSED, %d FAILED, [%d/%d JIT'ed]\n",
+		pass_cnt, err_cnt, jit_cnt, run_cnt);
+
 	return err_cnt ? -EINVAL : 0;
 }
 
 static int __init test_bpf_init(void)
 {
-	return test_bpf();
+	int ret;
+
+	ret = prepare_bpf_tests();
+	if (ret < 0)
+		return ret;
+
+	ret = test_bpf();
+
+	destroy_bpf_tests();
+	return ret;
 }
 
 static void __exit test_bpf_exit(void)
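
For context, tests that set .fill_helper synthesize their program at load time instead of embedding it in the tests[] array: prepare_bpf_tests() invokes the helper once per entry and destroy_bpf_tests() frees the result. A minimal sketch of such a generator, assuming the struct bpf_test layout used above (helper name and constants are illustrative, not part of the patch):

	static int bpf_fill_ret_const(struct bpf_test *self)
	{
		struct bpf_insn *insn;

		/* smallest valid program: load a constant, then exit */
		insn = kmalloc(2 * sizeof(*insn), GFP_KERNEL);
		if (!insn)
			return -ENOMEM;

		insn[0] = BPF_ALU32_IMM(BPF_MOV, R0, 0xbee);
		insn[1] = BPF_EXIT_INSN();

		self->u.ptr.insns = insn;
		self->u.ptr.len = 2;

		return 0;
	}

filter_pointer() and filter_length() then pick up u.ptr.insns and u.ptr.len instead of probing the static insns array.
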
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index b295754..c90777e 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -1,14 +1,9 @@
 /*
  * Resizable, Scalable, Concurrent Hash Table
  *
- * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
  *
- * Based on the following paper:
- * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
- *
- * Code partially derived from nft_hash
- *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -26,20 +21,37 @@
 #include <linux/rhashtable.h>
 #include <linux/slab.h>
 
+#define MAX_ENTRIES	1000000
+#define TEST_INSERT_FAIL INT_MAX
 
-#define TEST_HT_SIZE	8
-#define TEST_ENTRIES	2048
-#define TEST_PTR	((void *) 0xdeadbeef)
-#define TEST_NEXPANDS	4
+static int entries = 50000;
+module_param(entries, int, 0);
+MODULE_PARM_DESC(entries, "Number of entries to add (default: 50000)");
+
+static int runs = 4;
+module_param(runs, int, 0);
+MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)");
+
+static int max_size = 65536;
+module_param(max_size, int, 0);
+MODULE_PARM_DESC(runs, "Maximum table size (default: 65536)");
+
+static bool shrinking = false;
+module_param(shrinking, bool, 0);
+MODULE_PARM_DESC(shrinking, "Enable automatic shrinking (default: off)");
+
+static int size = 8;
+module_param(size, int, 0);
+MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)");
 
 struct test_obj {
-	void			*ptr;
 	int			value;
 	struct rhash_head	node;
 };
 
-static const struct rhashtable_params test_rht_params = {
-	.nelem_hint = TEST_HT_SIZE,
+static struct test_obj array[MAX_ENTRIES];
+
+static struct rhashtable_params test_rht_params = {
 	.head_offset = offsetof(struct test_obj, node),
 	.key_offset = offsetof(struct test_obj, value),
 	.key_len = sizeof(int),
@@ -51,11 +63,14 @@
 {
 	unsigned int i;
 
-	for (i = 0; i < TEST_ENTRIES * 2; i++) {
+	for (i = 0; i < entries * 2; i++) {
 		struct test_obj *obj;
 		bool expected = !(i % 2);
 		u32 key = i;
 
+		if (array[i / 2].value == TEST_INSERT_FAIL)
+			expected = false;
+
 		obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
 
 		if (expected && !obj) {
@@ -66,9 +81,9 @@
 				key);
 			return -EEXIST;
 		} else if (expected && obj) {
-			if (obj->ptr != TEST_PTR || obj->value != i) {
-				pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
-					obj->ptr, TEST_PTR, obj->value, i);
+			if (obj->value != i) {
+				pr_warn("Test failed: Lookup value mismatch %u!=%u\n",
+					obj->value, i);
 				return -EINVAL;
 			}
 		}
@@ -77,129 +92,147 @@
 	return 0;
 }
 
-static void test_bucket_stats(struct rhashtable *ht, bool quiet)
+static void test_bucket_stats(struct rhashtable *ht)
 {
-	unsigned int cnt, rcu_cnt, i, total = 0;
+	unsigned int err, total = 0, chain_len = 0;
+	struct rhashtable_iter hti;
 	struct rhash_head *pos;
-	struct test_obj *obj;
-	struct bucket_table *tbl;
 
-	tbl = rht_dereference_rcu(ht->tbl, ht);
-	for (i = 0; i < tbl->size; i++) {
-		rcu_cnt = cnt = 0;
-
-		if (!quiet)
-			pr_info(" [%#4x/%u]", i, tbl->size);
-
-		rht_for_each_entry_rcu(obj, pos, tbl, i, node) {
-			cnt++;
-			total++;
-			if (!quiet)
-				pr_cont(" [%p],", obj);
-		}
-
-		rht_for_each_entry_rcu(obj, pos, tbl, i, node)
-			rcu_cnt++;
-
-		if (rcu_cnt != cnt)
-			pr_warn("Test failed: Chain count mismach %d != %d",
-				cnt, rcu_cnt);
-
-		if (!quiet)
-			pr_cont("\n  [%#x] first element: %p, chain length: %u\n",
-				i, tbl->buckets[i], cnt);
+	err = rhashtable_walk_init(ht, &hti);
+	if (err) {
+		pr_warn("Test failed: allocation error");
+		return;
 	}
 
-	pr_info("  Traversal complete: counted=%u, nelems=%u, entries=%d\n",
-		total, atomic_read(&ht->nelems), TEST_ENTRIES);
+	err = rhashtable_walk_start(&hti);
+	if (err && err != -EAGAIN) {
+		pr_warn("Test failed: iterator failed: %d\n", err);
+		return;
+	}
 
-	if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES)
+	while ((pos = rhashtable_walk_next(&hti))) {
+		if (PTR_ERR(pos) == -EAGAIN) {
+			pr_info("Info: encountered resize\n");
+			chain_len++;
+			continue;
+		} else if (IS_ERR(pos)) {
+			pr_warn("Test failed: rhashtable_walk_next() error: %ld\n",
+				PTR_ERR(pos));
+			break;
+		}
+
+		total++;
+	}
+
+	rhashtable_walk_stop(&hti);
+	rhashtable_walk_exit(&hti);
+
+	pr_info("  Traversal complete: counted=%u, nelems=%u, entries=%d, table-jumps=%u\n",
+		total, atomic_read(&ht->nelems), entries, chain_len);
+
+	if (total != atomic_read(&ht->nelems) || total != entries)
 		pr_warn("Test failed: Total count mismatch ^^^");
 }
 
-static int __init test_rhashtable(struct rhashtable *ht)
+static s64 __init test_rhashtable(struct rhashtable *ht)
 {
-	struct bucket_table *tbl;
 	struct test_obj *obj;
-	struct rhash_head *pos, *next;
 	int err;
-	unsigned int i;
+	unsigned int i, insert_fails = 0;
+	s64 start, end;
 
 	/*
 	 * Insertion Test:
-	 * Insert TEST_ENTRIES into table with all keys even numbers
+	 * Insert entries into table with all keys even numbers
 	 */
-	pr_info("  Adding %d keys\n", TEST_ENTRIES);
-	for (i = 0; i < TEST_ENTRIES; i++) {
-		struct test_obj *obj;
+	pr_info("  Adding %d keys\n", entries);
+	start = ktime_get_ns();
+	for (i = 0; i < entries; i++) {
+		struct test_obj *obj = &array[i];
 
-		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-		if (!obj) {
-			err = -ENOMEM;
-			goto error;
-		}
-
-		obj->ptr = TEST_PTR;
 		obj->value = i * 2;
 
 		err = rhashtable_insert_fast(ht, &obj->node, test_rht_params);
-		if (err) {
-			kfree(obj);
-			goto error;
+		if (err == -ENOMEM || err == -EBUSY) {
+			/* Mark failed inserts but continue */
+			obj->value = TEST_INSERT_FAIL;
+			insert_fails++;
+		} else if (err) {
+			return err;
 		}
 	}
 
+	if (insert_fails)
+		pr_info("  %u insertions failed due to memory pressure\n",
+			insert_fails);
+
+	test_bucket_stats(ht);
 	rcu_read_lock();
-	test_bucket_stats(ht, true);
 	test_rht_lookup(ht);
 	rcu_read_unlock();
 
-	rcu_read_lock();
-	test_bucket_stats(ht, true);
-	rcu_read_unlock();
+	test_bucket_stats(ht);
 
-	pr_info("  Deleting %d keys\n", TEST_ENTRIES);
-	for (i = 0; i < TEST_ENTRIES; i++) {
+	pr_info("  Deleting %d keys\n", entries);
+	for (i = 0; i < entries; i++) {
 		u32 key = i * 2;
 
-		obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
-		BUG_ON(!obj);
+		if (array[i].value != TEST_INSERT_FAIL) {
+			obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
+			BUG_ON(!obj);
 
-		rhashtable_remove_fast(ht, &obj->node, test_rht_params);
-		kfree(obj);
+			rhashtable_remove_fast(ht, &obj->node, test_rht_params);
+		}
 	}
 
-	return 0;
+	end = ktime_get_ns();
+	pr_info("  Duration of test: %lld ns\n", end - start);
 
-error:
-	tbl = rht_dereference_rcu(ht->tbl, ht);
-	for (i = 0; i < tbl->size; i++)
-		rht_for_each_entry_safe(obj, pos, next, tbl, i, node)
-			kfree(obj);
-
-	return err;
+	return end - start;
 }
 
 static struct rhashtable ht;
 
 static int __init test_rht_init(void)
 {
-	int err;
+	int i, err;
+	u64 total_time = 0;
 
-	pr_info("Running resizable hashtable tests...\n");
+	entries = min(entries, MAX_ENTRIES);
 
-	err = rhashtable_init(&ht, &test_rht_params);
-	if (err < 0) {
-		pr_warn("Test failed: Unable to initialize hashtable: %d\n",
-			err);
-		return err;
+	test_rht_params.automatic_shrinking = shrinking;
+	test_rht_params.max_size = max_size;
+	test_rht_params.nelem_hint = size;
+
+	pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n",
+		size, max_size, shrinking);
+
+	for (i = 0; i < runs; i++) {
+		s64 time;
+
+		pr_info("Test %02d:\n", i);
+		memset(&array, 0, sizeof(array));
+		err = rhashtable_init(&ht, &test_rht_params);
+		if (err < 0) {
+			pr_warn("Test failed: Unable to initialize hashtable: %d\n",
+				err);
+			continue;
+		}
+
+		time = test_rhashtable(&ht);
+		rhashtable_destroy(&ht);
+		if (time < 0) {
+			pr_warn("Test failed: return code %lld\n", time);
+			return -EINVAL;
+		}
+
+		total_time += time;
 	}
 
-	err = test_rhashtable(&ht);
+	do_div(total_time, runs);
+	pr_info("Average test time: %llu\n", total_time);
 
-	rhashtable_destroy(&ht);
-
-	return err;
+	return 0;
 }
 
 static void __exit test_rht_exit(void)
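
With these module parameters the test can be swept over different table shapes from the command line; a hypothetical invocation (module name per the kernel build) would look like "modprobe test_rhashtable entries=100000 runs=8 max_size=32768 shrinking=1", after which the per-run durations and the average land in the kernel log.
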
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ebffa0e..2fd31ae 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2967,6 +2967,104 @@
 EXPORT_SYMBOL(free_pages);
 
 /*
+ * Page Fragment:
+ *  An arbitrary-length arbitrary-offset area of memory which resides
+ *  within a 0 or higher order page.  Multiple fragments within that page
+ *  are individually refcounted, in the page's reference counter.
+ *
+ * The page_frag functions below provide a simple allocation framework for
+ * page fragments.  This is used by the network stack and network device
+ * drivers to provide a backing region of memory for use as either an
+ * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
+ */
+static struct page *__page_frag_refill(struct page_frag_cache *nc,
+				       gfp_t gfp_mask)
+{
+	struct page *page = NULL;
+	gfp_t gfp = gfp_mask;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
+		    __GFP_NOMEMALLOC;
+	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
+				PAGE_FRAG_CACHE_MAX_ORDER);
+	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
+#endif
+	if (unlikely(!page))
+		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
+
+	nc->va = page ? page_address(page) : NULL;
+
+	return page;
+}
+
+void *__alloc_page_frag(struct page_frag_cache *nc,
+			unsigned int fragsz, gfp_t gfp_mask)
+{
+	unsigned int size = PAGE_SIZE;
+	struct page *page;
+	int offset;
+
+	if (unlikely(!nc->va)) {
+refill:
+		page = __page_frag_refill(nc, gfp_mask);
+		if (!page)
+			return NULL;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+		/* if size can vary use size else just use PAGE_SIZE */
+		size = nc->size;
+#endif
+		/* Even if we own the page, we do not use atomic_set().
+		 * This would break get_page_unless_zero() users.
+		 */
+		atomic_add(size - 1, &page->_count);
+
+		/* reset page count bias and offset to start of new frag */
+		nc->pfmemalloc = page->pfmemalloc;
+		nc->pagecnt_bias = size;
+		nc->offset = size;
+	}
+
+	offset = nc->offset - fragsz;
+	if (unlikely(offset < 0)) {
+		page = virt_to_page(nc->va);
+
+		if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
+			goto refill;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+		/* if size can vary use size else just use PAGE_SIZE */
+		size = nc->size;
+#endif
+		/* OK, page count is 0, we can safely set it */
+		atomic_set(&page->_count, size);
+
+		/* reset page count bias and offset to start of new frag */
+		nc->pagecnt_bias = size;
+		offset = size - fragsz;
+	}
+
+	nc->pagecnt_bias--;
+	nc->offset = offset;
+
+	return nc->va + offset;
+}
+EXPORT_SYMBOL(__alloc_page_frag);
+
+/*
+ * Frees a page fragment allocated out of either a compound or order 0 page.
+ */
+void __free_page_frag(void *addr)
+{
+	struct page *page = virt_to_head_page(addr);
+
+	if (unlikely(put_page_testzero(page)))
+		__free_pages_ok(page, compound_order(page));
+}
+EXPORT_SYMBOL(__free_page_frag);
+
+/*
  * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
  * of the current memory cgroup.
  *
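
For context, a minimal sketch of the calling pattern the new page_frag API is designed for, assuming a caller-owned cache (function and variable names here are illustrative, not part of the patch):

	static struct page_frag_cache frag_cache;	/* typically per cpu */

	static void *grab_buffer(unsigned int fragsz)
	{
		/* carve fragsz bytes off the cached page; refills from
		 * the page allocator once the current page is used up
		 */
		return __alloc_page_frag(&frag_cache, fragsz, GFP_ATOMIC);
	}

	static void drop_buffer(void *buf)
	{
		/* drop one fragment reference; the backing page goes
		 * back to the allocator only after all fragments and
		 * the cache's remaining bias have been released
		 */
		__free_page_frag(buf);
	}

Each successful allocation consumes one unit of pagecnt_bias, so the (possibly compound) page is freed exactly once, when the last outstanding reference disappears.
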
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 59555f0..d2cd9de 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -618,6 +618,92 @@
 	return err;
 }
 
+static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
+					 struct sk_buff *skb)
+{
+	struct sk_buff *p, **pp = NULL;
+	struct vlan_hdr *vhdr;
+	unsigned int hlen, off_vlan;
+	const struct packet_offload *ptype;
+	__be16 type;
+	int flush = 1;
+
+	off_vlan = skb_gro_offset(skb);
+	hlen = off_vlan + sizeof(*vhdr);
+	vhdr = skb_gro_header_fast(skb, off_vlan);
+	if (skb_gro_header_hard(skb, hlen)) {
+		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
+		if (unlikely(!vhdr))
+			goto out;
+	}
+
+	type = vhdr->h_vlan_encapsulated_proto;
+
+	rcu_read_lock();
+	ptype = gro_find_receive_by_type(type);
+	if (!ptype)
+		goto out_unlock;
+
+	flush = 0;
+
+	for (p = *head; p; p = p->next) {
+		struct vlan_hdr *vhdr2;
+
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
+		if (compare_vlan_header(vhdr, vhdr2))
+			NAPI_GRO_CB(p)->same_flow = 0;
+	}
+
+	skb_gro_pull(skb, sizeof(*vhdr));
+	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
+	pp = ptype->callbacks.gro_receive(head, skb);
+
+out_unlock:
+	rcu_read_unlock();
+out:
+	NAPI_GRO_CB(skb)->flush |= flush;
+
+	return pp;
+}
+
+static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
+{
+	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
+	__be16 type = vhdr->h_vlan_encapsulated_proto;
+	struct packet_offload *ptype;
+	int err = -ENOENT;
+
+	rcu_read_lock();
+	ptype = gro_find_complete_by_type(type);
+	if (ptype)
+		err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));
+
+	rcu_read_unlock();
+	return err;
+}
+
+static struct packet_offload vlan_packet_offloads[] __read_mostly = {
+	{
+		.type = cpu_to_be16(ETH_P_8021Q),
+		.priority = 10,
+		.callbacks = {
+			.gro_receive = vlan_gro_receive,
+			.gro_complete = vlan_gro_complete,
+		},
+	},
+	{
+		.type = cpu_to_be16(ETH_P_8021AD),
+		.priority = 10,
+		.callbacks = {
+			.gro_receive = vlan_gro_receive,
+			.gro_complete = vlan_gro_complete,
+		},
+	},
+};
+
 static int __net_init vlan_init_net(struct net *net)
 {
 	struct vlan_net *vn = net_generic(net, vlan_net_id);
@@ -645,6 +731,7 @@
 static int __init vlan_proto_init(void)
 {
 	int err;
+	unsigned int i;
 
 	pr_info("%s v%s\n", vlan_fullname, vlan_version);
 
@@ -668,6 +755,9 @@
 	if (err < 0)
 		goto err5;
 
+	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
+		dev_add_offload(&vlan_packet_offloads[i]);
+
 	vlan_ioctl_set(vlan_ioctl_handler);
 	return 0;
 
@@ -685,7 +775,13 @@
 
 static void __exit vlan_cleanup_module(void)
 {
+	unsigned int i;
+
 	vlan_ioctl_set(NULL);
+
+	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
+		dev_remove_offload(&vlan_packet_offloads[i]);
+
 	vlan_netlink_fini();
 
 	unregister_netdevice_notifier(&vlan_notifier_block);
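
The receive path above keys flow matching on compare_vlan_header(), which is expected to report whether two VLAN headers describe different flows; a plausible shape for such a helper (illustrative only, the real definition lives in if_vlan.h):

	/* true when the TCI or the encapsulated protocol differ */
	static inline bool vlan_hdr_differs(const struct vlan_hdr *h1,
					    const struct vlan_hdr *h2)
	{
		return h1->h_vlan_TCI != h2->h_vlan_TCI ||
		       h1->h_vlan_encapsulated_proto !=
		       h2->h_vlan_encapsulated_proto;
	}

Any packet already on the GRO list whose header mismatches is marked !same_flow before the encapsulated protocol's gro_receive callback runs.
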
diff --git a/net/Kconfig b/net/Kconfig
index 44dd578..57a7c5a 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -45,6 +45,9 @@
 	  Newly written code should NEVER need this option but do
 	  compat-independent messages instead!
 
+config NET_INGRESS
+	bool
+
 menu "Networking options"
 
 source "net/packet/Kconfig"
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 3b7ad43..d5871ac 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1030,7 +1030,7 @@
 	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
 		goto out;
 	rc = -ENOMEM;
-	sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto);
+	sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto, kern);
 	if (!sk)
 		goto out;
 	rc = 0;
diff --git a/net/atm/common.c b/net/atm/common.c
index ed04666..49a872d 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -141,7 +141,7 @@
 	.release_cb = vcc_release_cb,
 };
 
-int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
+int vcc_create(struct net *net, struct socket *sock, int protocol, int family, int kern)
 {
 	struct sock *sk;
 	struct atm_vcc *vcc;
@@ -149,7 +149,7 @@
 	sock->sk = NULL;
 	if (sock->type == SOCK_STREAM)
 		return -EINVAL;
-	sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto);
+	sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto, kern);
 	if (!sk)
 		return -ENOMEM;
 	sock_init_data(sock, sk);
diff --git a/net/atm/common.h b/net/atm/common.h
index 4d6f5b2..959436b 100644
--- a/net/atm/common.h
+++ b/net/atm/common.h
@@ -10,7 +10,7 @@
 #include <linux/poll.h> /* for poll_table */
 
 
-int vcc_create(struct net *net, struct socket *sock, int protocol, int family);
+int vcc_create(struct net *net, struct socket *sock, int protocol, int family, int kern);
 int vcc_release(struct socket *sock);
 int vcc_connect(struct socket *sock, int itf, short vpi, int vci);
 int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index ae03240..040207ec 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -136,7 +136,7 @@
 		return -EAFNOSUPPORT;
 
 	sock->ops = &pvc_proto_ops;
-	return vcc_create(net, sock, protocol, PF_ATMPVC);
+	return vcc_create(net, sock, protocol, PF_ATMPVC, kern);
 }
 
 static const struct net_proto_family pvc_family_ops = {
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 1ba23f5..3fa0a9e 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -660,7 +660,7 @@
 		return -EAFNOSUPPORT;
 
 	sock->ops = &svc_proto_ops;
-	error = vcc_create(net, sock, protocol, AF_ATMSVC);
+	error = vcc_create(net, sock, protocol, AF_ATMSVC, kern);
 	if (error)
 		return error;
 	ATM_SD(sock)->local.sas_family = AF_ATMSVC;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 330c1f4..9c891d0 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -40,7 +40,6 @@
 #include <linux/notifier.h>
 #include <linux/proc_fs.h>
 #include <linux/stat.h>
-#include <linux/netfilter.h>
 #include <linux/sysctl.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
@@ -855,7 +854,7 @@
 		return -ESOCKTNOSUPPORT;
 	}
 
-	sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto);
+	sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto, kern);
 	if (sk == NULL)
 		return -ENOMEM;
 
@@ -881,7 +880,7 @@
 	struct sock *sk;
 	ax25_cb *ax25, *oax25;
 
-	sk = sk_alloc(sock_net(osk), PF_AX25, GFP_ATOMIC,	osk->sk_prot);
+	sk = sk_alloc(sock_net(osk), PF_AX25, GFP_ATOMIC, osk->sk_prot, 0);
 	if (sk == NULL)
 		return NULL;
 
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index 7ed8ab7..29a3687 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -23,7 +23,6 @@
 #include <linux/inet.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
-#include <linux/netfilter.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
 #include <asm/uaccess.h>
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index 7c646bb..b563a3f 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -31,7 +31,6 @@
 #include <linux/notifier.h>
 #include <linux/proc_fs.h>
 #include <linux/stat.h>
-#include <linux/netfilter.h>
 #include <linux/sysctl.h>
 #include <net/ip.h>
 #include <net/arp.h>
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index be2acab..8ddd41b 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -24,7 +24,6 @@
 #include <linux/inet.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
-#include <linux/netfilter.h>
 #include <net/sock.h>
 #include <asm/uaccess.h>
 #include <linux/fcntl.h>
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 71c4bad..4ad2fb7 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -34,7 +34,6 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/stat.h>
-#include <linux/netfilter.h>
 #include <linux/sysctl.h>
 #include <linux/export.h>
 #include <net/ip.h>
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index eb7d8c03..21434ab 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -1,5 +1,5 @@
 #
-# Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+# Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
 #
 # Marek Lindner, Simon Wunderlich
 #
@@ -20,7 +20,7 @@
 batman-adv-y += bat_iv_ogm.o
 batman-adv-y += bitarray.o
 batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
-batman-adv-y += debugfs.o
+batman-adv-$(CONFIG_DEBUG_FS) += debugfs.o
 batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o
 batman-adv-y += fragmentation.o
 batman-adv-y += gateway_client.o
@@ -29,6 +29,7 @@
 batman-adv-y += hash.o
 batman-adv-y += icmp_socket.o
 batman-adv-y += main.o
+batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o
 batman-adv-$(CONFIG_BATMAN_ADV_NC) += network-coding.o
 batman-adv-y += originator.o
 batman-adv-y += routing.o
@@ -36,4 +37,3 @@
 batman-adv-y += soft-interface.o
 batman-adv-y += sysfs.o
 batman-adv-y += translation-table.o
-batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o
diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h
index 4e49666..4e59cf3 100644
--- a/net/batman-adv/bat_algo.h
+++ b/net/batman-adv/bat_algo.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 00e00e0..753383c 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -15,20 +15,50 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "main.h"
-#include "translation-table.h"
-#include "originator.h"
-#include "routing.h"
-#include "gateway_common.h"
-#include "gateway_client.h"
-#include "hard-interface.h"
-#include "send.h"
 #include "bat_algo.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/byteorder/generic.h>
+#include <linux/cache.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/pkt_sched.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "bitarray.h"
+#include "hard-interface.h"
+#include "hash.h"
 #include "network-coding.h"
+#include "originator.h"
+#include "packet.h"
+#include "routing.h"
+#include "send.h"
+#include "translation-table.h"
 
 /**
  * enum batadv_dup_status - duplicate status
- * @BATADV_NO_DUP: the packet is a duplicate
+ * @BATADV_NO_DUP: the packet is no duplicate
  * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
  *  neighbor)
  * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor
@@ -55,7 +85,7 @@
 }
 
 /**
- * batadv_ring_buffer_set - compute the average of all non-zero values stored
+ * batadv_ring_buffer_avg - compute the average of all non-zero values stored
  * in the given ring buffer
  * @lq_recv: pointer to the ring buffer
  *
@@ -64,7 +94,9 @@
 static uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
 {
 	const uint8_t *ptr;
-	uint16_t count = 0, i = 0, sum = 0;
+	uint16_t count = 0;
+	uint16_t i = 0;
+	uint16_t sum = 0;
 
 	ptr = lq_recv;
 
@@ -308,7 +340,6 @@
 	struct batadv_ogm_packet *batadv_ogm_packet;
 	unsigned char *ogm_buff;
 	uint32_t random_seqno;
-	int res = -ENOMEM;
 
 	/* randomize initial seqno to avoid collision */
 	get_random_bytes(&random_seqno, sizeof(random_seqno));
@@ -317,7 +348,7 @@
 	hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
 	ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
 	if (!ogm_buff)
-		goto out;
+		return -ENOMEM;
 
 	hard_iface->bat_iv.ogm_buff = ogm_buff;
 
@@ -329,10 +360,7 @@
 	batadv_ogm_packet->reserved = 0;
 	batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
 
-	res = 0;
-
-out:
-	return res;
+	return 0;
 }
 
 static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
@@ -396,8 +424,8 @@
 }
 
 /* is there another aggregated packet here? */
-static int batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
-				     __be16 tvlv_len)
+static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+				      __be16 tvlv_len)
 {
 	int next_buff_pos = 0;
 
@@ -413,7 +441,7 @@
 				     struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
-	char *fwd_str;
+	const char *fwd_str;
 	uint8_t packet_num;
 	int16_t buff_pos;
 	struct batadv_ogm_packet *batadv_ogm_packet;
@@ -451,7 +479,7 @@
 			   batadv_ogm_packet->orig,
 			   ntohl(batadv_ogm_packet->seqno),
 			   batadv_ogm_packet->tq, batadv_ogm_packet->ttl,
-			   (batadv_ogm_packet->flags & BATADV_DIRECTLINK ?
+			   ((batadv_ogm_packet->flags & BATADV_DIRECTLINK) ?
 			    "on" : "off"),
 			   hard_iface->net_dev->name,
 			   hard_iface->net_dev->dev_addr);
@@ -548,58 +576,62 @@
 	 * - the send time is within our MAX_AGGREGATION_MS time
 	 * - the resulting packet wont be bigger than
 	 *   MAX_AGGREGATION_BYTES
+	 * otherwise aggregation is not possible
 	 */
-	if (time_before(send_time, forw_packet->send_time) &&
-	    time_after_eq(aggregation_end_time, forw_packet->send_time) &&
-	    (aggregated_bytes <= BATADV_MAX_AGGREGATION_BYTES)) {
-		/* check aggregation compatibility
-		 * -> direct link packets are broadcasted on
-		 *    their interface only
-		 * -> aggregate packet if the current packet is
-		 *    a "global" packet as well as the base
-		 *    packet
-		 */
-		primary_if = batadv_primary_if_get_selected(bat_priv);
-		if (!primary_if)
-			goto out;
+	if (!time_before(send_time, forw_packet->send_time) ||
+	    !time_after_eq(aggregation_end_time, forw_packet->send_time))
+		return false;
 
-		/* packet is not leaving on the same interface. */
-		if (forw_packet->if_outgoing != if_outgoing)
-			goto out;
+	if (aggregated_bytes > BATADV_MAX_AGGREGATION_BYTES)
+		return false;
 
-		/* packets without direct link flag and high TTL
-		 * are flooded through the net
-		 */
-		if ((!directlink) &&
-		    (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) &&
-		    (batadv_ogm_packet->ttl != 1) &&
+	/* packet is not leaving on the same interface. */
+	if (forw_packet->if_outgoing != if_outgoing)
+		return false;
 
-		    /* own packets originating non-primary
-		     * interfaces leave only that interface
-		     */
-		    ((!forw_packet->own) ||
-		     (forw_packet->if_incoming == primary_if))) {
-			res = true;
-			goto out;
-		}
+	/* check aggregation compatibility
+	 * -> direct link packets are broadcasted on
+	 *    their interface only
+	 * -> aggregate packet if the current packet is
+	 *    a "global" packet as well as the base
+	 *    packet
+	 */
+	primary_if = batadv_primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		return false;
 
-		/* if the incoming packet is sent via this one
-		 * interface only - we still can aggregate
-		 */
-		if ((directlink) &&
-		    (new_bat_ogm_packet->ttl == 1) &&
-		    (forw_packet->if_incoming == if_incoming) &&
+	/* packets without direct link flag and high TTL
+	 * are flooded through the net
+	 */
+	if (!directlink &&
+	    !(batadv_ogm_packet->flags & BATADV_DIRECTLINK) &&
+	    batadv_ogm_packet->ttl != 1 &&
 
-		    /* packets from direct neighbors or
-		     * own secondary interface packets
-		     * (= secondary interface packets in general)
-		     */
-		    (batadv_ogm_packet->flags & BATADV_DIRECTLINK ||
-		     (forw_packet->own &&
-		      forw_packet->if_incoming != primary_if))) {
-			res = true;
-			goto out;
-		}
+	    /* own packets originating non-primary
+	     * interfaces leave only that interface
+	     */
+	    (!forw_packet->own ||
+	     forw_packet->if_incoming == primary_if)) {
+		res = true;
+		goto out;
+	}
+
+	/* if the incoming packet is sent via this one
+	 * interface only - we still can aggregate
+	 */
+	if (directlink &&
+	    new_bat_ogm_packet->ttl == 1 &&
+	    forw_packet->if_incoming == if_incoming &&
+
+	    /* packets from direct neighbors or
+	     * own secondary interface packets
+	     * (= secondary interface packets in general)
+	     */
+	    (batadv_ogm_packet->flags & BATADV_DIRECTLINK ||
+	     (forw_packet->own &&
+	      forw_packet->if_incoming != primary_if))) {
+		res = true;
+		goto out;
 	}
 
 out:
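
[Editor's note: the early bail-outs above keep the jiffies window test
readable. That test relies on the kernel's wrap-safe time_before() and
time_after_eq() macros; a userspace sketch of the same idea, assuming
32-bit counters:

#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe comparisons in the style of the kernel's time_before()
 * and time_after_eq(): casting the difference to a signed type keeps
 * the result correct across counter wrap-around.
 */
static bool time_before32(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

static bool time_after_eq32(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

/* A queued packet may join the aggregate only if its send time lies
 * in the window (own send_time, aggregation_end_time].
 */
static bool in_aggregation_window(uint32_t send_time,
				  uint32_t aggregation_end_time,
				  uint32_t packet_send_time)
{
	return time_before32(send_time, packet_send_time) &&
	       time_after_eq32(aggregation_end_time, packet_send_time);
}
]
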
@@ -642,19 +674,16 @@
 		if (!batadv_atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
 			batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 				   "batman packet queue full\n");
-			goto out;
+			goto out_free_outgoing;
 		}
 	}
 
 	forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
-	if (!forw_packet_aggr) {
-		if (!own_packet)
-			atomic_inc(&bat_priv->batman_queue_left);
-		goto out;
-	}
+	if (!forw_packet_aggr)
+		goto out_nomem;
 
-	if ((atomic_read(&bat_priv->aggregated_ogms)) &&
-	    (packet_len < BATADV_MAX_AGGREGATION_BYTES))
+	if (atomic_read(&bat_priv->aggregated_ogms) &&
+	    packet_len < BATADV_MAX_AGGREGATION_BYTES)
 		skb_size = BATADV_MAX_AGGREGATION_BYTES;
 	else
 		skb_size = packet_len;
@@ -662,12 +691,8 @@
 	skb_size += ETH_HLEN;
 
 	forw_packet_aggr->skb = netdev_alloc_skb_ip_align(NULL, skb_size);
-	if (!forw_packet_aggr->skb) {
-		if (!own_packet)
-			atomic_inc(&bat_priv->batman_queue_left);
-		kfree(forw_packet_aggr);
-		goto out;
-	}
+	if (!forw_packet_aggr->skb)
+		goto out_free_forw_packet;
 	forw_packet_aggr->skb->priority = TC_PRIO_CONTROL;
 	skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
 
@@ -699,7 +724,12 @@
 			   send_time - jiffies);
 
 	return;
-out:
+out_free_forw_packet:
+	kfree(forw_packet_aggr);
+out_nomem:
+	if (!own_packet)
+		atomic_inc(&bat_priv->batman_queue_left);
+out_free_outgoing:
 	batadv_hardif_free_ref(if_outgoing);
 out_free_incoming:
 	batadv_hardif_free_ref(if_incoming);
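
[Editor's note: the new out_* labels form the classic cleanup ladder: each
failure jumps to the label that unwinds exactly what was set up before it,
so no error path duplicates cleanup. A self-contained userspace sketch of
the pattern; the queue and allocation names are made up for illustration:

#include <stdlib.h>

struct queue {
	int slots;
};

static int queue_take(struct queue *q)
{
	if (q->slots <= 0)
		return -1;
	q->slots--;
	return 0;
}

static void queue_put(struct queue *q)
{
	q->slots++;
}

/* Each out_* label undoes exactly the steps completed before the
 * failure, so every error path runs the cleanup it needs and no more.
 */
static int build_packet(struct queue *q, void **pkt, void **skb)
{
	if (queue_take(q))
		goto out;

	*pkt = malloc(128);
	if (!*pkt)
		goto out_requeue;

	*skb = malloc(1500);
	if (!*skb)
		goto out_free_pkt;

	return 0;

out_free_pkt:
	free(*pkt);
out_requeue:
	queue_put(q);
out:
	return -1;
}
]
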
@@ -752,13 +782,13 @@
 	unsigned long max_aggregation_jiffies;
 
 	batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
-	direct_link = batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0;
+	direct_link = !!(batadv_ogm_packet->flags & BATADV_DIRECTLINK);
 	max_aggregation_jiffies = msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS);
 
 	/* find position for the packet in the forward queue */
 	spin_lock_bh(&bat_priv->forw_bat_list_lock);
 	/* own packets are not to be aggregated */
-	if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
+	if (atomic_read(&bat_priv->aggregated_ogms) && !own_packet) {
 		hlist_for_each_entry(forw_packet_pos,
 				     &bat_priv->forw_bat_list, list) {
 			if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet,
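
[Editor's note: the !! idiom above normalizes the masked flag to 0 or 1
before it is stored, which matters once the result lands in a narrow
integer or is compared against true. A tiny illustration; the flag value
is made up:

#include <assert.h>
#include <stdint.h>

#define DIRECTLINK 0x40 /* made-up flag value */

int main(void)
{
	uint8_t flags = 0xc0;
	uint8_t direct_link = !!(flags & DIRECTLINK);

	/* Without !!, direct_link would hold 0x40, not 1. */
	assert(direct_link == 1);
	return 0;
}
]
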
@@ -1034,9 +1064,10 @@
 		batadv_orig_node_free_ref(orig_tmp);
 		if (!neigh_node)
 			goto unlock;
-	} else
+	} else {
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 			   "Updating existing last-hop neighbor of originator\n");
+	}
 
 	rcu_read_unlock();
 	neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
@@ -1081,7 +1112,7 @@
 	 * won't consider it either
 	 */
 	if (router_ifinfo &&
-	    (neigh_ifinfo->bat_iv.tq_avg == router_ifinfo->bat_iv.tq_avg)) {
+	    neigh_ifinfo->bat_iv.tq_avg == router_ifinfo->bat_iv.tq_avg) {
 		orig_node_tmp = router->orig_node;
 		spin_lock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
 		if_num = router->if_incoming->if_num;
@@ -1356,8 +1387,7 @@
 out:
 	spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
 	batadv_orig_node_free_ref(orig_node);
-	if (orig_ifinfo)
-		batadv_orig_ifinfo_free_ref(orig_ifinfo);
+	batadv_orig_ifinfo_free_ref(orig_ifinfo);
 	return ret;
 }
 
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index e3da07a..cf68c32 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
@@ -15,10 +15,10 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "main.h"
 #include "bitarray.h"
+#include "main.h"
 
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
 
 /* shift the packet array by n places. */
 static void batadv_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index 2acaafe..0c24562 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
@@ -18,6 +18,12 @@
 #ifndef _NET_BATMAN_ADV_BITARRAY_H_
 #define _NET_BATMAN_ADV_BITARRAY_H_
 
+#include "main.h"
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+
 /* Returns 1 if the corresponding bit in the given seq_bits indicates true
  * and curr_seqno is within range of last_seqno. Otherwise returns 0.
  */
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index ac4b96e..ba06092 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
  *
@@ -15,19 +15,41 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "main.h"
-#include "hash.h"
-#include "hard-interface.h"
-#include "originator.h"
 #include "bridge_loop_avoidance.h"
-#include "translation-table.h"
-#include "send.h"
+#include "main.h"
 
-#include <linux/etherdevice.h>
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/compiler.h>
 #include <linux/crc16.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
 #include <linux/if_arp.h>
-#include <net/arp.h>
+#include <linux/if_ether.h>
 #include <linux/if_vlan.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <net/arp.h>
+
+#include "hard-interface.h"
+#include "hash.h"
+#include "originator.h"
+#include "packet.h"
+#include "translation-table.h"
 
 static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
 
@@ -42,12 +64,8 @@
 	struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
 	uint32_t hash = 0;
 
-	hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
-	hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));
-
-	hash += (hash << 3);
-	hash ^= (hash >> 11);
-	hash += (hash << 15);
+	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
+	hash = jhash(&claim->vid, sizeof(claim->vid), hash);
 
 	return hash % size;
 }
@@ -59,12 +77,8 @@
 	const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
 	uint32_t hash = 0;
 
-	hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
-	hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));
-
-	hash += (hash << 3);
-	hash ^= (hash >> 11);
-	hash += (hash << 15);
+	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
+	hash = jhash(&claim->vid, sizeof(claim->vid), hash);
 
 	return hash % size;
 }
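
[Editor's note: both claim-hash callbacks now chain jhash() calls from
<linux/jhash.h>, feeding the previous digest in as the initval so every
field influences the final bucket. A kernel-style sketch of the idiom;
the field widths are illustrative:

#include <linux/jhash.h>
#include <linux/types.h>

/* Chain two fields through jhash(): the previous digest becomes the
 * initval of the next call.
 */
static inline u32 claim_bucket(const u8 *addr, unsigned short vid,
			       u32 table_size)
{
	u32 hash = 0;

	hash = jhash(addr, 6, hash);		/* 6 == ETH_ALEN */
	hash = jhash(&vid, sizeof(vid), hash);

	return hash % table_size;
}
]
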
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 43c985d..0282690 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
  *
@@ -18,6 +18,16 @@
 #ifndef _NET_BATMAN_ADV_BLA_H_
 #define _NET_BATMAN_ADV_BLA_H_
 
+#include "main.h"
+
+#include <linux/types.h>
+
+struct batadv_hard_iface;
+struct batadv_orig_node;
+struct batadv_priv;
+struct seq_file;
+struct sk_buff;
+
 #ifdef CONFIG_BATMAN_ADV_BLA
 int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
 		  unsigned short vid, bool is_bcast);
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index a4972874..c4c1e80 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -15,21 +15,42 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
+#include "debugfs.h"
 #include "main.h"
 
+#include <linux/compiler.h>
 #include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/poll.h>
+#include <linux/printk.h>
+#include <linux/sched.h> /* for linux/wait.h */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <stdarg.h>
 
-#include "debugfs.h"
-#include "translation-table.h"
-#include "originator.h"
-#include "hard-interface.h"
-#include "gateway_common.h"
-#include "gateway_client.h"
-#include "soft-interface.h"
-#include "icmp_socket.h"
 #include "bridge_loop_avoidance.h"
 #include "distributed-arp-table.h"
+#include "gateway_client.h"
+#include "icmp_socket.h"
 #include "network-coding.h"
+#include "originator.h"
+#include "translation-table.h"
 
 static struct dentry *batadv_debugfs;
 
@@ -482,11 +503,7 @@
 	debugfs_remove_recursive(hard_iface->debug_dir);
 	hard_iface->debug_dir = NULL;
 out:
-#ifdef CONFIG_DEBUG_FS
 	return -ENOMEM;
-#else
-	return 0;
-#endif /* CONFIG_DEBUG_FS */
 }
 
 /**
@@ -541,11 +558,7 @@
 	debugfs_remove_recursive(bat_priv->debug_dir);
 	bat_priv->debug_dir = NULL;
 out:
-#ifdef CONFIG_DEBUG_FS
 	return -ENOMEM;
-#else
-	return 0;
-#endif /* CONFIG_DEBUG_FS */
 }
 
 void batadv_debugfs_del_meshif(struct net_device *dev)
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
index 37c4d6d..187acdc 100644
--- a/net/batman-adv/debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -18,8 +18,17 @@
 #ifndef _NET_BATMAN_ADV_DEBUGFS_H_
 #define _NET_BATMAN_ADV_DEBUGFS_H_
 
+#include "main.h"
+
+#include <linux/kconfig.h>
+
+struct batadv_hard_iface;
+struct net_device;
+
 #define BATADV_DEBUGFS_SUBDIR "batman_adv"
 
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+
 void batadv_debugfs_init(void);
 void batadv_debugfs_destroy(void);
 int batadv_debugfs_add_meshif(struct net_device *dev);
@@ -27,4 +36,36 @@
 int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface);
 void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface);
 
+#else
+
+static inline void batadv_debugfs_init(void)
+{
+}
+
+static inline void batadv_debugfs_destroy(void)
+{
+}
+
+static inline int batadv_debugfs_add_meshif(struct net_device *dev)
+{
+	return 0;
+}
+
+static inline void batadv_debugfs_del_meshif(struct net_device *dev)
+{
+}
+
+static inline
+int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface)
+{
+	return 0;
+}
+
+static inline
+void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
+{
+}
+
+#endif
+
 #endif /* _NET_BATMAN_ADV_DEBUGFS_H_ */
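
[Editor's note: with IS_ENABLED() plus inline no-op stubs, callers invoke
the debugfs hooks unconditionally and the disabled configuration compiles
them away; this is also what lets the #ifdef blocks in debugfs.c above be
dropped. A minimal sketch of the pattern with a hypothetical hook:

#include <linux/kconfig.h>

struct net_device;

#if IS_ENABLED(CONFIG_DEBUG_FS)
int my_debugfs_add(struct net_device *dev);	/* hypothetical hook */
#else
static inline int my_debugfs_add(struct net_device *dev)
{
	return 0;	/* report success so callers need no #ifdef */
}
#endif
]
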
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index aad022d..fb54e6a 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
  *
@@ -15,18 +15,36 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/if_ether.h>
+#include "distributed-arp-table.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
 #include <linux/if_arp.h>
+#include <linux/if_ether.h>
 #include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
 #include <net/arp.h>
 
-#include "main.h"
-#include "hash.h"
-#include "distributed-arp-table.h"
 #include "hard-interface.h"
+#include "hash.h"
 #include "originator.h"
 #include "send.h"
-#include "types.h"
 #include "translation-table.h"
 
 static void batadv_dat_purge(struct work_struct *work);
@@ -206,9 +224,22 @@
 {
 	uint32_t hash = 0;
 	const struct batadv_dat_entry *dat = data;
+	const unsigned char *key;
+	uint32_t i;
 
-	hash = batadv_hash_bytes(hash, &dat->ip, sizeof(dat->ip));
-	hash = batadv_hash_bytes(hash, &dat->vid, sizeof(dat->vid));
+	key = (const unsigned char *)&dat->ip;
+	for (i = 0; i < sizeof(dat->ip); i++) {
+		hash += key[i];
+		hash += (hash << 10);
+		hash ^= (hash >> 6);
+	}
+
+	key = (const unsigned char *)&dat->vid;
+	for (i = 0; i < sizeof(dat->vid); i++) {
+		hash += key[i];
+		hash += (hash << 10);
+		hash ^= (hash >> 6);
+	}
 
 	hash += (hash << 3);
 	hash ^= (hash >> 11);
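
[Editor's note: unlike the jhash conversions elsewhere in this series, the
DAT callback open-codes the former batadv_hash_bytes() mixing per field.
The loop is Jenkins' one-at-a-time hash, restated compactly in userspace
form:

#include <stddef.h>
#include <stdint.h>

/* Jenkins' one-at-a-time hash: mix every byte of one field... */
static uint32_t one_at_a_time(uint32_t hash, const void *data, size_t len)
{
	const unsigned char *key = data;
	size_t i;

	for (i = 0; i < len; i++) {
		hash += key[i];
		hash += hash << 10;
		hash ^= hash >> 6;
	}

	return hash;
}

/* ...then apply the final avalanche once, after all fields. */
static uint32_t one_at_a_time_final(uint32_t hash)
{
	hash += hash << 3;
	hash ^= hash >> 11;
	hash += hash << 15;

	return hash;
}
]
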
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h
index 2fe0764..3181507 100644
--- a/net/batman-adv/distributed-arp-table.h
+++ b/net/batman-adv/distributed-arp-table.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
  *
@@ -18,12 +18,19 @@
 #ifndef _NET_BATMAN_ADV_DISTRIBUTED_ARP_TABLE_H_
 #define _NET_BATMAN_ADV_DISTRIBUTED_ARP_TABLE_H_
 
-#ifdef CONFIG_BATMAN_ADV_DAT
+#include "main.h"
 
-#include "types.h"
+#include <linux/compiler.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
 #include "originator.h"
+#include "packet.h"
 
-#include <linux/if_arp.h>
+struct seq_file;
+struct sk_buff;
+
+#ifdef CONFIG_BATMAN_ADV_DAT
 
 /* BATADV_DAT_ADDR_MAX - maximum address value in the DHT space */
 #define BATADV_DAT_ADDR_MAX ((batadv_dat_addr_t)~(batadv_dat_addr_t)0)
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 3d1dcaa..c0f0d01 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2015 B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll <martin@hundeboll.net>
  *
@@ -15,12 +15,28 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "main.h"
 #include "fragmentation.h"
-#include "send.h"
-#include "originator.h"
-#include "routing.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/pkt_sched.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
 #include "hard-interface.h"
+#include "originator.h"
+#include "packet.h"
+#include "routing.h"
+#include "send.h"
 #include "soft-interface.h"
 
 /**
@@ -161,6 +177,7 @@
 		hlist_add_head(&frag_entry_new->list, &chain->head);
 		chain->size = skb->len - hdr_size;
 		chain->timestamp = jiffies;
+		chain->total_size = ntohs(frag_packet->total_size);
 		ret = true;
 		goto out;
 	}
@@ -195,9 +212,11 @@
 
 out:
 	if (chain->size > batadv_frag_size_limit() ||
-	    ntohs(frag_packet->total_size) > batadv_frag_size_limit()) {
+	    chain->total_size != ntohs(frag_packet->total_size) ||
+	    chain->total_size > batadv_frag_size_limit()) {
 		/* Clear chain if total size of either the list or the packet
-		 * exceeds the maximum size of one merged packet.
+		 * exceeds the maximum size of one merged packet. Don't allow
+		 * packets to have different total_size.
 		 */
 		batadv_frag_clear_chain(&chain->head);
 		chain->size = 0;
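
[Editor's note: recording total_size when the chain is created and
comparing it on every later fragment means a single mismatched
advertisement flushes the whole chain instead of feeding the merge step
inconsistent sizes. A condensed sketch of the check; the size limit here
is illustrative, not the kernel constant:

#include <stdbool.h>
#include <stdint.h>

#define FRAG_SIZE_LIMIT 1400 /* illustrative */

struct frag_chain {
	uint16_t size;		/* bytes buffered so far */
	uint16_t total_size;	/* advertised size of the merged packet */
};

/* True if the chain must be flushed: it outgrew the limit, or a
 * fragment advertised a total_size that conflicts with the one
 * recorded when the chain was created.
 */
static bool chain_is_bogus(const struct frag_chain *chain,
			   uint16_t advertised_total)
{
	return chain->size > FRAG_SIZE_LIMIT ||
	       chain->total_size != advertised_total ||
	       chain->total_size > FRAG_SIZE_LIMIT;
}
]
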
@@ -228,19 +247,13 @@
  * Returns the merged skb or NULL on error.
  */
 static struct sk_buff *
-batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
+batadv_frag_merge_packets(struct hlist_head *chain)
 {
 	struct batadv_frag_packet *packet;
 	struct batadv_frag_list_entry *entry;
 	struct sk_buff *skb_out = NULL;
 	int size, hdr_size = sizeof(struct batadv_frag_packet);
 
-	/* Make sure incoming skb has non-bogus data. */
-	packet = (struct batadv_frag_packet *)skb->data;
-	size = ntohs(packet->total_size);
-	if (size > batadv_frag_size_limit())
-		goto free;
-
 	/* Remove first entry, as this is the destination for the rest of the
 	 * fragments.
 	 */
@@ -249,6 +262,9 @@
 	skb_out = entry->skb;
 	kfree(entry);
 
+	packet = (struct batadv_frag_packet *)skb_out->data;
+	size = ntohs(packet->total_size);
+
 	/* Make room for the rest of the fragments. */
 	if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
 		kfree_skb(skb_out);
@@ -304,7 +320,7 @@
 	if (hlist_empty(&head))
 		goto out;
 
-	skb_out = batadv_frag_merge_packets(&head, *skb);
+	skb_out = batadv_frag_merge_packets(&head);
 	if (!skb_out)
 		goto out_err;
 
diff --git a/net/batman-adv/fragmentation.h b/net/batman-adv/fragmentation.h
index d848cf6..8b9877e 100644
--- a/net/batman-adv/fragmentation.h
+++ b/net/batman-adv/fragmentation.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2015 B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll <martin@hundeboll.net>
  *
@@ -18,6 +18,15 @@
 #ifndef _NET_BATMAN_ADV_FRAGMENTATION_H_
 #define _NET_BATMAN_ADV_FRAGMENTATION_H_
 
+#include "main.h"
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+struct sk_buff;
+
 void batadv_frag_purge_orig(struct batadv_orig_node *orig,
 			    bool (*check_cb)(struct batadv_frag_table_entry *));
 bool batadv_frag_skb_fwd(struct sk_buff *skb,
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 090828c..bb015862 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -15,18 +15,38 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "main.h"
-#include "sysfs.h"
 #include "gateway_client.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/udp.h>
+
 #include "gateway_common.h"
 #include "hard-interface.h"
 #include "originator.h"
-#include "translation-table.h"
+#include "packet.h"
 #include "routing.h"
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/udp.h>
-#include <linux/if_vlan.h>
+#include "sysfs.h"
+#include "translation-table.h"
 
 /* These are the offsets of the "hw type" and "hw address length" in the dhcp
  * packet starting at the beginning of the dhcp header
@@ -733,11 +753,6 @@
 	if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
 		return BATADV_DHCP_NO;
 
-	/* skb->data might have been reallocated by pskb_may_pull() */
-	ethhdr = eth_hdr(skb);
-	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
-		ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
-
 	udphdr = (struct udphdr *)(skb->data + *header_len);
 	*header_len += sizeof(*udphdr);
 
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 7ee53bb..89565b4 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -18,6 +18,14 @@
 #ifndef _NET_BATMAN_ADV_GATEWAY_CLIENT_H_
 #define _NET_BATMAN_ADV_GATEWAY_CLIENT_H_
 
+#include "main.h"
+
+#include <linux/types.h>
+
+struct batadv_tvlv_gateway_data;
+struct seq_file;
+struct sk_buff;
+
 void batadv_gw_check_client_stop(struct batadv_priv *bat_priv);
 void batadv_gw_reselect(struct batadv_priv *bat_priv);
 void batadv_gw_election(struct batadv_priv *bat_priv);
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 88a1bc3..39cf44c 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -15,9 +15,18 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "main.h"
 #include "gateway_common.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+
 #include "gateway_client.h"
+#include "packet.h"
 
 /**
  * batadv_parse_gw_bandwidth - parse supplied string buffer to extract download
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index aa51165..bd5c812 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -18,6 +18,13 @@
 #ifndef _NET_BATMAN_ADV_GATEWAY_COMMON_H_
 #define _NET_BATMAN_ADV_GATEWAY_COMMON_H_
 
+#include "main.h"
+
+#include <linux/types.h>
+
+struct batadv_priv;
+struct net_device;
+
 enum batadv_gw_modes {
 	BATADV_GW_MODE_OFF,
 	BATADV_GW_MODE_CLIENT,
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index baf1f98..f4a15d2 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -15,22 +15,36 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "main.h"
-#include "distributed-arp-table.h"
 #include "hard-interface.h"
-#include "soft-interface.h"
-#include "send.h"
-#include "translation-table.h"
-#include "routing.h"
-#include "sysfs.h"
-#include "debugfs.h"
-#include "originator.h"
-#include "hash.h"
-#include "bridge_loop_avoidance.h"
-#include "gateway_client.h"
+#include "main.h"
 
+#include <linux/bug.h>
+#include <linux/byteorder/generic.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
+#include <linux/if.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <net/net_namespace.h>
+
+#include "bridge_loop_avoidance.h"
+#include "debugfs.h"
+#include "distributed-arp-table.h"
+#include "gateway_client.h"
+#include "originator.h"
+#include "packet.h"
+#include "send.h"
+#include "soft-interface.h"
+#include "sysfs.h"
+#include "translation-table.h"
 
 void batadv_hardif_free_rcu(struct rcu_head *rcu)
 {
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 1918cd5..5a31420 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -18,6 +18,17 @@
 #ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_
 #define _NET_BATMAN_ADV_HARD_INTERFACE_H_
 
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/notifier.h>
+#include <linux/rcupdate.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+struct net_device;
+
 enum batadv_hard_if_state {
 	BATADV_IF_NOT_IN_USE,
 	BATADV_IF_TO_BE_REMOVED,
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index 7c1c630..e89f314 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
@@ -15,8 +15,12 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "main.h"
 #include "hash.h"
+#include "main.h"
+
+#include <linux/fs.h>
+#include <linux/lockdep.h>
+#include <linux/slab.h>
 
 /* clears the hash */
 static void batadv_hash_init(struct batadv_hashtable *hash)
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 539fc12..5065f50 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
@@ -18,7 +18,16 @@
 #ifndef _NET_BATMAN_ADV_HASH_H_
 #define _NET_BATMAN_ADV_HASH_H_
 
+#include "main.h"
+
+#include <linux/compiler.h>
 #include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+struct lock_class_key;
 
 /* callback to a compare function.  should compare the data of 2 elements by
  * their keys; return 0 if they are the same, non-zero otherwise
@@ -80,28 +89,6 @@
 }
 
 /**
- *	batadv_hash_bytes - hash some bytes and add them to the previous hash
- *	@hash: previous hash value
- *	@data: data to be hashed
- *	@size: number of bytes to be hashed
- *
- *	Returns the new hash value.
- */
-static inline uint32_t batadv_hash_bytes(uint32_t hash, const void *data,
-					 uint32_t size)
-{
-	const unsigned char *key = data;
-	int i;
-
-	for (i = 0; i < size; i++) {
-		hash += key[i];
-		hash += (hash << 10);
-		hash ^= (hash >> 6);
-	}
-	return hash;
-}
-
-/**
  *	batadv_hash_add - adds data to the hashtable
  *	@hash: storage hash table
  *	@compare: callback to determine if 2 hash elements are identical
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 161ef8f..07061bc 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -15,14 +15,39 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "main.h"
-#include <linux/debugfs.h>
-#include <linux/slab.h>
 #include "icmp_socket.h"
-#include "send.h"
-#include "hash.h"
-#include "originator.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/export.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pkt_sched.h>
+#include <linux/poll.h>
+#include <linux/printk.h>
+#include <linux/sched.h> /* for linux/wait.h */
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
 #include "hard-interface.h"
+#include "originator.h"
+#include "packet.h"
+#include "send.h"
 
 static struct batadv_socket_client *batadv_socket_client_hash[256];
 
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index 0c33950..7de7fce 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -18,6 +18,13 @@
 #ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_
 #define _NET_BATMAN_ADV_ICMP_SOCKET_H_
 
+#include "main.h"
+
+#include <linux/types.h>
+
+struct batadv_icmp_header;
+struct batadv_priv;
+
 #define BATADV_ICMP_SOCKET "socket"
 
 void batadv_socket_init(void);
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 12fc77b..8457097 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -15,31 +15,53 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/crc32c.h>
-#include <linux/highmem.h>
-#include <linux/if_vlan.h>
-#include <net/ip.h>
-#include <net/ipv6.h>
-#include <net/dsfield.h>
 #include "main.h"
-#include "sysfs.h"
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/byteorder/generic.h>
+#include <linux/crc32c.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/pkt_sched.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <net/dsfield.h>
+#include <net/rtnetlink.h>
+
+#include "bat_algo.h"
+#include "bridge_loop_avoidance.h"
 #include "debugfs.h"
+#include "distributed-arp-table.h"
+#include "gateway_client.h"
+#include "gateway_common.h"
+#include "hard-interface.h"
+#include "icmp_socket.h"
+#include "multicast.h"
+#include "network-coding.h"
+#include "originator.h"
+#include "packet.h"
 #include "routing.h"
 #include "send.h"
-#include "originator.h"
 #include "soft-interface.h"
-#include "icmp_socket.h"
 #include "translation-table.h"
-#include "hard-interface.h"
-#include "gateway_client.h"
-#include "bridge_loop_avoidance.h"
-#include "distributed-arp-table.h"
-#include "multicast.h"
-#include "gateway_common.h"
-#include "hash.h"
-#include "bat_algo.h"
-#include "network-coding.h"
-#include "fragmentation.h"
 
 /* List manipulations on hardif_list have to be rtnl_lock()'ed,
  * list traversals just rcu-locked
@@ -209,10 +231,13 @@
  * interfaces in the current mesh
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address to check
+ *
+ * Returns true if the mac address was found, false otherwise.
  */
-int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
+bool batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
 {
 	const struct batadv_hard_iface *hard_iface;
+	bool is_my_mac = false;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -223,12 +248,12 @@
 			continue;
 
 		if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
-			rcu_read_unlock();
-			return 1;
+			is_my_mac = true;
+			break;
 		}
 	}
 	rcu_read_unlock();
-	return 0;
+	return is_my_mac;
 }
 
 /**
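
[Editor's note: the rewritten walk sets a result and breaks, so
rcu_read_unlock() runs on a single exit path instead of being duplicated
before every return. The shape of that pattern, sketched without the RCU
plumbing:

#include <stdbool.h>
#include <string.h>

struct iface {
	unsigned char addr[6];
	struct iface *next;
};

static bool list_has_mac(const struct iface *head, const unsigned char *addr)
{
	const struct iface *it;
	bool found = false;

	/* rcu_read_lock() in the kernel version */
	for (it = head; it; it = it->next) {
		if (!memcmp(it->addr, addr, 6)) {
			found = true;
			break;
		}
	}
	/* rcu_read_unlock(): one unlock site, whatever the outcome */

	return found;
}
]
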
@@ -510,14 +535,12 @@
 int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
 {
 	struct batadv_algo_ops *bat_algo_ops_tmp;
-	int ret;
 
 	bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
 	if (bat_algo_ops_tmp) {
 		pr_info("Trying to register already registered routing algorithm: %s\n",
 			bat_algo_ops->name);
-		ret = -EEXIST;
-		goto out;
+		return -EEXIST;
 	}
 
 	/* all algorithms must implement all ops (for now) */
@@ -531,32 +554,26 @@
 	    !bat_algo_ops->bat_neigh_is_equiv_or_better) {
 		pr_info("Routing algo '%s' does not implement required ops\n",
 			bat_algo_ops->name);
-		ret = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
 	INIT_HLIST_NODE(&bat_algo_ops->list);
 	hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
-	ret = 0;
 
-out:
-	return ret;
+	return 0;
 }
 
 int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
 {
 	struct batadv_algo_ops *bat_algo_ops;
-	int ret = -EINVAL;
 
 	bat_algo_ops = batadv_algo_get(name);
 	if (!bat_algo_ops)
-		goto out;
+		return -EINVAL;
 
 	bat_priv->bat_algo_ops = bat_algo_ops;
-	ret = 0;
 
-out:
-	return ret;
+	return 0;
 }
 
 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
@@ -819,15 +836,15 @@
 	new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);
 
 	/* keep old buffer if kmalloc should fail */
-	if (new_buff) {
-		memcpy(new_buff, *packet_buff, min_packet_len);
-		kfree(*packet_buff);
-		*packet_buff = new_buff;
-		*packet_buff_len = min_packet_len + additional_packet_len;
-		return true;
-	}
+	if (!new_buff)
+		return false;
 
-	return false;
+	memcpy(new_buff, *packet_buff, min_packet_len);
+	kfree(*packet_buff);
+	*packet_buff = new_buff;
+	*packet_buff_len = min_packet_len + additional_packet_len;
+
+	return true;
 }
 
 /**
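
[Editor's note: inverting the test turns the function into the usual
grow-or-keep reallocation: on allocation failure the caller's buffer and
length stay untouched. A userspace sketch:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Grow a buffer, keeping the old one if allocation fails, so the
 * caller's pointer and length stay valid either way.
 */
static bool grow_buff(unsigned char **buff, size_t *len, size_t extra)
{
	unsigned char *new_buff = malloc(*len + extra);

	if (!new_buff)
		return false;	/* *buff and *len are untouched */

	memcpy(new_buff, *buff, *len);
	free(*buff);
	*buff = new_buff;
	*len += extra;

	return true;
}
]
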
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 4d23188..41d27c7 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -24,7 +24,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2015.0"
+#define BATADV_SOURCE_VERSION "2015.1"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -44,7 +44,7 @@
 #define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */
 #define BATADV_TT_WORK_PERIOD 5000 /* 5 seconds */
 #define BATADV_ORIG_WORK_PERIOD 1000 /* 1 second */
-#define BATADV_DAT_ENTRY_TIMEOUT (5*60000) /* 5 mins in milliseconds */
+#define BATADV_DAT_ENTRY_TIMEOUT (5 * 60000) /* 5 mins in milliseconds */
 /* sliding packet range of received originator messages in sequence numbers
  * (should be a multiple of our word size)
  */
@@ -163,28 +163,26 @@
 
 /* Kernel headers */
 
-#include <linux/mutex.h>	/* mutex */
-#include <linux/module.h>	/* needed by all modules */
-#include <linux/netdevice.h>	/* netdevice */
-#include <linux/etherdevice.h>  /* ethernet address classification */
-#include <linux/if_ether.h>	/* ethernet header */
-#include <linux/poll.h>		/* poll_table */
-#include <linux/kthread.h>	/* kernel threads */
-#include <linux/pkt_sched.h>	/* schedule types */
-#include <linux/workqueue.h>	/* workqueue */
+#include <linux/atomic.h>
+#include <linux/bitops.h> /* for packet.h */
+#include <linux/compiler.h>
+#include <linux/cpumask.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h> /* for packet.h */
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+#include <linux/types.h>
 #include <linux/percpu.h>
-#include <linux/slab.h>
-#include <net/sock.h>		/* struct sock */
-#include <net/addrconf.h>	/* ipv6 address stuff */
-#include <linux/ip.h>
-#include <net/rtnetlink.h>
 #include <linux/jiffies.h>
-#include <linux/seq_file.h>
 #include <linux/if_vlan.h>
 
 #include "types.h"
 
-#define BATADV_PRINT_VID(vid) (vid & BATADV_VLAN_HAS_TAG ? \
+struct batadv_ogm_packet;
+struct seq_file;
+struct sk_buff;
+
+#define BATADV_PRINT_VID(vid) ((vid & BATADV_VLAN_HAS_TAG) ? \
 			       (int)(vid & VLAN_VID_MASK) : -1)
 
 extern char batadv_routing_algo[];
@@ -195,7 +193,7 @@
 
 int batadv_mesh_init(struct net_device *soft_iface);
 void batadv_mesh_free(struct net_device *soft_iface);
-int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
+bool batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
 struct batadv_hard_iface *
 batadv_seq_print_text_primary_if_get(struct seq_file *seq);
 int batadv_max_header_len(void);
@@ -279,7 +277,7 @@
  *
  * note: can't use ether_addr_equal() as it requires aligned memory
  */
-static inline int batadv_compare_eth(const void *data1, const void *data2)
+static inline bool batadv_compare_eth(const void *data1, const void *data2)
 {
 	return ether_addr_equal_unaligned(data1, data2);
 }
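
[Editor's note: as the comment above says, ether_addr_equal() assumes
16-bit-aligned addresses, while these buffers come straight out of packed
packet structures. A byte-wise compare makes no alignment assumptions; a
minimal userspace restatement:

#include <stdbool.h>
#include <string.h>

/* Unaligned-safe MAC comparison: plain byte compare, unlike the
 * word-at-a-time variants that require 16-bit alignment.
 */
static bool mac_equal_unaligned(const void *a, const void *b)
{
	return memcmp(a, b, 6) == 0;
}
]
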
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index b24e4bb..7aa480b 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2014-2015 B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing
  *
@@ -15,10 +15,33 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "main.h"
 #include "multicast.h"
-#include "originator.h"
-#include "hard-interface.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/in6.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <net/addrconf.h>
+#include <net/ipv6.h>
+
+#include "packet.h"
 #include "translation-table.h"
 
 /**
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
index 3a44ebd..beb6e56 100644
--- a/net/batman-adv/multicast.h
+++ b/net/batman-adv/multicast.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2014-2015 B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing
  *
@@ -18,6 +18,12 @@
 #ifndef _NET_BATMAN_ADV_MULTICAST_H_
 #define _NET_BATMAN_ADV_MULTICAST_H_
 
+#include "main.h"
+
+struct batadv_orig_node;
+struct batadv_priv;
+struct sk_buff;
+
 /**
  * batadv_forw_mode - the way a packet should be forwarded as
  * @BATADV_FORW_ALL: forward the packet to all nodes (currently via classic
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 127cc4d..f0a50f3 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2015 B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll, Jeppe Ledet-Pedersen
  *
@@ -15,15 +15,44 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/debugfs.h>
-
-#include "main.h"
-#include "hash.h"
 #include "network-coding.h"
-#include "send.h"
-#include "originator.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/compiler.h>
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/init.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
 #include "hard-interface.h"
+#include "hash.h"
+#include "originator.h"
+#include "packet.h"
 #include "routing.h"
+#include "send.h"
 
 static struct lock_class_key batadv_nc_coding_hash_lock_class_key;
 static struct lock_class_key batadv_nc_decoding_hash_lock_class_key;
@@ -155,7 +184,7 @@
  */
 void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
 {
-	atomic_set(&bat_priv->network_coding, 1);
+	atomic_set(&bat_priv->network_coding, 0);
 	bat_priv->nc.min_tq = 200;
 	bat_priv->nc.max_fwd_delay = 10;
 	bat_priv->nc.max_buffer_time = 200;
@@ -275,7 +304,7 @@
 	 * max_buffer time
 	 */
 	return batadv_has_timed_out(nc_path->last_valid,
-				    bat_priv->nc.max_buffer_time*10);
+				    bat_priv->nc.max_buffer_time * 10);
 }
 
 /**
@@ -453,14 +482,8 @@
 	const struct batadv_nc_path *nc_path = data;
 	uint32_t hash = 0;
 
-	hash = batadv_hash_bytes(hash, &nc_path->prev_hop,
-				 sizeof(nc_path->prev_hop));
-	hash = batadv_hash_bytes(hash, &nc_path->next_hop,
-				 sizeof(nc_path->next_hop));
-
-	hash += (hash << 3);
-	hash ^= (hash >> 11);
-	hash += (hash << 15);
+	hash = jhash(&nc_path->prev_hop, sizeof(nc_path->prev_hop), hash);
+	hash = jhash(&nc_path->next_hop, sizeof(nc_path->next_hop), hash);
 
 	return hash % size;
 }
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
index 358c0d6..5b79aa8 100644
--- a/net/batman-adv/network-coding.h
+++ b/net/batman-adv/network-coding.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2015 B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll, Jeppe Ledet-Pedersen
  *
@@ -18,6 +18,19 @@
 #ifndef _NET_BATMAN_ADV_NETWORK_CODING_H_
 #define _NET_BATMAN_ADV_NETWORK_CODING_H_
 
+#include "main.h"
+
+#include <linux/types.h>
+
+struct batadv_nc_node;
+struct batadv_neigh_node;
+struct batadv_ogm_packet;
+struct batadv_orig_node;
+struct batadv_priv;
+struct net_device;
+struct seq_file;
+struct sk_buff;
+
 #ifdef CONFIG_BATMAN_ADV_NC
 
 void batadv_nc_status_update(struct net_device *net_dev);
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 90e805a..018b749 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -15,19 +15,31 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "main.h"
-#include "distributed-arp-table.h"
 #include "originator.h"
-#include "hash.h"
-#include "translation-table.h"
-#include "routing.h"
+#include "main.h"
+
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "distributed-arp-table.h"
+#include "fragmentation.h"
 #include "gateway_client.h"
 #include "hard-interface.h"
-#include "soft-interface.h"
-#include "bridge_loop_avoidance.h"
-#include "network-coding.h"
-#include "fragmentation.h"
+#include "hash.h"
 #include "multicast.h"
+#include "network-coding.h"
+#include "routing.h"
+#include "translation-table.h"
 
 /* hash class keys */
 static struct lock_class_key batadv_orig_hash_lock_class_key;
@@ -197,13 +209,19 @@
 	struct hlist_node *node_tmp;
 	struct batadv_neigh_node *neigh_node;
 	struct batadv_neigh_ifinfo *neigh_ifinfo;
+	struct batadv_algo_ops *bao;
 
 	neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
+	bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
 
 	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
 				  &neigh_node->ifinfo_list, list) {
 		batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
 	}
+
+	if (bao->bat_neigh_free)
+		bao->bat_neigh_free(neigh_node);
+
 	batadv_hardif_free_ref_now(neigh_node->if_incoming);
 
 	kfree(neigh_node);
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index aa4a436..79734d3 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -18,8 +18,21 @@
 #ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
 #define _NET_BATMAN_ADV_ORIGINATOR_H_
 
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/if_ether.h>
+#include <linux/jhash.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
 #include "hash.h"
 
+struct seq_file;
+
 int batadv_compare_orig(const struct hlist_node *node, const void *data2);
 int batadv_originator_init(struct batadv_priv *bat_priv);
 void batadv_originator_free(struct batadv_priv *bat_priv);
@@ -75,20 +88,9 @@
  */
 static inline uint32_t batadv_choose_orig(const void *data, uint32_t size)
 {
-	const unsigned char *key = data;
 	uint32_t hash = 0;
-	size_t i;
 
-	for (i = 0; i < 6; i++) {
-		hash += key[i];
-		hash += (hash << 10);
-		hash ^= (hash >> 6);
-	}
-
-	hash += (hash << 3);
-	hash ^= (hash >> 11);
-	hash += (hash << 15);
-
+	hash = jhash(data, ETH_ALEN, hash);
 	return hash % size;
 }
 
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index b81fbbf..9e747c0 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -18,6 +18,9 @@
 #ifndef _NET_BATMAN_ADV_PACKET_H_
 #define _NET_BATMAN_ADV_PACKET_H_
 
+#include <asm/byteorder.h>
+#include <linux/types.h>
+
 /**
  * enum batadv_packettype - types for batman-adv encapsulated packets
  * @BATADV_IV_OGM: originator messages for B.A.T.M.A.N. IV
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index da83982..c360c0c 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -15,20 +15,36 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "main.h"
 #include "routing.h"
-#include "send.h"
-#include "soft-interface.h"
-#include "hard-interface.h"
-#include "icmp_socket.h"
-#include "translation-table.h"
-#include "originator.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/jiffies.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+
+#include "bitarray.h"
 #include "bridge_loop_avoidance.h"
 #include "distributed-arp-table.h"
-#include "network-coding.h"
 #include "fragmentation.h"
-
-#include <linux/if_vlan.h>
+#include "hard-interface.h"
+#include "icmp_socket.h"
+#include "network-coding.h"
+#include "originator.h"
+#include "packet.h"
+#include "send.h"
+#include "soft-interface.h"
+#include "translation-table.h"
 
 static int batadv_route_unicast_packet(struct sk_buff *skb,
 				       struct batadv_hard_iface *recv_if);
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 557d3d1..6bc29d3 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -18,6 +18,16 @@
 #ifndef _NET_BATMAN_ADV_ROUTING_H_
 #define _NET_BATMAN_ADV_ROUTING_H_
 
+#include "main.h"
+
+#include <linux/types.h>
+
+struct batadv_hard_iface;
+struct batadv_neigh_node;
+struct batadv_orig_node;
+struct batadv_priv;
+struct sk_buff;
+
 bool batadv_check_management_packet(struct sk_buff *skb,
 				    struct batadv_hard_iface *hard_iface,
 				    int header_len);
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 3d64ed2..0a01992 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -15,19 +15,37 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "main.h"
-#include "distributed-arp-table.h"
 #include "send.h"
-#include "routing.h"
-#include "translation-table.h"
-#include "soft-interface.h"
-#include "hard-interface.h"
-#include "gateway_common.h"
-#include "gateway_client.h"
-#include "originator.h"
-#include "network-coding.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/if.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/workqueue.h>
+
+#include "distributed-arp-table.h"
 #include "fragmentation.h"
-#include "multicast.h"
+#include "gateway_client.h"
+#include "hard-interface.h"
+#include "network-coding.h"
+#include "originator.h"
+#include "routing.h"
+#include "soft-interface.h"
+#include "translation-table.h"
 
 static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
 
@@ -255,8 +273,8 @@
 			    struct batadv_orig_node *orig_node,
 			    unsigned short vid)
 {
-	struct ethhdr *ethhdr;
 	struct batadv_unicast_packet *unicast_packet;
+	struct ethhdr *ethhdr;
 	int ret = NET_XMIT_DROP;
 
 	if (!orig_node)
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 38d0ec1..0536835f 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -18,6 +18,19 @@
 #ifndef _NET_BATMAN_ADV_SEND_H_
 #define _NET_BATMAN_ADV_SEND_H_
 
+#include "main.h"
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#include "packet.h"
+
+struct batadv_hard_iface;
+struct batadv_orig_node;
+struct batadv_priv;
+struct sk_buff;
+struct work_struct;
+
 int batadv_send_skb_packet(struct sk_buff *skb,
 			   struct batadv_hard_iface *hard_iface,
 			   const uint8_t *dst_addr);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 5ec31d7..c002961 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -15,26 +15,50 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "main.h"
 #include "soft-interface.h"
-#include "hard-interface.h"
-#include "distributed-arp-table.h"
-#include "routing.h"
-#include "send.h"
-#include "debugfs.h"
-#include "translation-table.h"
-#include "hash.h"
-#include "gateway_common.h"
-#include "gateway_client.h"
-#include "sysfs.h"
-#include "originator.h"
-#include <linux/slab.h>
-#include <linux/ethtool.h>
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/cache.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
 #include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
 #include <linux/if_vlan.h>
-#include "multicast.h"
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/percpu.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
 #include "bridge_loop_avoidance.h"
+#include "debugfs.h"
+#include "distributed-arp-table.h"
+#include "gateway_client.h"
+#include "gateway_common.h"
+#include "hard-interface.h"
+#include "multicast.h"
 #include "network-coding.h"
+#include "packet.h"
+#include "send.h"
+#include "sysfs.h"
+#include "translation-table.h"
 
 static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
 static void batadv_get_drvinfo(struct net_device *dev,
@@ -105,6 +129,7 @@
 static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
 {
 	struct batadv_priv *bat_priv = netdev_priv(dev);
+	struct batadv_softif_vlan *vlan;
 	struct sockaddr *addr = p;
 	uint8_t old_addr[ETH_ALEN];
 
@@ -115,12 +140,17 @@
 	ether_addr_copy(dev->dev_addr, addr->sa_data);
 
 	/* only modify transtable if it has been initialized before */
-	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) {
-		batadv_tt_local_remove(bat_priv, old_addr, BATADV_NO_FLAGS,
+	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
+		return 0;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+		batadv_tt_local_remove(bat_priv, old_addr, vlan->vid,
 				       "mac address changed", false);
-		batadv_tt_local_add(dev, addr->sa_data, BATADV_NO_FLAGS,
+		batadv_tt_local_add(dev, addr->sa_data, vlan->vid,
 				    BATADV_NULL_IFINDEX, BATADV_NO_MARK);
 	}
+	rcu_read_unlock();
 
 	return 0;
 }
@@ -732,7 +762,7 @@
 	atomic_set(&bat_priv->aggregated_ogms, 1);
 	atomic_set(&bat_priv->bonding, 0);
 #ifdef CONFIG_BATMAN_ADV_BLA
-	atomic_set(&bat_priv->bridge_loop_avoidance, 0);
+	atomic_set(&bat_priv->bridge_loop_avoidance, 1);
 #endif
 #ifdef CONFIG_BATMAN_ADV_DAT
 	atomic_set(&bat_priv->distributed_arp_table, 1);
@@ -818,7 +848,7 @@
 	int ret = -EINVAL;
 
 	hard_iface = batadv_hardif_get_by_netdev(slave_dev);
-	if (!hard_iface || hard_iface->soft_iface != NULL)
+	if (!hard_iface || hard_iface->soft_iface)
 		goto out;
 
 	ret = batadv_hardif_enable_interface(hard_iface, dev->name);
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index dbab22f..578e8a6 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -18,6 +18,17 @@
 #ifndef _NET_BATMAN_ADV_SOFT_INTERFACE_H_
 #define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
 
+#include "main.h"
+
+#include <net/rtnetlink.h>
+
+struct batadv_hard_iface;
+struct batadv_orig_node;
+struct batadv_priv;
+struct batadv_softif_vlan;
+struct net_device;
+struct sk_buff;
+
 int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
 void batadv_interface_rx(struct net_device *soft_iface,
 			 struct sk_buff *skb, struct batadv_hard_iface *recv_if,
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index a75dc12..d6a312a 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -15,16 +15,35 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "main.h"
 #include "sysfs.h"
-#include "translation-table.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/stringify.h>
+
 #include "distributed-arp-table.h"
-#include "network-coding.h"
-#include "originator.h"
-#include "hard-interface.h"
-#include "soft-interface.h"
-#include "gateway_common.h"
 #include "gateway_client.h"
+#include "gateway_common.h"
+#include "hard-interface.h"
+#include "network-coding.h"
+#include "packet.h"
+#include "soft-interface.h"
 
 static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
 {
@@ -151,7 +170,7 @@
 	static BATADV_ATTR(_name, _mode, batadv_show_##_name,		\
 			   batadv_store_##_name)
 
-#define BATADV_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func)	\
+#define BATADV_ATTR_SIF_STORE_UINT(_name, _var, _min, _max, _post_func)	\
 ssize_t batadv_store_##_name(struct kobject *kobj,			\
 			     struct attribute *attr, char *buff,	\
 			     size_t count)				\
@@ -161,24 +180,24 @@
 									\
 	return __batadv_store_uint_attr(buff, count, _min, _max,	\
 					_post_func, attr,		\
-					&bat_priv->_name, net_dev);	\
+					&bat_priv->_var, net_dev);	\
 }
 
-#define BATADV_ATTR_SIF_SHOW_UINT(_name)				\
+#define BATADV_ATTR_SIF_SHOW_UINT(_name, _var)				\
 ssize_t batadv_show_##_name(struct kobject *kobj,			\
 			    struct attribute *attr, char *buff)		\
 {									\
 	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);	\
 									\
-	return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name));	\
+	return sprintf(buff, "%i\n", atomic_read(&bat_priv->_var));	\
 }									\
 
 /* Use this, if you are going to set [name] in the soft-interface
  * (bat_priv) to an unsigned integer value
  */
-#define BATADV_ATTR_SIF_UINT(_name, _mode, _min, _max, _post_func)	\
-	static BATADV_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func)\
-	static BATADV_ATTR_SIF_SHOW_UINT(_name)				\
+#define BATADV_ATTR_SIF_UINT(_name, _var, _mode, _min, _max, _post_func)\
+	static BATADV_ATTR_SIF_STORE_UINT(_name, _var, _min, _max, _post_func)\
+	static BATADV_ATTR_SIF_SHOW_UINT(_name, _var)			\
 	static BATADV_ATTR(_name, _mode, batadv_show_##_name,		\
 			   batadv_store_##_name)
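
The new _var parameter decouples the sysfs file name from the struct
batadv_priv member it backs; the updated call sites below simply pass the
same token twice. Purely as an illustration (the attribute name here is
made up), the macro now also permits exposing a member under a different
name:

	BATADV_ATTR_SIF_UINT(ogm_interval, orig_interval, S_IRUGO | S_IWUSR,
			     2 * BATADV_JITTER, INT_MAX, NULL);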
 
@@ -540,19 +559,20 @@
 static BATADV_ATTR(routing_algo, S_IRUGO, batadv_show_bat_algo, NULL);
 static BATADV_ATTR(gw_mode, S_IRUGO | S_IWUSR, batadv_show_gw_mode,
 		   batadv_store_gw_mode);
-BATADV_ATTR_SIF_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * BATADV_JITTER,
-		     INT_MAX, NULL);
-BATADV_ATTR_SIF_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, BATADV_TQ_MAX_VALUE,
-		     NULL);
-BATADV_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, BATADV_TQ_MAX_VALUE,
-		     batadv_post_gw_reselect);
+BATADV_ATTR_SIF_UINT(orig_interval, orig_interval, S_IRUGO | S_IWUSR,
+		     2 * BATADV_JITTER, INT_MAX, NULL);
+BATADV_ATTR_SIF_UINT(hop_penalty, hop_penalty, S_IRUGO | S_IWUSR, 0,
+		     BATADV_TQ_MAX_VALUE, NULL);
+BATADV_ATTR_SIF_UINT(gw_sel_class, gw_sel_class, S_IRUGO | S_IWUSR, 1,
+		     BATADV_TQ_MAX_VALUE, batadv_post_gw_reselect);
 static BATADV_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, batadv_show_gw_bwidth,
 		   batadv_store_gw_bwidth);
 #ifdef CONFIG_BATMAN_ADV_MCAST
 BATADV_ATTR_SIF_BOOL(multicast_mode, S_IRUGO | S_IWUSR, NULL);
 #endif
 #ifdef CONFIG_BATMAN_ADV_DEBUG
-BATADV_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, BATADV_DBG_ALL, NULL);
+BATADV_ATTR_SIF_UINT(log_level, log_level, S_IRUGO | S_IWUSR, 0,
+		     BATADV_DBG_ALL, NULL);
 #endif
 #ifdef CONFIG_BATMAN_ADV_NC
 BATADV_ATTR_SIF_BOOL(network_coding, S_IRUGO | S_IWUSR,
diff --git a/net/batman-adv/sysfs.h b/net/batman-adv/sysfs.h
index b715b60..2294583 100644
--- a/net/batman-adv/sysfs.h
+++ b/net/batman-adv/sysfs.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -18,6 +18,16 @@
 #ifndef _NET_BATMAN_ADV_SYSFS_H_
 #define _NET_BATMAN_ADV_SYSFS_H_
 
+#include "main.h"
+
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+struct batadv_priv;
+struct batadv_softif_vlan;
+struct kobject;
+struct net_device;
+
 #define BATADV_SYSFS_IF_MESH_SUBDIR "mesh"
 #define BATADV_SYSFS_IF_BAT_SUBDIR "batman_adv"
 /**
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 07b263a..b482495 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
  *
@@ -15,18 +15,41 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "main.h"
 #include "translation-table.h"
-#include "soft-interface.h"
-#include "hard-interface.h"
-#include "send.h"
-#include "hash.h"
-#include "originator.h"
-#include "routing.h"
-#include "bridge_loop_avoidance.h"
-#include "multicast.h"
+#include "main.h"
 
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/byteorder/generic.h>
+#include <linux/compiler.h>
 #include <linux/crc32c.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <net/net_namespace.h>
+
+#include "bridge_loop_avoidance.h"
+#include "hard-interface.h"
+#include "hash.h"
+#include "multicast.h"
+#include "originator.h"
+#include "packet.h"
+#include "soft-interface.h"
 
 /* hash class keys */
 static struct lock_class_key batadv_tt_local_hash_lock_class_key;
@@ -67,12 +90,8 @@
 	uint32_t hash = 0;
 
 	tt = (struct batadv_tt_common_entry *)data;
-	hash = batadv_hash_bytes(hash, &tt->addr, ETH_ALEN);
-	hash = batadv_hash_bytes(hash, &tt->vid, sizeof(tt->vid));
-
-	hash += (hash << 3);
-	hash ^= (hash >> 11);
-	hash += (hash << 15);
+	hash = jhash(&tt->addr, ETH_ALEN, hash);
+	hash = jhash(&tt->vid, sizeof(tt->vid), hash);
 
 	return hash % size;
 }
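
The hand-rolled shift/xor/add avalanche is replaced by two chained jhash()
rounds, feeding each round's result back in as the initval so both the MAC
address and the VID end up mixed into the bucket index. The same
computation as a standalone helper (illustration only):

	#include <linux/if_ether.h>
	#include <linux/jhash.h>

	static inline u32 example_tt_bucket(const u8 *addr, unsigned short vid,
					    u32 table_size)
	{
		u32 hash = 0;

		hash = jhash(addr, ETH_ALEN, hash);
		hash = jhash(&vid, sizeof(vid), hash);

		return hash % table_size;
	}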
@@ -954,17 +973,17 @@
 				   " * %pM %4i [%c%c%c%c%c%c] %3u.%03u   (%#.8x)\n",
 				   tt_common_entry->addr,
 				   BATADV_PRINT_VID(tt_common_entry->vid),
-				   (tt_common_entry->flags &
-				    BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
+				   ((tt_common_entry->flags &
+				     BATADV_TT_CLIENT_ROAM) ? 'R' : '.'),
 				   no_purge ? 'P' : '.',
-				   (tt_common_entry->flags &
-				    BATADV_TT_CLIENT_NEW ? 'N' : '.'),
-				   (tt_common_entry->flags &
-				    BATADV_TT_CLIENT_PENDING ? 'X' : '.'),
-				   (tt_common_entry->flags &
-				    BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
-				   (tt_common_entry->flags &
-				    BATADV_TT_CLIENT_ISOLA ? 'I' : '.'),
+				   ((tt_common_entry->flags &
+				     BATADV_TT_CLIENT_NEW) ? 'N' : '.'),
+				   ((tt_common_entry->flags &
+				     BATADV_TT_CLIENT_PENDING) ? 'X' : '.'),
+				   ((tt_common_entry->flags &
+				     BATADV_TT_CLIENT_WIFI) ? 'W' : '.'),
+				   ((tt_common_entry->flags &
+				     BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'),
 				   no_purge ? 0 : last_seen_secs,
 				   no_purge ? 0 : last_seen_msecs,
 				   vlan->tt.crc);
@@ -1528,10 +1547,10 @@
 			   BATADV_PRINT_VID(tt_global_entry->common.vid),
 			   best_entry->ttvn, best_entry->orig_node->orig,
 			   last_ttvn, vlan->tt.crc,
-			   (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
-			   (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
-			   (flags & BATADV_TT_CLIENT_ISOLA ? 'I' : '.'),
-			   (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
+			   ((flags & BATADV_TT_CLIENT_ROAM) ? 'R' : '.'),
+			   ((flags & BATADV_TT_CLIENT_WIFI) ? 'W' : '.'),
+			   ((flags & BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'),
+			   ((flags & BATADV_TT_CLIENT_TEMP) ? 'T' : '.'));
 
 		batadv_orig_node_vlan_free_ref(vlan);
 	}
@@ -1560,10 +1579,10 @@
 			   BATADV_PRINT_VID(tt_global_entry->common.vid),
 			   orig_entry->ttvn, orig_entry->orig_node->orig,
 			   last_ttvn, vlan->tt.crc,
-			   (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
-			   (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
-			   (flags & BATADV_TT_CLIENT_ISOLA ? 'I' : '.'),
-			   (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
+			   ((flags & BATADV_TT_CLIENT_ROAM) ? 'R' : '.'),
+			   ((flags & BATADV_TT_CLIENT_WIFI) ? 'W' : '.'),
+			   ((flags & BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'),
+			   ((flags & BATADV_TT_CLIENT_TEMP) ? 'T' : '.'));
 
 		batadv_orig_node_vlan_free_ref(vlan);
 	}
@@ -2529,7 +2548,7 @@
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
 		   req_src, tt_data->ttvn, req_dst,
-		   (tt_data->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+		   ((tt_data->flags & BATADV_TT_FULL_TABLE) ? 'F' : '.'));
 
 	/* Let's get the orig node of the REAL destination */
 	req_dst_orig_node = batadv_orig_hash_find(bat_priv, req_dst);
@@ -2660,7 +2679,7 @@
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
 		   req_src, tt_data->ttvn,
-		   (tt_data->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+		   ((tt_data->flags & BATADV_TT_FULL_TABLE) ? 'F' : '.'));
 
 	spin_lock_bh(&bat_priv->tt.commit_lock);
 
@@ -2899,7 +2918,7 @@
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
 		   resp_src, tt_data->ttvn, num_entries,
-		   (tt_data->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+		   ((tt_data->flags & BATADV_TT_FULL_TABLE) ? 'F' : '.'));
 
 	orig_node = batadv_orig_hash_find(bat_priv, resp_src);
 	if (!orig_node)
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index ad84d7b..6acc25d 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
  *
@@ -18,6 +18,15 @@
 #ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 #define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 
+#include "main.h"
+
+#include <linux/types.h>
+
+struct batadv_orig_node;
+struct batadv_priv;
+struct net_device;
+struct seq_file;
+
 int batadv_tt_init(struct batadv_priv *bat_priv);
 bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 			 unsigned short vid, int ifindex, uint32_t mark);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 9398c3f..67d6348 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -18,9 +18,23 @@
 #ifndef _NET_BATMAN_ADV_TYPES_H_
 #define _NET_BATMAN_ADV_TYPES_H_
 
+#ifndef _NET_BATMAN_ADV_MAIN_H_
+#error only "main.h" can be included directly
+#endif
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h> /* for linux/wait.h */
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
 #include "packet.h"
-#include "bitarray.h"
-#include <linux/kernel.h>
+
+struct seq_file;
 
 #ifdef CONFIG_BATMAN_ADV_DAT
 
@@ -132,6 +146,7 @@
  * @timestamp: time (jiffie) of last received fragment
  * @seqno: sequence number of the fragments in the list
  * @size: accumulated size of packets in list
+ * @total_size: expected size of the assembled packet
  */
 struct batadv_frag_table_entry {
 	struct hlist_head head;
@@ -139,6 +154,7 @@
 	unsigned long timestamp;
 	uint16_t seqno;
 	uint16_t size;
+	uint16_t total_size;
 };
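
A hedged note on the new @total_size member (the fragmentation code that
consumes it is outside this hunk): reassembly can declare a chain complete
once the accumulated @size of the queued fragments reaches the expected
@total_size of the original packet. The completion test would be as simple
as this hypothetical helper:

	static bool example_frag_complete(const struct batadv_frag_table_entry *e)
	{
		return e->size == e->total_size;
	}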
 
 /**
@@ -181,9 +197,10 @@
 
 /**
  * struct batadv_orig_bat_iv - B.A.T.M.A.N. IV private orig_node members
- * @bcast_own: bitfield containing the number of our OGMs this orig_node
- *  rebroadcasted "back" to us (relative to last_real_seqno)
- * @bcast_own_sum: counted result of bcast_own
+ * @bcast_own: set of bitfields (one per hard interface) where each one counts
+ *  the number of our OGMs this orig_node rebroadcasted "back" to us (relative
+ *  to last_real_seqno). Every bitfield is BATADV_TQ_LOCAL_WINDOW_SIZE bits long.
+ * @bcast_own_sum: sum of bcast_own
  * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
  *  neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
  */
@@ -1118,6 +1135,8 @@
  * @bat_neigh_is_equiv_or_better: check if neigh1 is equally good or better
  *  than neigh2 for their respective outgoing interface from the metric
  *  prospective
+ * @bat_neigh_free: free the resources allocated by the routing algorithm for a
+ *  neigh_node object
  * @bat_orig_print: print the originator table (optional)
  * @bat_orig_free: free the resources allocated by the routing algorithm for an
  *  orig_node object
@@ -1135,6 +1154,7 @@
 	void (*bat_primary_iface_set)(struct batadv_hard_iface *hard_iface);
 	void (*bat_ogm_schedule)(struct batadv_hard_iface *hard_iface);
 	void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet);
+	/* neigh_node handling API */
 	int (*bat_neigh_cmp)(struct batadv_neigh_node *neigh1,
 			     struct batadv_hard_iface *if_outgoing1,
 			     struct batadv_neigh_node *neigh2,
@@ -1144,6 +1164,7 @@
 		 struct batadv_hard_iface *if_outgoing1,
 		 struct batadv_neigh_node *neigh2,
 		 struct batadv_hard_iface *if_outgoing2);
+	void (*bat_neigh_free)(struct batadv_neigh_node *neigh);
 	/* orig_node handling API */
 	void (*bat_orig_print)(struct batadv_priv *priv, struct seq_file *seq,
 			       struct batadv_hard_iface *hard_iface);
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 1742b84..2fb7b30 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -192,7 +192,7 @@
 		if (ipv6_addr_any(nexthop))
 			return NULL;
 	} else {
-		nexthop = rt6_nexthop(rt);
+		nexthop = rt6_nexthop(rt, daddr);
 
 		/* We need to remember the address because it is needed
 		 * by bt_xmit() when sending the packet. In bt_xmit(), the
@@ -856,7 +856,7 @@
 	set_dev_addr(netdev, &chan->src, chan->src_type);
 
 	netdev->netdev_ops = &netdev_ops;
-	SET_NETDEV_DEV(netdev, &chan->conn->hcon->dev);
+	SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
 	SET_NETDEV_DEVTYPE(netdev, &bt_type);
 
 	err = register_netdev(netdev);
@@ -928,7 +928,7 @@
 
 	unregister_netdev(entry->netdev);
 
-	/* The entry pointer is deleted in device_event() */
+	/* The entry pointer is deleted by the netdev destructor. */
 }
 
 static void chan_close_cb(struct l2cap_chan *chan)
@@ -937,7 +937,7 @@
 	struct lowpan_dev *dev = NULL;
 	struct lowpan_peer *peer;
 	int err = -ENOENT;
-	bool last = false, removed = true;
+	bool last = false, remove = true;
 
 	BT_DBG("chan %p conn %p", chan, chan->conn);
 
@@ -948,7 +948,7 @@
 		/* If conn is set, then the netdev is also there and we should
 		 * not remove it.
 		 */
-		removed = false;
+		remove = false;
 	}
 
 	spin_lock(&devices_lock);
@@ -977,7 +977,7 @@
 
 		ifdown(dev->netdev);
 
-		if (!removed) {
+		if (remove) {
 			INIT_WORK(&entry->delete_netdev, delete_netdev);
 			schedule_work(&entry->delete_netdev);
 		}
@@ -1208,8 +1208,6 @@
 
 		list_del_rcu(&peer->list);
 		kfree_rcu(peer, rcu);
-
-		module_put(THIS_MODULE);
 	}
 	spin_unlock(&devices_lock);
 }
@@ -1418,7 +1416,6 @@
 				BT_DBG("Unregistered netdev %s %p",
 				       netdev->name, netdev);
 				list_del(&entry->list);
-				kfree(entry);
 				break;
 			}
 		}
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 9a8ea23..29c12ae 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -12,9 +12,10 @@
 bluetooth_6lowpan-y := 6lowpan.o
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
-	hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
+	hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \
 	a2mp.o amp.o ecc.o hci_request.o mgmt_util.o
 
+bluetooth-$(CONFIG_BT_BREDR) += sco.o
 bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o
 bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
 
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index bde2bdd..b5116fa 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -202,7 +202,7 @@
 	if (sock->type != SOCK_RAW)
 		return -ESOCKTNOSUPPORT;
 
-	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto);
+	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, kern);
 	if (!sk)
 		return -ENOMEM;
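
The extra sk_alloc() argument recurs throughout this merge (bnep, cmtp,
hidp, hci_sock and l2cap below): the allocator now takes the kern flag so
kernel-internal sockets can be told apart from user-created ones. Callers
simply forward the flag they received from the protocol ->create() hook; a
sketch, with the surrounding initialization elided:

	static int example_sock_create(struct net *net, struct socket *sock,
				       int protocol, int kern)
	{
		struct sock *sk;

		sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, kern);
		if (!sk)
			return -ENOMEM;

		sock_init_data(sock, sk);
		return 0;
	}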
 
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index d82787d..ce86a7b 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -205,7 +205,7 @@
 	if (sock->type != SOCK_RAW)
 		return -ESOCKTNOSUPPORT;
 
-	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto);
+	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto, kern);
 	if (!sk)
 		return -ENOMEM;
 
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index ee5e598..2c48bf0 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -276,7 +276,7 @@
 }
 
 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
-		      __u8 ltk[16])
+		      __u8 ltk[16], __u8 key_size)
 {
 	struct hci_dev *hdev = conn->hdev;
 	struct hci_cp_le_start_enc cp;
@@ -288,7 +288,7 @@
 	cp.handle = cpu_to_le16(conn->handle);
 	cp.rand = rand;
 	cp.ediv = ediv;
-	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
+	memcpy(cp.ltk, ltk, key_size);
 
 	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
 }
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index c4802f3..2f8fb33 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -94,7 +94,6 @@
 	char buf[32];
 	size_t buf_size = min(count, (sizeof(buf)-1));
 	bool enable;
-	int err;
 
 	if (!test_bit(HCI_UP, &hdev->flags))
 		return -ENETDOWN;
@@ -121,12 +120,8 @@
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);
 
-	err = -bt_to_errno(skb->data[0]);
 	kfree_skb(skb);
 
-	if (err < 0)
-		return err;
-
 	hci_dev_change_flag(hdev, HCI_DUT_MODE);
 
 	return count;
@@ -1558,6 +1553,7 @@
 	BT_DBG("%s %p", hdev->name, hdev);
 
 	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
+	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
 	    test_bit(HCI_UP, &hdev->flags)) {
 		/* Execute vendor specific shutdown routine */
 		if (hdev->shutdown)
@@ -1595,6 +1591,11 @@
 	if (hci_dev_test_flag(hdev, HCI_MGMT))
 		cancel_delayed_work_sync(&hdev->rpa_expired);
 
+	if (hdev->adv_instance_timeout) {
+		cancel_delayed_work_sync(&hdev->adv_instance_expire);
+		hdev->adv_instance_timeout = 0;
+	}
+
 	/* Avoid potential lockdep warnings from the *_flush() calls by
 	 * ensuring the workqueue is empty up front.
 	 */
@@ -2151,6 +2152,17 @@
 	mgmt_discoverable_timeout(hdev);
 }
 
+static void hci_adv_timeout_expire(struct work_struct *work)
+{
+	struct hci_dev *hdev;
+
+	hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
+
+	BT_DBG("%s", hdev->name);
+
+	mgmt_adv_timeout_expired(hdev);
+}
+
 void hci_uuids_clear(struct hci_dev *hdev)
 {
 	struct bt_uuid *uuid, *tmp;
@@ -2614,6 +2626,130 @@
 	return 0;
 }
 
+/* This function requires the caller holds hdev->lock */
+struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
+{
+	struct adv_info *adv_instance;
+
+	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
+		if (adv_instance->instance == instance)
+			return adv_instance;
+	}
+
+	return NULL;
+}
+
+/* This function requires the caller holds hdev->lock */
+struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
+{
+	struct adv_info *cur_instance;
+
+	cur_instance = hci_find_adv_instance(hdev, instance);
+	if (!cur_instance)
+		return NULL;
+
+	if (cur_instance == list_last_entry(&hdev->adv_instances,
+					    struct adv_info, list))
+		return list_first_entry(&hdev->adv_instances,
+					struct adv_info, list);
+	else
+		return list_next_entry(cur_instance, list);
+}
+
+/* This function requires the caller holds hdev->lock */
+int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
+{
+	struct adv_info *adv_instance;
+
+	adv_instance = hci_find_adv_instance(hdev, instance);
+	if (!adv_instance)
+		return -ENOENT;
+
+	BT_DBG("%s removing %dMR", hdev->name, instance);
+
+	if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
+		cancel_delayed_work(&hdev->adv_instance_expire);
+		hdev->adv_instance_timeout = 0;
+	}
+
+	list_del(&adv_instance->list);
+	kfree(adv_instance);
+
+	hdev->adv_instance_cnt--;
+
+	return 0;
+}
+
+/* This function requires the caller holds hdev->lock */
+void hci_adv_instances_clear(struct hci_dev *hdev)
+{
+	struct adv_info *adv_instance, *n;
+
+	if (hdev->adv_instance_timeout) {
+		cancel_delayed_work(&hdev->adv_instance_expire);
+		hdev->adv_instance_timeout = 0;
+	}
+
+	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
+		list_del(&adv_instance->list);
+		kfree(adv_instance);
+	}
+
+	hdev->adv_instance_cnt = 0;
+}
+
+/* This function requires the caller holds hdev->lock */
+int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
+			 u16 adv_data_len, u8 *adv_data,
+			 u16 scan_rsp_len, u8 *scan_rsp_data,
+			 u16 timeout, u16 duration)
+{
+	struct adv_info *adv_instance;
+
+	adv_instance = hci_find_adv_instance(hdev, instance);
+	if (adv_instance) {
+		memset(adv_instance->adv_data, 0,
+		       sizeof(adv_instance->adv_data));
+		memset(adv_instance->scan_rsp_data, 0,
+		       sizeof(adv_instance->scan_rsp_data));
+	} else {
+		if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
+		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
+			return -EOVERFLOW;
+
+		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
+		if (!adv_instance)
+			return -ENOMEM;
+
+		adv_instance->pending = true;
+		adv_instance->instance = instance;
+		list_add(&adv_instance->list, &hdev->adv_instances);
+		hdev->adv_instance_cnt++;
+	}
+
+	adv_instance->flags = flags;
+	adv_instance->adv_data_len = adv_data_len;
+	adv_instance->scan_rsp_len = scan_rsp_len;
+
+	if (adv_data_len)
+		memcpy(adv_instance->adv_data, adv_data, adv_data_len);
+
+	if (scan_rsp_len)
+		memcpy(adv_instance->scan_rsp_data,
+		       scan_rsp_data, scan_rsp_len);
+
+	adv_instance->timeout = timeout;
+	adv_instance->remaining_time = timeout;
+
+	if (duration == 0)
+		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
+	else
+		adv_instance->duration = duration;
+
+	BT_DBG("%s for %dMR", hdev->name, instance);
+
+	return 0;
+}
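
Taken together, the helpers above form the bookkeeping API for
multi-instance advertising, and every one of them requires hdev->lock to be
held. A hedged usage sketch (instance number and advertising data are made
up):

	static int example_install_instance(struct hci_dev *hdev)
	{
		u8 adv_data[] = { 0x02, 0x01, 0x06 };	/* Flags AD, illustrative */
		int err;

		hci_dev_lock(hdev);
		err = hci_add_adv_instance(hdev, 0x01, 0 /* flags */,
					   sizeof(adv_data), adv_data,
					   0, NULL,	/* no scan response */
					   0 /* no timeout */,
					   0 /* default duration */);
		hci_dev_unlock(hdev);

		return err;
	}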
+
 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
 					 bdaddr_t *bdaddr, u8 type)
 {
@@ -3019,6 +3155,9 @@
 	hdev->manufacturer = 0xffff;	/* Default to internal use */
 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
+	hdev->adv_instance_cnt = 0;
+	hdev->cur_adv_instance = 0x00;
+	hdev->adv_instance_timeout = 0;
 
 	hdev->sniff_max_interval = 800;
 	hdev->sniff_min_interval = 80;
@@ -3060,6 +3199,7 @@
 	INIT_LIST_HEAD(&hdev->pend_le_conns);
 	INIT_LIST_HEAD(&hdev->pend_le_reports);
 	INIT_LIST_HEAD(&hdev->conn_hash.list);
+	INIT_LIST_HEAD(&hdev->adv_instances);
 
 	INIT_WORK(&hdev->rx_work, hci_rx_work);
 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
@@ -3071,6 +3211,7 @@
 	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
 	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
+	INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
 
 	skb_queue_head_init(&hdev->rx_q);
 	skb_queue_head_init(&hdev->cmd_q);
@@ -3082,7 +3223,6 @@
 
 	hci_init_sysfs(hdev);
 	discovery_init(hdev);
-	adv_info_init(hdev);
 
 	return hdev;
 }
@@ -3253,6 +3393,7 @@
 	hci_smp_ltks_clear(hdev);
 	hci_smp_irks_clear(hdev);
 	hci_remote_oob_data_clear(hdev);
+	hci_adv_instances_clear(hdev);
 	hci_bdaddr_list_clear(&hdev->le_white_list);
 	hci_conn_params_clear_all(hdev);
 	hci_discovery_filter_clear(hdev);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 7b61be7..32363c2 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -2603,6 +2603,63 @@
 	hci_dev_unlock(hdev);
 }
 
+static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
+				       u16 opcode, struct sk_buff *skb)
+{
+	const struct hci_rp_read_enc_key_size *rp;
+	struct hci_conn *conn;
+	u16 handle;
+
+	BT_DBG("%s status 0x%02x", hdev->name, status);
+
+	if (!skb || skb->len < sizeof(*rp)) {
+		BT_ERR("%s invalid HCI Read Encryption Key Size response",
+		       hdev->name);
+		return;
+	}
+
+	rp = (void *)skb->data;
+	handle = le16_to_cpu(rp->handle);
+
+	hci_dev_lock(hdev);
+
+	conn = hci_conn_hash_lookup_handle(hdev, handle);
+	if (!conn)
+		goto unlock;
+
+	/* If we fail to read the encryption key size, assume the maximum
+	 * (which is also what we do when this HCI command isn't
+	 * supported).
+	 */
+	if (rp->status) {
+		BT_ERR("%s failed to read key size for handle %u", hdev->name,
+		       handle);
+		conn->enc_key_size = HCI_LINK_KEY_SIZE;
+	} else {
+		conn->enc_key_size = rp->key_size;
+	}
+
+	if (conn->state == BT_CONFIG) {
+		conn->state = BT_CONNECTED;
+		hci_connect_cfm(conn, 0);
+		hci_conn_drop(conn);
+	} else {
+		u8 encrypt;
+
+		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
+			encrypt = 0x00;
+		else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
+			encrypt = 0x02;
+		else
+			encrypt = 0x01;
+
+		hci_encrypt_cfm(conn, 0, encrypt);
+	}
+
+unlock:
+	hci_dev_unlock(hdev);
+}
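
The hdev->commands[] test used further down in hci_encrypt_change_evt() is
the usual way to probe controller support: the array mirrors the Supported
Commands bitmask reported by the controller, where (per the HCI spec table,
to the best of my reading) octet 20, bit 4 is HCI_Read_Encryption_Key_Size.
Written as a named predicate:

	static bool example_can_read_enc_key_size(struct hci_dev *hdev)
	{
		return (hdev->commands[20] & 0x10) != 0;
	}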
+
 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_encrypt_change *ev = (void *) skb->data;
@@ -2650,22 +2707,51 @@
 		goto unlock;
 	}
 
+	/* In Secure Connections Only mode, do not allow any connections
+	 * that are not encrypted with AES-CCM using a P-256 authenticated
+	 * combination key.
+	 */
+	if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
+	    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
+	     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
+		hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
+		hci_conn_drop(conn);
+		goto unlock;
+	}
+
+	/* Try reading the encryption key size for encrypted ACL links */
+	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
+		struct hci_cp_read_enc_key_size cp;
+		struct hci_request req;
+
+		/* Only send HCI_Read_Encryption_Key_Size if the
+		 * controller really supports it. If it doesn't, assume
+		 * the default size (16).
+		 */
+		if (!(hdev->commands[20] & 0x10)) {
+			conn->enc_key_size = HCI_LINK_KEY_SIZE;
+			goto notify;
+		}
+
+		hci_req_init(&req, hdev);
+
+		cp.handle = cpu_to_le16(conn->handle);
+		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
+
+		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
+			BT_ERR("Sending HCI Read Encryption Key Size failed");
+			conn->enc_key_size = HCI_LINK_KEY_SIZE;
+			goto notify;
+		}
+
+		goto unlock;
+	}
+
+notify:
 	if (conn->state == BT_CONFIG) {
 		if (!ev->status)
 			conn->state = BT_CONNECTED;
 
-		/* In Secure Connections Only mode, do not allow any
-		 * connections that are not encrypted with AES-CCM
-		 * using a P-256 authenticated combination key.
-		 */
-		if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
-		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
-		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
-			hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
-			hci_conn_drop(conn);
-			goto unlock;
-		}
-
 		hci_connect_cfm(conn, ev->status);
 		hci_conn_drop(conn);
 	} else
@@ -4955,7 +5041,8 @@
 			goto not_found;
 	}
 
-	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
+	memcpy(cp.ltk, ltk->val, ltk->enc_size);
+	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
 	cp.handle = cpu_to_le16(conn->handle);
 
 	conn->pending_sec_level = smp_ltk_sec_level(ltk);
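
Both LTK fixes in this series share one shape: copy only the negotiated
enc_size bytes of the key and explicitly zero the tail, so a
reduced-entropy key never carries stale bytes into the fixed-size HCI
command field. The pattern in isolation:

	/* cp.ltk is the fixed 16-byte field of the LE Start Encryption
	 * command; ltk->enc_size may be smaller for shortened keys.
	 */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);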
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 56f9edb..f2d30d1 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -503,9 +503,9 @@
 
 	if (hdev) {
 		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
-			mgmt_index_added(hdev);
-			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 			hci_dev_close(hdev->id);
+			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
+			mgmt_index_added(hdev);
 		}
 
 		atomic_dec(&hdev->promisc);
@@ -741,10 +741,11 @@
 			goto done;
 		}
 
-		if (test_bit(HCI_UP, &hdev->flags) ||
-		    test_bit(HCI_INIT, &hdev->flags) ||
+		if (test_bit(HCI_INIT, &hdev->flags) ||
 		    hci_dev_test_flag(hdev, HCI_SETUP) ||
-		    hci_dev_test_flag(hdev, HCI_CONFIG)) {
+		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
+		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
+		     test_bit(HCI_UP, &hdev->flags))) {
 			err = -EBUSY;
 			hci_dev_put(hdev);
 			goto done;
@@ -760,10 +761,21 @@
 
 		err = hci_dev_open(hdev->id);
 		if (err) {
-			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
-			mgmt_index_added(hdev);
-			hci_dev_put(hdev);
-			goto done;
+			if (err == -EALREADY) {
+				/* In case the transport is already up and
+				 * running, clear the error here.
+				 *
+				 * This can happen when opening a user
+				 * channel and HCI_AUTO_OFF grace period
+				 * is still active.
+				 */
+				err = 0;
+			} else {
+				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
+				mgmt_index_added(hdev);
+				hci_dev_put(hdev);
+				goto done;
+			}
 		}
 
 		atomic_inc(&hdev->promisc);
@@ -1377,7 +1389,7 @@
 
 	sock->ops = &hci_sock_ops;
 
-	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
+	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
 	if (!sk)
 		return -ENOMEM;
 
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index cb3fdde..008ba43 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -235,7 +235,7 @@
 	if (sock->type != SOCK_RAW)
 		return -ESOCKTNOSUPPORT;
 
-	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto);
+	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto, kern);
 	if (!sk)
 		return -ENOMEM;
 
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index dad4197..51594fb 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1601,7 +1601,7 @@
 
 	hci_dev_lock(hdev);
 
-	if (user->list.next || user->list.prev) {
+	if (!list_empty(&user->list)) {
 		ret = -EINVAL;
 		goto out_unlock;
 	}
@@ -1631,12 +1631,10 @@
 
 	hci_dev_lock(hdev);
 
-	if (!user->list.next || !user->list.prev)
+	if (list_empty(&user->list))
 		goto out_unlock;
 
 	list_del(&user->list);
-	user->list.next = NULL;
-	user->list.prev = NULL;
 	user->remove(conn, user);
 
 out_unlock:
@@ -1651,8 +1649,6 @@
 	while (!list_empty(&conn->users)) {
 		user = list_first_entry(&conn->users, struct l2cap_user, list);
 		list_del(&user->list);
-		user->list.next = NULL;
-		user->list.prev = NULL;
 		user->remove(conn, user);
 	}
 }
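
The l2cap_user changes replace hand-rolled NULL sentinels with the regular
list primitives: membership is tested with list_empty() on the node itself,
which presumes callers hand in a node initialized with INIT_LIST_HEAD()
(and re-initialized, e.g. via list_del_init(), before any reuse). The
lifecycle in miniature, with hypothetical names:

	struct example_user {			/* hypothetical */
		struct list_head list;
	};

	static void example_lifecycle(struct list_head *users)
	{
		struct example_user u;

		INIT_LIST_HEAD(&u.list);	/* list_empty() is true */
		list_add(&u.list, users);	/* list_empty() is false */
		list_del_init(&u.list);		/* unlinked, safe to reuse */
	}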
@@ -7442,7 +7438,7 @@
 	mutex_unlock(&conn->chan_lock);
 }
 
-int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
 {
 	struct l2cap_conn *conn = hcon->l2cap_data;
 	struct l2cap_hdr *hdr;
@@ -7485,7 +7481,7 @@
 		if (len == skb->len) {
 			/* Complete frame received */
 			l2cap_recv_frame(conn, skb);
-			return 0;
+			return;
 		}
 
 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
@@ -7544,7 +7540,6 @@
 
 drop:
 	kfree_skb(skb);
-	return 0;
 }
 
 static struct hci_cb l2cap_cb = {
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index a7278f0..2442877 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -43,7 +43,7 @@
 static const struct proto_ops l2cap_sock_ops;
 static void l2cap_sock_init(struct sock *sk, struct sock *parent);
 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
-				     int proto, gfp_t prio);
+				     int proto, gfp_t prio, int kern);
 
 bool l2cap_is_socket(struct socket *sock)
 {
@@ -1193,7 +1193,7 @@
 	}
 
 	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
-			      GFP_ATOMIC);
+			      GFP_ATOMIC, 0);
 	if (!sk) {
 		release_sock(parent);
 		return NULL;
@@ -1523,12 +1523,12 @@
 };
 
 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
-				     int proto, gfp_t prio)
+				     int proto, gfp_t prio, int kern)
 {
 	struct sock *sk;
 	struct l2cap_chan *chan;
 
-	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
+	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto, kern);
 	if (!sk)
 		return NULL;
 
@@ -1574,7 +1574,7 @@
 
 	sock->ops = &l2cap_sock_ops;
 
-	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
+	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern);
 	if (!sk)
 		return -ENOMEM;
 
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7fd87e7..7998fb2 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -38,7 +38,7 @@
 #include "mgmt_util.h"
 
 #define MGMT_VERSION	1
-#define MGMT_REVISION	9
+#define MGMT_REVISION	10
 
 static const u16 mgmt_commands[] = {
 	MGMT_OP_READ_INDEX_LIST,
@@ -832,6 +832,20 @@
 	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
 }
 
+static u8 get_current_adv_instance(struct hci_dev *hdev)
+{
+	/* The "Set Advertising" setting supersedes the "Add Advertising"
+	 * setting. Here we set the advertising data based on which
+	 * setting was set. When neither apply, default to the global settings,
+	 * represented by instance "0".
+	 */
+	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
+	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
+		return hdev->cur_adv_instance;
+
+	return 0x00;
+}
+
 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
 {
 	u8 ad_len = 0;
@@ -858,19 +872,25 @@
 	return ad_len;
 }
 
-static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
+static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
+					u8 *ptr)
 {
+	struct adv_info *adv_instance;
+
+	adv_instance = hci_find_adv_instance(hdev, instance);
+	if (!adv_instance)
+		return 0;
+
 	/* TODO: Set the appropriate entries based on advertising instance flags
 	 * here once flags other than 0 are supported.
 	 */
-	memcpy(ptr, hdev->adv_instance.scan_rsp_data,
-	       hdev->adv_instance.scan_rsp_len);
+	memcpy(ptr, adv_instance->scan_rsp_data,
+	       adv_instance->scan_rsp_len);
 
-	return hdev->adv_instance.scan_rsp_len;
+	return adv_instance->scan_rsp_len;
 }
 
-static void update_scan_rsp_data_for_instance(struct hci_request *req,
-					      u8 instance)
+static void update_inst_scan_rsp_data(struct hci_request *req, u8 instance)
 {
 	struct hci_dev *hdev = req->hdev;
 	struct hci_cp_le_set_scan_rsp_data cp;
@@ -882,7 +902,7 @@
 	memset(&cp, 0, sizeof(cp));
 
 	if (instance)
-		len = create_instance_scan_rsp_data(hdev, cp.data);
+		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
 	else
 		len = create_default_scan_rsp_data(hdev, cp.data);
 
@@ -900,21 +920,7 @@
 
 static void update_scan_rsp_data(struct hci_request *req)
 {
-	struct hci_dev *hdev = req->hdev;
-	u8 instance;
-
-	/* The "Set Advertising" setting supersedes the "Add Advertising"
-	 * setting. Here we set the scan response data based on which
-	 * setting was set. When neither apply, default to the global settings,
-	 * represented by instance "0".
-	 */
-	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
-	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
-		instance = 0x01;
-	else
-		instance = 0x00;
-
-	update_scan_rsp_data_for_instance(req, instance);
+	update_inst_scan_rsp_data(req, get_current_adv_instance(req->hdev));
 }
 
 static u8 get_adv_discov_flags(struct hci_dev *hdev)
@@ -941,20 +947,6 @@
 	return 0;
 }
 
-static u8 get_current_adv_instance(struct hci_dev *hdev)
-{
-	/* The "Set Advertising" setting supersedes the "Add Advertising"
-	 * setting. Here we set the advertising data based on which
-	 * setting was set. When neither apply, default to the global settings,
-	 * represented by instance "0".
-	 */
-	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
-	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
-		return 0x01;
-
-	return 0x00;
-}
-
 static bool get_connectable(struct hci_dev *hdev)
 {
 	struct mgmt_pending_cmd *cmd;
@@ -975,41 +967,65 @@
 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
 {
 	u32 flags;
+	struct adv_info *adv_instance;
 
-	if (instance > 0x01)
+	if (instance == 0x00) {
+		/* Instance 0 always manages the "Tx Power" and "Flags"
+		 * fields
+		 */
+		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
+
+		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
+		 * corresponds to the "connectable" instance flag.
+		 */
+		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
+			flags |= MGMT_ADV_FLAG_CONNECTABLE;
+
+		return flags;
+	}
+
+	adv_instance = hci_find_adv_instance(hdev, instance);
+
+	/* Return 0 when given an invalid instance identifier. */
+	if (!adv_instance)
 		return 0;
 
-	if (instance == 0x01)
-		return hdev->adv_instance.flags;
-
-	/* Instance 0 always manages the "Tx Power" and "Flags" fields */
-	flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
-
-	/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting corresponds
-	 * to the "connectable" instance flag.
-	 */
-	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
-		flags |= MGMT_ADV_FLAG_CONNECTABLE;
-
-	return flags;
+	return adv_instance->flags;
 }
 
-static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
+static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
 {
-	/* Ignore instance 0 and other unsupported instances */
-	if (instance != 0x01)
+	u8 instance = get_current_adv_instance(hdev);
+	struct adv_info *adv_instance;
+
+	/* Ignore instance 0 */
+	if (instance == 0x00)
+		return 0;
+
+	adv_instance = hci_find_adv_instance(hdev, instance);
+	if (!adv_instance)
 		return 0;
 
 	/* TODO: Take into account the "appearance" and "local-name" flags here.
 	 * These are currently being ignored as they are not supported.
 	 */
-	return hdev->adv_instance.scan_rsp_len;
+	return adv_instance->scan_rsp_len;
 }
 
 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
 {
+	struct adv_info *adv_instance = NULL;
 	u8 ad_len = 0, flags = 0;
-	u32 instance_flags = get_adv_instance_flags(hdev, instance);
+	u32 instance_flags;
+
+	/* Return 0 when the current instance identifier is invalid. */
+	if (instance) {
+		adv_instance = hci_find_adv_instance(hdev, instance);
+		if (!adv_instance)
+			return 0;
+	}
+
+	instance_flags = get_adv_instance_flags(hdev, instance);
 
 	/* The Add Advertising command allows userspace to set both the general
 	 * and limited discoverable flags.
@@ -1043,12 +1059,11 @@
 		}
 	}
 
-	if (instance) {
-		memcpy(ptr, hdev->adv_instance.adv_data,
-		       hdev->adv_instance.adv_data_len);
-
-		ad_len += hdev->adv_instance.adv_data_len;
-		ptr += hdev->adv_instance.adv_data_len;
+	if (adv_instance) {
+		memcpy(ptr, adv_instance->adv_data,
+		       adv_instance->adv_data_len);
+		ad_len += adv_instance->adv_data_len;
+		ptr += adv_instance->adv_data_len;
 	}
 
 	/* Provide Tx Power only if we can provide a valid value for it */
@@ -1065,7 +1080,7 @@
 	return ad_len;
 }
 
-static void update_adv_data_for_instance(struct hci_request *req, u8 instance)
+static void update_inst_adv_data(struct hci_request *req, u8 instance)
 {
 	struct hci_dev *hdev = req->hdev;
 	struct hci_cp_le_set_adv_data cp;
@@ -1093,10 +1108,7 @@
 
 static void update_adv_data(struct hci_request *req)
 {
-	struct hci_dev *hdev = req->hdev;
-	u8 instance = get_current_adv_instance(hdev);
-
-	update_adv_data_for_instance(req, instance);
+	update_inst_adv_data(req, get_current_adv_instance(req->hdev));
 }
 
 int mgmt_update_adv_data(struct hci_dev *hdev)
@@ -1277,7 +1289,7 @@
 
 	if (connectable)
 		cp.type = LE_ADV_IND;
-	else if (get_adv_instance_scan_rsp_len(hdev, instance))
+	else if (get_cur_adv_instance_scan_rsp_len(hdev))
 		cp.type = LE_ADV_SCAN_IND;
 	else
 		cp.type = LE_ADV_NONCONN_IND;
@@ -1459,27 +1471,141 @@
 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
 }
 
-static void clear_adv_instance(struct hci_dev *hdev)
+static int schedule_adv_instance(struct hci_request *req, u8 instance,
+				 bool force)
+{
+	struct hci_dev *hdev = req->hdev;
+	struct adv_info *adv_instance = NULL;
+	u16 timeout;
+
+	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
+		return -EPERM;
+
+	if (hdev->adv_instance_timeout)
+		return -EBUSY;
+
+	adv_instance = hci_find_adv_instance(hdev, instance);
+	if (!adv_instance)
+		return -ENOENT;
+
+	/* A zero timeout means unlimited advertising. As long as there is
+	 * only one instance, duration should be ignored. We still set a timeout
+	 * in case further instances are being added later on.
+	 *
+	 * If the remaining lifetime of the instance is more than the duration
+	 * then the timeout corresponds to the duration, otherwise it will be
+	 * reduced to the remaining instance lifetime.
+	 */
+	if (adv_instance->timeout == 0 ||
+	    adv_instance->duration <= adv_instance->remaining_time)
+		timeout = adv_instance->duration;
+	else
+		timeout = adv_instance->remaining_time;
+
+	/* The remaining time is being reduced unless the instance is being
+	 * advertised without time limit.
+	 */
+	if (adv_instance->timeout)
+		adv_instance->remaining_time =
+				adv_instance->remaining_time - timeout;
+
+	hdev->adv_instance_timeout = timeout;
+	queue_delayed_work(hdev->workqueue,
+			   &hdev->adv_instance_expire,
+			   msecs_to_jiffies(timeout * 1000));
+
+	/* If we're just re-scheduling the same instance again then do not
+	 * execute any HCI commands. This happens when a single instance is
+	 * being advertised.
+	 */
+	if (!force && hdev->cur_adv_instance == instance &&
+	    hci_dev_test_flag(hdev, HCI_LE_ADV))
+		return 0;
+
+	hdev->cur_adv_instance = instance;
+	update_adv_data(req);
+	update_scan_rsp_data(req);
+	enable_advertising(req);
+
+	return 0;
+}
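
The duration bookkeeping above compresses to a simple rule: a zero timeout
means unlimited advertising, otherwise each round runs for
min(duration, remaining lifetime) and the lifetime shrinks accordingly. As
a pure helper (a sketch of the arithmetic, not a drop-in function):

	static u16 example_next_round(struct adv_info *adv)
	{
		u16 round;

		if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
			round = adv->duration;
		else
			round = adv->remaining_time;

		if (adv->timeout)	/* finite lifetime: consume it */
			adv->remaining_time -= round;

		return round;
	}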
+
+static void cancel_adv_timeout(struct hci_dev *hdev)
 {
-	struct hci_request req;
+	if (hdev->adv_instance_timeout) {
+		hdev->adv_instance_timeout = 0;
+		cancel_delayed_work(&hdev->adv_instance_expire);
+	}
+}
 
-	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
-		return;
+/* For a single instance:
+ * - force == true: The instance will be removed even when its remaining
+ *   lifetime is not zero.
+ * - force == false: The instance will be deactivated but kept stored unless
+ *   the remaining lifetime is zero.
+ *
+ * For instance == 0x00:
+ * - force == true: All instances will be removed regardless of their timeout
+ *   setting.
+ * - force == false: Only instances that have a timeout will be removed.
+ */
+static void clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
+			       u8 instance, bool force)
+{
+	struct adv_info *adv_instance, *n, *next_instance = NULL;
+	int err;
+	u8 rem_inst;
 
-	if (hdev->adv_instance.timeout)
-		cancel_delayed_work(&hdev->adv_instance.timeout_exp);
+	/* Cancel any timeout concerning the removed instance(s). */
+	if (!instance || hdev->cur_adv_instance == instance)
+		cancel_adv_timeout(hdev);
 
-	memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
-	advertising_removed(NULL, hdev, 1);
-	hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
+	/* Get the next instance to advertise BEFORE we remove
+	 * the current one. This can be the same instance again
+	 * if there is only one instance.
+	 */
+	if (instance && hdev->cur_adv_instance == instance)
+		next_instance = hci_get_next_instance(hdev, instance);
 
-	if (!hdev_is_powered(hdev) ||
+	if (instance == 0x00) {
+		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
+					 list) {
+			if (!(force || adv_instance->timeout))
+				continue;
+
+			rem_inst = adv_instance->instance;
+			err = hci_remove_adv_instance(hdev, rem_inst);
+			if (!err)
+				advertising_removed(NULL, hdev, rem_inst);
+		}
+		hdev->cur_adv_instance = 0x00;
+	} else {
+		adv_instance = hci_find_adv_instance(hdev, instance);
+
+		if (force || (adv_instance && adv_instance->timeout &&
+			      !adv_instance->remaining_time)) {
+			/* Don't advertise a removed instance. */
+			if (next_instance &&
+			    next_instance->instance == instance)
+				next_instance = NULL;
+
+			err = hci_remove_adv_instance(hdev, instance);
+			if (!err)
+				advertising_removed(NULL, hdev, instance);
+		}
+	}
+
+	if (list_empty(&hdev->adv_instances)) {
+		hdev->cur_adv_instance = 0x00;
+		hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
+	}
+
+	if (!req || !hdev_is_powered(hdev) ||
 	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
 		return;
 
-	hci_req_init(&req, hdev);
-	disable_advertising(&req);
-	hci_req_run(&req, NULL);
+	if (next_instance)
+		schedule_adv_instance(req, next_instance->instance, false);
 }
 
 static int clean_up_hci_state(struct hci_dev *hdev)
@@ -1497,8 +1623,7 @@
 		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 	}
 
-	if (hdev->adv_instance.timeout)
-		clear_adv_instance(hdev);
+	clear_adv_instance(hdev, NULL, 0x00, false);
 
 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
 		disable_advertising(&req);
@@ -2453,6 +2578,9 @@
 	val = !!cp->val;
 	enabled = lmp_host_le_capable(hdev);
 
+	if (!val)
+		clear_adv_instance(hdev, NULL, 0x00, true);
+
 	if (!hdev_is_powered(hdev) || val == enabled) {
 		bool changed = false;
 
@@ -4087,6 +4215,7 @@
 			return false;
 		}
 
+		cancel_adv_timeout(hdev);
 		disable_advertising(req);
 	}
 
@@ -4669,6 +4798,9 @@
 {
 	struct cmd_lookup match = { NULL, hdev };
 	struct hci_request req;
+	u8 instance;
+	struct adv_info *adv_instance;
+	int err;
 
 	hci_dev_lock(hdev);
 
@@ -4694,18 +4826,31 @@
 		sock_put(match.sk);
 
 	/* If "Set Advertising" was just disabled and instance advertising was
-	 * set up earlier, then enable the advertising instance.
+	 * set up earlier, then re-enable multi-instance advertising.
 	 */
 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
-	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
+	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) ||
+	    list_empty(&hdev->adv_instances))
 		goto unlock;
 
+	instance = hdev->cur_adv_instance;
+	if (!instance) {
+		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
+							struct adv_info, list);
+		if (!adv_instance)
+			goto unlock;
+
+		instance = adv_instance->instance;
+	}
+
 	hci_req_init(&req, hdev);
 
-	update_adv_data(&req);
-	enable_advertising(&req);
+	err = schedule_adv_instance(&req, instance, true);
 
-	if (hci_req_run(&req, enable_advertising_instance) < 0)
+	if (!err)
+		err = hci_req_run(&req, enable_advertising_instance);
+
+	if (err)
 		BT_ERR("Failed to re-configure advertising");
 
 unlock:
@@ -4790,10 +4935,15 @@
 	else
 		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
 
+	cancel_adv_timeout(hdev);
+
 	if (val) {
-		/* Switch to instance "0" for the Set Advertising setting. */
-		update_adv_data_for_instance(&req, 0);
-		update_scan_rsp_data_for_instance(&req, 0);
+		/* Switch to instance "0" for the Set Advertising setting.
+		 * We cannot use update_[adv|scan_rsp]_data() here as the
+		 * HCI_ADVERTISING flag is not yet set.
+		 */
+		update_inst_adv_data(&req, 0x00);
+		update_inst_scan_rsp_data(&req, 0x00);
 		enable_advertising(&req);
 	} else {
 		disable_advertising(&req);
@@ -6781,8 +6931,9 @@
 {
 	struct mgmt_rp_read_adv_features *rp;
 	size_t rp_len;
-	int err;
+	int err, i;
 	bool instance;
+	struct adv_info *adv_instance;
 	u32 supported_flags;
 
 	BT_DBG("%s", hdev->name);
@@ -6795,12 +6946,9 @@
 
 	rp_len = sizeof(*rp);
 
-	/* Currently only one instance is supported, so just add 1 to the
-	 * response length.
-	 */
 	instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
 	if (instance)
-		rp_len++;
+		rp_len += hdev->adv_instance_cnt;
 
 	rp = kmalloc(rp_len, GFP_ATOMIC);
 	if (!rp) {
@@ -6813,14 +6961,18 @@
 	rp->supported_flags = cpu_to_le32(supported_flags);
 	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
 	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
-	rp->max_instances = 1;
+	rp->max_instances = HCI_MAX_ADV_INSTANCES;
 
-	/* Currently only one instance is supported, so simply return the
-	 * current instance number.
-	 */
 	if (instance) {
-		rp->num_instances = 1;
-		rp->instance[0] = 1;
+		i = 0;
+		list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
+			if (i >= hdev->adv_instance_cnt)
+				break;
+
+			rp->instance[i] = adv_instance->instance;
+			i++;
+		}
+		rp->num_instances = hdev->adv_instance_cnt;
 	} else {
 		rp->num_instances = 0;
 	}
@@ -6882,7 +7034,10 @@
 				     u16 opcode)
 {
 	struct mgmt_pending_cmd *cmd;
+	struct mgmt_cp_add_advertising *cp;
 	struct mgmt_rp_add_advertising rp;
+	struct adv_info *adv_instance, *n;
+	u8 instance;
 
 	BT_DBG("status %d", status);
 
@@ -6890,16 +7045,32 @@
 
 	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
 
-	if (status) {
+	if (status)
 		hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
-		memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
-		advertising_removed(cmd ? cmd->sk : NULL, hdev, 1);
+
+	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
+		if (!adv_instance->pending)
+			continue;
+
+		if (!status) {
+			adv_instance->pending = false;
+			continue;
+		}
+
+		instance = adv_instance->instance;
+
+		if (hdev->cur_adv_instance == instance)
+			cancel_adv_timeout(hdev);
+
+		hci_remove_adv_instance(hdev, instance);
+		advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
 	}
 
 	if (!cmd)
 		goto unlock;
 
-	rp.instance = 0x01;
+	cp = cmd->param;
+	rp.instance = cp->instance;
 
 	if (status)
 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
@@ -6914,15 +7085,28 @@
 	hci_dev_unlock(hdev);
 }
 
-static void adv_timeout_expired(struct work_struct *work)
+void mgmt_adv_timeout_expired(struct hci_dev *hdev)
 {
-	struct hci_dev *hdev = container_of(work, struct hci_dev,
-					    adv_instance.timeout_exp.work);
+	u8 instance;
+	struct hci_request req;
 
-	hdev->adv_instance.timeout = 0;
+	hdev->adv_instance_timeout = 0;
+
+	instance = get_current_adv_instance(hdev);
+	if (instance == 0x00)
+		return;
 
 	hci_dev_lock(hdev);
-	clear_adv_instance(hdev);
+	hci_req_init(&req, hdev);
+
+	clear_adv_instance(hdev, &req, instance, false);
+
+	if (list_empty(&hdev->adv_instances))
+		disable_advertising(&req);
+
+	if (!skb_queue_empty(&req.cmd_q))
+		hci_req_run(&req, NULL);
+
 	hci_dev_unlock(hdev);
 }
 
@@ -6934,7 +7118,10 @@
 	u32 flags;
 	u32 supported_flags;
 	u8 status;
-	u16 timeout;
+	u16 timeout, duration;
+	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
+	u8 schedule_instance = 0;
+	struct adv_info *next_instance;
 	int err;
 	struct mgmt_pending_cmd *cmd;
 	struct hci_request req;
@@ -6948,12 +7135,13 @@
 
 	flags = __le32_to_cpu(cp->flags);
 	timeout = __le16_to_cpu(cp->timeout);
+	duration = __le16_to_cpu(cp->duration);
 
-	/* The current implementation only supports adding one instance and only
-	 * a subset of the specified flags.
+	/* The current implementation only supports a subset of the specified
+	 * flags.
 	 */
 	supported_flags = get_supported_adv_flags(hdev);
-	if (cp->instance != 0x01 || (flags & ~supported_flags))
+	if (flags & ~supported_flags)
 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
 				       MGMT_STATUS_INVALID_PARAMS);
 
@@ -6981,38 +7169,51 @@
 		goto unlock;
 	}
 
-	INIT_DELAYED_WORK(&hdev->adv_instance.timeout_exp, adv_timeout_expired);
+	err = hci_add_adv_instance(hdev, cp->instance, flags,
+				   cp->adv_data_len, cp->data,
+				   cp->scan_rsp_len,
+				   cp->data + cp->adv_data_len,
+				   timeout, duration);
+	if (err < 0) {
+		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+				      MGMT_STATUS_FAILED);
+		goto unlock;
+	}
 
-	hdev->adv_instance.flags = flags;
-	hdev->adv_instance.adv_data_len = cp->adv_data_len;
-	hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len;
+	/* Only trigger an advertising added event if a new instance was
+	 * actually added.
+	 */
+	if (hdev->adv_instance_cnt > prev_instance_cnt)
+		advertising_added(sk, hdev, cp->instance);
 
-	if (cp->adv_data_len)
-		memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len);
+	hci_dev_set_flag(hdev, HCI_ADVERTISING_INSTANCE);
 
-	if (cp->scan_rsp_len)
-		memcpy(hdev->adv_instance.scan_rsp_data,
-		       cp->data + cp->adv_data_len, cp->scan_rsp_len);
+	if (hdev->cur_adv_instance == cp->instance) {
+		/* If the currently advertised instance is being changed then
+		 * cancel the current advertising and schedule the next
+		 * instance. If there is only one instance then the overridden
+		 * advertising data will be visible right away.
+		 */
+		cancel_adv_timeout(hdev);
 
-	if (hdev->adv_instance.timeout)
-		cancel_delayed_work(&hdev->adv_instance.timeout_exp);
+		next_instance = hci_get_next_instance(hdev, cp->instance);
+		if (next_instance)
+			schedule_instance = next_instance->instance;
+	} else if (!hdev->adv_instance_timeout) {
+		/* Immediately advertise the new instance if no other
+		 * instance is currently being advertised.
+		 */
+		schedule_instance = cp->instance;
+	}
 
-	hdev->adv_instance.timeout = timeout;
-
-	if (timeout)
-		queue_delayed_work(hdev->workqueue,
-				   &hdev->adv_instance.timeout_exp,
-				   msecs_to_jiffies(timeout * 1000));
-
-	if (!hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING_INSTANCE))
-		advertising_added(sk, hdev, 1);
-
-	/* If the HCI_ADVERTISING flag is set or the device isn't powered then
-	 * we have no HCI communication to make. Simply return.
+	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
+	 * there is no instance to be advertised then we have no HCI
+	 * communication to make. Simply return.
 	 */
 	if (!hdev_is_powered(hdev) ||
-	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
-		rp.instance = 0x01;
+	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+	    !schedule_instance) {
+		rp.instance = cp->instance;
 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
 		goto unlock;
@@ -7030,11 +7231,11 @@
 
 	hci_req_init(&req, hdev);
 
-	update_adv_data(&req);
-	update_scan_rsp_data(&req);
-	enable_advertising(&req);
+	err = schedule_adv_instance(&req, schedule_instance, true);
 
-	err = hci_req_run(&req, add_advertising_complete);
+	if (!err)
+		err = hci_req_run(&req, add_advertising_complete);
+
 	if (err < 0)
 		mgmt_pending_remove(cmd);
 
@@ -7048,6 +7249,7 @@
 					u16 opcode)
 {
 	struct mgmt_pending_cmd *cmd;
+	struct mgmt_cp_remove_advertising *cp;
 	struct mgmt_rp_remove_advertising rp;
 
 	BT_DBG("status %d", status);
@@ -7062,7 +7264,8 @@
 	if (!cmd)
 		goto unlock;
 
-	rp.instance = 1;
+	cp = cmd->param;
+	rp.instance = cp->instance;
 
 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
 			  &rp, sizeof(rp));
@@ -7077,21 +7280,21 @@
 {
 	struct mgmt_cp_remove_advertising *cp = data;
 	struct mgmt_rp_remove_advertising rp;
-	int err;
 	struct mgmt_pending_cmd *cmd;
 	struct hci_request req;
+	int err;
 
 	BT_DBG("%s", hdev->name);
 
-	/* The current implementation only allows modifying instance no 1. A
-	 * value of 0 indicates that all instances should be cleared.
-	 */
-	if (cp->instance > 1)
-		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
-				       MGMT_STATUS_INVALID_PARAMS);
-
 	hci_dev_lock(hdev);
 
+	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
+		err = mgmt_cmd_status(sk, hdev->id,
+				      MGMT_OP_REMOVE_ADVERTISING,
+				      MGMT_STATUS_INVALID_PARAMS);
+		goto unlock;
+	}
+
 	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
 	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
 	    pending_find(MGMT_OP_SET_LE, hdev)) {
@@ -7106,21 +7309,21 @@
 		goto unlock;
 	}
 
-	if (hdev->adv_instance.timeout)
-		cancel_delayed_work(&hdev->adv_instance.timeout_exp);
+	hci_req_init(&req, hdev);
 
-	memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
+	clear_adv_instance(hdev, &req, cp->instance, true);
 
-	advertising_removed(sk, hdev, 1);
+	if (list_empty(&hdev->adv_instances))
+		disable_advertising(&req);
 
-	hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
-
-	/* If the HCI_ADVERTISING flag is set or the device isn't powered then
-	 * we have no HCI communication to make. Simply return.
+	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
+	 * flag is set or the device isn't powered then we have no HCI
+	 * communication to make. Simply return.
 	 */
-	if (!hdev_is_powered(hdev) ||
+	if (skb_queue_empty(&req.cmd_q) ||
+	    !hdev_is_powered(hdev) ||
 	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
-		rp.instance = 1;
+		rp.instance = cp->instance;
 		err = mgmt_cmd_complete(sk, hdev->id,
 					MGMT_OP_REMOVE_ADVERTISING,
 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
@@ -7134,9 +7337,6 @@
 		goto unlock;
 	}
 
-	hci_req_init(&req, hdev);
-	disable_advertising(&req);
-
 	err = hci_req_run(&req, remove_advertising_complete);
 	if (err < 0)
 		mgmt_pending_remove(cmd);
@@ -7361,6 +7561,7 @@
 static int powered_update_hci(struct hci_dev *hdev)
 {
 	struct hci_request req;
+	struct adv_info *adv_instance;
 	u8 link_sec;
 
 	hci_req_init(&req, hdev);
@@ -7400,14 +7601,27 @@
 		 * advertising data. This also applies to the case
 		 * where BR/EDR was toggled during the AUTO_OFF phase.
 		 */
-		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
+		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
+		    (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+		     !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))) {
 			update_adv_data(&req);
 			update_scan_rsp_data(&req);
 		}
 
-		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
-		    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
+		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
+		    hdev->cur_adv_instance == 0x00 &&
+		    !list_empty(&hdev->adv_instances)) {
+			adv_instance = list_first_entry(&hdev->adv_instances,
+							struct adv_info, list);
+			hdev->cur_adv_instance = adv_instance->instance;
+		}
+
+		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
 			enable_advertising(&req);
+		else if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
+			 hdev->cur_adv_instance)
+			schedule_adv_instance(&req, hdev->cur_adv_instance,
+					      true);
 
 		restart_le_actions(&req);
 	}
@@ -7577,7 +7791,7 @@
 	memset(&ev, 0, sizeof(ev));
 
 	/* Devices using resolvable or non-resolvable random addresses
-	 * without providing an indentity resolving key don't require
+	 * without providing an identity resolving key don't require
 	 * to store long term keys. Their addresses will change the
 	 * next time around.
 	 *
@@ -7603,7 +7817,12 @@
 	if (key->type == SMP_LTK)
 		ev.key.master = 1;
 
-	memcpy(ev.key.val, key->val, sizeof(key->val));
+	/* Make sure we copy only the significant bytes based on the
+	 * encryption key size, and set the rest of the value to zeroes.
+	 */
+	memcpy(ev.key.val, key->val, key->enc_size);
+	memset(ev.key.val + key->enc_size, 0,
+	       sizeof(ev.key.val) - key->enc_size);
 
 	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
 }
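The hunk above bounds the copy to the negotiated encryption key size and zero-fills the remainder of the fixed 16-byte field, so shortened LTKs never leak stale bytes to userspace. A minimal standalone sketch of the same copy-then-zero pattern (the helper name is illustrative, not kernel API):

#include <string.h>

#define LTK_LEN 16

static void copy_shortened_key(unsigned char dst[LTK_LEN],
			       const unsigned char *src, size_t enc_size)
{
	memcpy(dst, src, enc_size);			/* significant bytes */
	memset(dst + enc_size, 0, LTK_LEN - enc_size);	/* zero the tail */
}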
@@ -7617,7 +7836,7 @@
 	/* For identity resolving keys from devices that are already
 	 * using a public address or static random address, do not
 	 * ask for storing this key. The identity resolving key really
-	 * is only mandatory for devices using resovlable random
+	 * is only mandatory for devices using resolvable random
 	 * addresses.
 	 *
 	 * Storing all identity resolving keys has the downside that
@@ -7646,7 +7865,7 @@
 	memset(&ev, 0, sizeof(ev));
 
 	/* Devices using resolvable or non-resolvable random addresses
-	 * without providing an indentity resolving key don't require
+	 * without providing an identity resolving key don't require
 	 * to store signature resolving keys. Their addresses will change
 	 * the next time around.
 	 *
@@ -8387,13 +8606,24 @@
 void mgmt_reenable_advertising(struct hci_dev *hdev)
 {
 	struct hci_request req;
+	u8 instance;
 
 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
 	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
 		return;
 
+	instance = get_current_adv_instance(hdev);
+
 	hci_req_init(&req, hdev);
-	enable_advertising(&req);
+
+	if (instance) {
+		schedule_adv_instance(&req, instance, true);
+	} else {
+		update_adv_data(&req);
+		update_scan_rsp_data(&req);
+		enable_advertising(&req);
+	}
+
 	hci_req_run(&req, adv_enable_complete);
 }
 
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 4fea242..29709fb 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -200,7 +200,7 @@
 
 	BT_DBG("");
 
-	err = sock_create_kern(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP, sock);
+	err = sock_create_kern(&init_net, PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP, sock);
 	if (!err) {
 		struct sock *sk = (*sock)->sk;
 		sk->sk_data_ready   = rfcomm_l2data_ready;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 825e8fb..7511df7 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -269,12 +269,12 @@
 	.obj_size	= sizeof(struct rfcomm_pinfo)
 };
 
-static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
+static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern)
 {
 	struct rfcomm_dlc *d;
 	struct sock *sk;
 
-	sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto);
+	sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto, kern);
 	if (!sk)
 		return NULL;
 
@@ -324,7 +324,7 @@
 
 	sock->ops = &rfcomm_sock_ops;
 
-	sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC);
+	sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern);
 	if (!sk)
 		return -ENOMEM;
 
@@ -334,16 +334,19 @@
 
 static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 {
-	struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
+	struct sockaddr_rc sa;
 	struct sock *sk = sock->sk;
-	int chan = sa->rc_channel;
-	int err = 0;
-
-	BT_DBG("sk %p %pMR", sk, &sa->rc_bdaddr);
+	int len, err = 0;
 
 	if (!addr || addr->sa_family != AF_BLUETOOTH)
 		return -EINVAL;
 
+	memset(&sa, 0, sizeof(sa));
+	len = min_t(unsigned int, sizeof(sa), addr_len);
+	memcpy(&sa, addr, len);
+
+	BT_DBG("sk %p %pMR", sk, &sa.rc_bdaddr);
+
 	lock_sock(sk);
 
 	if (sk->sk_state != BT_OPEN) {
@@ -358,12 +361,13 @@
 
 	write_lock(&rfcomm_sk_list.lock);
 
-	if (chan && __rfcomm_get_listen_sock_by_addr(chan, &sa->rc_bdaddr)) {
+	if (sa.rc_channel &&
+	    __rfcomm_get_listen_sock_by_addr(sa.rc_channel, &sa.rc_bdaddr)) {
 		err = -EADDRINUSE;
 	} else {
 		/* Save source address */
-		bacpy(&rfcomm_pi(sk)->src, &sa->rc_bdaddr);
-		rfcomm_pi(sk)->channel = chan;
+		bacpy(&rfcomm_pi(sk)->src, &sa.rc_bdaddr);
+		rfcomm_pi(sk)->channel = sa.rc_channel;
 		sk->sk_state = BT_BOUND;
 	}
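The bind() change above replaces direct dereferences of the caller's sockaddr with a bounded copy into a zeroed local struct, so an addr_len shorter than sizeof(struct sockaddr_rc) can no longer read past the supplied buffer. A self-contained sketch of the idiom (the struct layout here is an illustrative stand-in):

#include <string.h>

struct sockaddr_rc_like {		/* stand-in for struct sockaddr_rc */
	unsigned short	rc_family;
	unsigned char	rc_bdaddr[6];
	unsigned char	rc_channel;
};

static void copy_bind_addr(struct sockaddr_rc_like *sa,
			   const void *addr, size_t addr_len)
{
	size_t len = addr_len < sizeof(*sa) ? addr_len : sizeof(*sa);

	memset(sa, 0, sizeof(*sa));	/* unsupplied fields read as zero */
	memcpy(sa, addr, len);		/* never copy past addr_len */
}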
 
@@ -969,7 +973,7 @@
 		goto done;
 	}
 
-	sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC);
+	sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC, 0);
 	if (!sk)
 		goto done;
 
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 4322c83..688a040 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -460,11 +460,11 @@
 	.obj_size	= sizeof(struct sco_pinfo)
 };
 
-static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
+static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern)
 {
 	struct sock *sk;
 
-	sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto);
+	sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto, kern);
 	if (!sk)
 		return NULL;
 
@@ -501,7 +501,7 @@
 
 	sock->ops = &sco_sock_ops;
 
-	sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC);
+	sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern);
 	if (!sk)
 		return -ENOMEM;
 
@@ -1026,7 +1026,7 @@
 		bh_lock_sock(parent);
 
 		sk = sco_sock_alloc(sock_net(parent), NULL,
-				    BTPROTO_SCO, GFP_ATOMIC);
+				    BTPROTO_SCO, GFP_ATOMIC, 0);
 		if (!sk) {
 			bh_unlock_sock(parent);
 			sco_conn_unlock(conn);
@@ -1110,7 +1110,7 @@
 	sco_conn_del(hcon, bt_to_errno(reason));
 }
 
-int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
 {
 	struct sco_conn *conn = hcon->sco_data;
 
@@ -1121,12 +1121,11 @@
 
 	if (skb->len) {
 		sco_recv_frame(conn, skb);
-		return 0;
+		return;
 	}
 
 drop:
 	kfree_skb(skb);
-	return 0;
 }
 
 static struct hci_cb sco_cb = {
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 1ab3dc9..3d0f7d2 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -33,6 +33,9 @@
 #include "ecc.h"
 #include "smp.h"
 
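+/* hdev->smp_data is the SMP L2CAP channel; its ->data holds the
+ * per-controller struct smp_dev that this macro digs out.
+ */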
+#define SMP_DEV(hdev) \
+	((struct smp_dev *)((struct l2cap_chan *)((hdev)->smp_data))->data)
+
 /* Low-level debug macros to be used for stuff that we don't want
  * accidentally in dmesg, i.e. the values of the various crypto keys
  * and the inputs & outputs of crypto functions.
@@ -81,6 +84,9 @@
 	u8			local_rand[16];
 	bool			debug_key;
 
+	u8			min_key_size;
+	u8			max_key_size;
+
 	struct crypto_blkcipher	*tfm_aes;
 	struct crypto_hash	*tfm_cmac;
 };
@@ -371,6 +377,8 @@
 	uint8_t tmp[16], data[16];
 	int err;
 
+	SMP_DBG("k %16phN r %16phN", k, r);
+
 	if (!tfm) {
 		BT_ERR("tfm %p", tfm);
 		return -EINVAL;
@@ -400,6 +408,8 @@
 	/* Most significant octet of encryptedData corresponds to data[0] */
 	swap_buf(data, r, 16);
 
+	SMP_DBG("r %16phN", r);
+
 	return err;
 }
 
@@ -410,6 +420,10 @@
 	u8 p1[16], p2[16];
 	int err;
 
+	SMP_DBG("k %16phN r %16phN", k, r);
+	SMP_DBG("iat %u ia %6phN rat %u ra %6phN", _iat, ia, _rat, ra);
+	SMP_DBG("preq %7phN pres %7phN", preq, pres);
+
 	memset(p1, 0, 16);
 
 	/* p1 = pres || preq || _rat || _iat */
@@ -418,10 +432,7 @@
 	memcpy(p1 + 2, preq, 7);
 	memcpy(p1 + 9, pres, 7);
 
-	/* p2 = padding || ia || ra */
-	memcpy(p2, ra, 6);
-	memcpy(p2 + 6, ia, 6);
-	memset(p2 + 12, 0, 4);
+	SMP_DBG("p1 %16phN", p1);
 
 	/* res = r XOR p1 */
 	u128_xor((u128 *) res, (u128 *) r, (u128 *) p1);
@@ -433,6 +444,13 @@
 		return err;
 	}
 
+	/* p2 = padding || ia || ra */
+	memcpy(p2, ra, 6);
+	memcpy(p2 + 6, ia, 6);
+	memset(p2 + 12, 0, 4);
+
+	SMP_DBG("p2 %16phN", p2);
+
 	/* res = res XOR p2 */
 	u128_xor((u128 *) res, (u128 *) res, (u128 *) p2);
 
@@ -696,7 +714,7 @@
 	if (rsp == NULL) {
 		req->io_capability = conn->hcon->io_capability;
 		req->oob_flag = oob_flag;
-		req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
+		req->max_key_size = SMP_DEV(hdev)->max_key_size;
 		req->init_key_dist = local_dist;
 		req->resp_key_dist = remote_dist;
 		req->auth_req = (authreq & AUTH_REQ_MASK(hdev));
@@ -707,7 +725,7 @@
 
 	rsp->io_capability = conn->hcon->io_capability;
 	rsp->oob_flag = oob_flag;
-	rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
+	rsp->max_key_size = SMP_DEV(hdev)->max_key_size;
 	rsp->init_key_dist = req->init_key_dist & remote_dist;
 	rsp->resp_key_dist = req->resp_key_dist & local_dist;
 	rsp->auth_req = (authreq & AUTH_REQ_MASK(hdev));
@@ -718,10 +736,11 @@
 static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
 {
 	struct l2cap_chan *chan = conn->smp;
+	struct hci_dev *hdev = conn->hcon->hdev;
 	struct smp_chan *smp = chan->data;
 
-	if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) ||
-	    (max_key_size < SMP_MIN_ENC_KEY_SIZE))
+	if (max_key_size > SMP_DEV(hdev)->max_key_size ||
+	    max_key_size < SMP_MIN_ENC_KEY_SIZE)
 		return SMP_ENC_KEY_SIZE;
 
 	smp->enc_key_size = max_key_size;
@@ -985,13 +1004,10 @@
 
 		smp_s1(smp->tfm_aes, smp->tk, smp->rrnd, smp->prnd, stk);
 
-		memset(stk + smp->enc_key_size, 0,
-		       SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
-
 		if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
 			return SMP_UNSPECIFIED;
 
-		hci_le_start_enc(hcon, ediv, rand, stk);
+		hci_le_start_enc(hcon, ediv, rand, stk, smp->enc_key_size);
 		hcon->enc_key_size = smp->enc_key_size;
 		set_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags);
 	} else {
@@ -1004,9 +1020,6 @@
 
 		smp_s1(smp->tfm_aes, smp->tk, smp->prnd, smp->rrnd, stk);
 
-		memset(stk + smp->enc_key_size, 0,
-		       SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
-
 		if (hcon->pending_sec_level == BT_SECURITY_HIGH)
 			auth = 1;
 		else
@@ -1144,9 +1157,6 @@
 	else
 		auth = 0;
 
-	memset(smp->tk + smp->enc_key_size, 0,
-	       SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
-
 	smp->ltk = hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
 			       key_type, auth, smp->tk, smp->enc_key_size,
 			       0, 0);
@@ -1268,7 +1278,14 @@
 		__le16 ediv;
 		__le64 rand;
 
-		get_random_bytes(enc.ltk, sizeof(enc.ltk));
+		/* Make sure we generate only the significant number of
+		 * bytes based on the encryption key size, and set the rest
+		 * of the value to zeroes.
+		 */
+		get_random_bytes(enc.ltk, smp->enc_key_size);
+		memset(enc.ltk + smp->enc_key_size, 0,
+		       sizeof(enc.ltk) - smp->enc_key_size);
+
 		get_random_bytes(&ediv, sizeof(ediv));
 		get_random_bytes(&rand, sizeof(rand));
 
@@ -1688,7 +1705,7 @@
 
 		req->init_key_dist   = local_dist;
 		req->resp_key_dist   = remote_dist;
-		req->max_key_size    = SMP_MAX_ENC_KEY_SIZE;
+		req->max_key_size    = conn->hcon->enc_key_size;
 
 		smp->remote_key_dist = remote_dist;
 
@@ -1697,7 +1714,7 @@
 
 	memset(rsp, 0, sizeof(*rsp));
 
-	rsp->max_key_size    = SMP_MAX_ENC_KEY_SIZE;
+	rsp->max_key_size    = conn->hcon->enc_key_size;
 	rsp->init_key_dist   = req->init_key_dist & remote_dist;
 	rsp->resp_key_dist   = req->resp_key_dist & local_dist;
 
@@ -2190,7 +2207,7 @@
 	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
 		return true;
 
-	hci_le_start_enc(hcon, key->ediv, key->rand, key->val);
+	hci_le_start_enc(hcon, key->ediv, key->rand, key->val, key->enc_size);
 	hcon->enc_key_size = key->enc_size;
 
 	/* We never store STKs for master role, so clear this flag */
@@ -2738,7 +2755,7 @@
 	sc_add_ltk(smp);
 
 	if (hcon->out) {
-		hci_le_start_enc(hcon, 0, 0, smp->tk);
+		hci_le_start_enc(hcon, 0, 0, smp->tk, smp->enc_key_size);
 		hcon->enc_key_size = smp->enc_key_size;
 	}
 
@@ -3120,6 +3137,8 @@
 
 	smp->tfm_aes = tfm_aes;
 	smp->tfm_cmac = tfm_cmac;
+	smp->min_key_size = SMP_MIN_ENC_KEY_SIZE;
+	smp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
 
 create_chan:
 	chan = l2cap_chan_create();
@@ -3242,6 +3261,94 @@
 	.llseek		= default_llseek,
 };
 
+static ssize_t le_min_key_size_read(struct file *file,
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct hci_dev *hdev = file->private_data;
+	char buf[4];
+
+	snprintf(buf, sizeof(buf), "%2u\n", SMP_DEV(hdev)->min_key_size);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+static ssize_t le_min_key_size_write(struct file *file,
+				      const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct hci_dev *hdev = file->private_data;
+	char buf[32];
+	size_t buf_size = min(count, (sizeof(buf) - 1));
+	u8 key_size;
+
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = '\0';
+
+	if (sscanf(buf, "%hhu", &key_size) != 1)
+		return -EINVAL;
+
+	if (key_size > SMP_DEV(hdev)->max_key_size ||
+	    key_size < SMP_MIN_ENC_KEY_SIZE)
+		return -EINVAL;
+
+	SMP_DEV(hdev)->min_key_size = key_size;
+
+	return count;
+}
+
+static const struct file_operations le_min_key_size_fops = {
+	.open		= simple_open,
+	.read		= le_min_key_size_read,
+	.write		= le_min_key_size_write,
+	.llseek		= default_llseek,
+};
+
+static ssize_t le_max_key_size_read(struct file *file,
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct hci_dev *hdev = file->private_data;
+	char buf[4];
+
+	snprintf(buf, sizeof(buf), "%2u\n", SMP_DEV(hdev)->max_key_size);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+static ssize_t le_max_key_size_write(struct file *file,
+				      const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct hci_dev *hdev = file->private_data;
+	char buf[32];
+	size_t buf_size = min(count, (sizeof(buf) - 1));
+	u8 key_size;
+
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = '\0';
+
+	if (sscanf(buf, "%hhu", &key_size) != 1)
+		return -EINVAL;
+
+	if (key_size > SMP_MAX_ENC_KEY_SIZE ||
+	    key_size < SMP_DEV(hdev)->min_key_size)
+		return -EINVAL;
+
+	SMP_DEV(hdev)->max_key_size = key_size;
+
+	return count;
+}
+
+static const struct file_operations le_max_key_size_fops = {
+	.open		= simple_open,
+	.read		= le_max_key_size_read,
+	.write		= le_max_key_size_write,
+	.llseek		= default_llseek,
+};
+
 int smp_register(struct hci_dev *hdev)
 {
 	struct l2cap_chan *chan;
@@ -3266,6 +3373,11 @@
 
 	hdev->smp_data = chan;
 
+	debugfs_create_file("le_min_key_size", 0644, hdev->debugfs, hdev,
+			    &le_min_key_size_fops);
+	debugfs_create_file("le_max_key_size", 0644, hdev->debugfs, hdev,
+			    &le_max_key_size_fops);
+
 	/* If the controller does not support BR/EDR Secure Connections
 	 * feature, then the BR/EDR SMP channel shall not be present.
 	 *
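With the two debugfs entries registered above, the LE encryption key size bounds become tunable at runtime. Assuming debugfs is mounted at /sys/kernel/debug and the controller is hci0 (the path layout is an assumption, not spelled out in the patch), a small userspace program can exercise them:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[8];
	ssize_t n;
	int fd = open("/sys/kernel/debug/bluetooth/hci0/le_max_key_size",
		      O_RDWR);

	if (fd < 0)
		return 1;

	/* Lower the accepted maximum; values outside the allowed range
	 * are rejected with -EINVAL by the write handler.
	 */
	if (write(fd, "10", 2) != 2)
		perror("write");

	lseek(fd, 0, SEEK_SET);
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("le_max_key_size: %s", buf);
	}

	close(fd);
	return 0;
}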
diff --git a/net/bridge/Makefile b/net/bridge/Makefile
index fd7ee03..a1cda5d 100644
--- a/net/bridge/Makefile
+++ b/net/bridge/Makefile
@@ -12,6 +12,8 @@
 
 bridge-$(subst m,y,$(CONFIG_BRIDGE_NETFILTER)) += br_nf_core.o
 
+br_netfilter-y := br_netfilter_hooks.o
+br_netfilter-$(subst m,y,$(CONFIG_IPV6)) += br_netfilter_ipv6.o
 obj-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
 
 bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 02c24cf..a1abe49 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -121,13 +121,13 @@
 	.notifier_call = br_device_event
 };
 
-static int br_netdev_switch_event(struct notifier_block *unused,
-				  unsigned long event, void *ptr)
+static int br_switchdev_event(struct notifier_block *unused,
+			      unsigned long event, void *ptr)
 {
-	struct net_device *dev = netdev_switch_notifier_info_to_dev(ptr);
+	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 	struct net_bridge_port *p;
 	struct net_bridge *br;
-	struct netdev_switch_notifier_fdb_info *fdb_info;
+	struct switchdev_notifier_fdb_info *fdb_info;
 	int err = NOTIFY_DONE;
 
 	rtnl_lock();
@@ -138,14 +138,14 @@
 	br = p->br;
 
 	switch (event) {
-	case NETDEV_SWITCH_FDB_ADD:
+	case SWITCHDEV_FDB_ADD:
 		fdb_info = ptr;
 		err = br_fdb_external_learn_add(br, p, fdb_info->addr,
 						fdb_info->vid);
 		if (err)
 			err = notifier_from_errno(err);
 		break;
-	case NETDEV_SWITCH_FDB_DEL:
+	case SWITCHDEV_FDB_DEL:
 		fdb_info = ptr;
 		err = br_fdb_external_learn_del(br, p, fdb_info->addr,
 						fdb_info->vid);
@@ -159,8 +159,8 @@
 	return err;
 }
 
-static struct notifier_block br_netdev_switch_notifier = {
-	.notifier_call = br_netdev_switch_event,
+static struct notifier_block br_switchdev_notifier = {
+	.notifier_call = br_switchdev_event,
 };
 
 static void __net_exit br_net_exit(struct net *net)
@@ -214,7 +214,7 @@
 	if (err)
 		goto err_out3;
 
-	err = register_netdev_switch_notifier(&br_netdev_switch_notifier);
+	err = register_switchdev_notifier(&br_switchdev_notifier);
 	if (err)
 		goto err_out4;
 
@@ -235,7 +235,7 @@
 	return 0;
 
 err_out5:
-	unregister_netdev_switch_notifier(&br_netdev_switch_notifier);
+	unregister_switchdev_notifier(&br_switchdev_notifier);
 err_out4:
 	unregister_netdevice_notifier(&br_device_notifier);
 err_out3:
@@ -253,7 +253,7 @@
 {
 	stp_proto_unregister(&br_stp_proto);
 	br_netlink_fini();
-	unregister_netdev_switch_notifier(&br_netdev_switch_notifier);
+	unregister_switchdev_notifier(&br_switchdev_notifier);
 	unregister_netdevice_notifier(&br_device_notifier);
 	brioctl_set(NULL);
 	unregister_pernet_subsys(&br_net_ops);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 659fb96..9e9875d 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -24,6 +24,7 @@
 #include <linux/atomic.h>
 #include <asm/unaligned.h>
 #include <linux/if_vlan.h>
+#include <net/switchdev.h>
 #include "br_private.h"
 
 static struct kmem_cache *br_fdb_cache __read_mostly;
@@ -130,11 +131,27 @@
 	}
 }
 
+static void fdb_del_external_learn(struct net_bridge_fdb_entry *f)
+{
+	struct switchdev_obj obj = {
+		.id = SWITCHDEV_OBJ_PORT_FDB,
+		.u.fdb = {
+			.addr = f->addr.addr,
+			.vid = f->vlan_id,
+		},
+	};
+
+	switchdev_port_obj_del(f->dst->dev, &obj);
+}
+
 static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
 {
 	if (f->is_static)
 		fdb_del_hw_addr(br, f->addr.addr);
 
+	if (f->added_by_external_learn)
+		fdb_del_external_learn(f);
+
 	hlist_del_rcu(&f->hlist);
 	fdb_notify(br, f, RTM_DELNEIGH);
 	call_rcu(&f->rcu, fdb_rcu_free);
@@ -313,9 +330,11 @@
 
 /* Flush all entries referring to a specific port.
  * if do_all is set also flush static entries
+ * if vid is set delete all entries that match the vlan_id
  */
 void br_fdb_delete_by_port(struct net_bridge *br,
 			   const struct net_bridge_port *p,
+			   u16 vid,
 			   int do_all)
 {
 	int i;
@@ -330,8 +349,9 @@
 			if (f->dst != p)
 				continue;
 
-			if (f->is_static && !do_all)
-				continue;
+			if (!do_all)
+				if (f->is_static || (vid && f->vlan_id != vid))
+					continue;
 
 			if (f->is_local)
 				fdb_delete_local(br, p, f);
@@ -736,6 +756,12 @@
 	struct net_bridge_fdb_entry *fdb;
 	bool modified = false;
 
+	/* If the port cannot learn, allow only local and static entries */
+	if (!(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
+	    !(source->state == BR_STATE_LEARNING ||
+	      source->state == BR_STATE_FORWARDING))
+		return -EPERM;
+
 	fdb = fdb_find(head, addr, vid);
 	if (fdb == NULL) {
 		if (!(flags & NLM_F_CREATE))
@@ -867,13 +893,15 @@
 	return err;
 }
 
-static int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vlan)
+static int fdb_delete_by_addr_and_port(struct net_bridge_port *p,
+				       const u8 *addr, u16 vlan)
 {
+	struct net_bridge *br = p->br;
 	struct hlist_head *head = &br->hash[br_mac_hash(addr, vlan)];
 	struct net_bridge_fdb_entry *fdb;
 
 	fdb = fdb_find(head, addr, vlan);
-	if (!fdb)
+	if (!fdb || fdb->dst != p)
 		return -ENOENT;
 
 	fdb_delete(br, fdb);
@@ -886,7 +914,7 @@
 	int err;
 
 	spin_lock_bh(&p->br->hash_lock);
-	err = fdb_delete_by_addr(p->br, addr, vid);
+	err = fdb_delete_by_addr_and_port(p, addr, vid);
 	spin_unlock_bh(&p->br->hash_lock);
 
 	return err;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 1849d96..a538cb1 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -249,7 +249,7 @@
 	list_del_rcu(&p->list);
 
 	nbp_vlan_flush(p);
-	br_fdb_delete_by_port(br, p, 1);
+	br_fdb_delete_by_port(br, p, 0, 1);
 	nbp_update_port_count(br);
 
 	netdev_upper_dev_unlink(dev, br->dev);
@@ -278,7 +278,7 @@
 		del_nbp(p);
 	}
 
-	br_fdb_delete_by_port(br, NULL, 1);
+	br_fdb_delete_by_port(br, NULL, 0, 1);
 
 	br_vlan_flush(br);
 	del_timer_sync(&br->gc_timer);
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index a9a4a1b..8d423bc 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -247,9 +247,7 @@
 		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
-		spin_lock_bh(&br->lock);
 		br_stp_set_bridge_priority(br, args[1]);
-		spin_unlock_bh(&br->lock);
 		return 0;
 
 	case BRCTL_SET_PORT_PRIORITY:
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index ff667e1..742a6c2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -37,6 +37,8 @@
 
 static void br_multicast_start_querier(struct net_bridge *br,
 				       struct bridge_mcast_own_query *query);
+static void br_multicast_add_router(struct net_bridge *br,
+				    struct net_bridge_port *port);
 unsigned int br_mdb_rehash_seq;
 
 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -936,6 +938,8 @@
 #if IS_ENABLED(CONFIG_IPV6)
 	br_multicast_enable(&port->ip6_own_query);
 #endif
+	if (port->multicast_router == 2 && hlist_unhashed(&port->rlist))
+		br_multicast_add_router(br, port);
 
 out:
 	spin_unlock(&br->multicast_lock);
@@ -975,9 +979,6 @@
 	int err = 0;
 	__be32 group;
 
-	if (!pskb_may_pull(skb, sizeof(*ih)))
-		return -EINVAL;
-
 	ih = igmpv3_report_hdr(skb);
 	num = ntohs(ih->ngrec);
 	len = sizeof(*ih);
@@ -1247,25 +1248,14 @@
 			max_delay = 10 * HZ;
 			group = 0;
 		}
-	} else {
-		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
-			err = -EINVAL;
-			goto out;
-		}
-
+	} else if (skb->len >= sizeof(*ih3)) {
 		ih3 = igmpv3_query_hdr(skb);
 		if (ih3->nsrcs)
 			goto out;
 
 		max_delay = ih3->code ?
 			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
-	}
-
-	/* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer
-	 * all-systems destination addresses (224.0.0.1) for general queries
-	 */
-	if (!group && iph->daddr != htonl(INADDR_ALLHOSTS_GROUP)) {
-		err = -EINVAL;
+	} else {
 		goto out;
 	}
 
@@ -1328,12 +1318,6 @@
 	    (port && port->state == BR_STATE_DISABLED))
 		goto out;
 
-	/* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
-	if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
-		err = -EINVAL;
-		goto out;
-	}
-
 	if (skb->len == sizeof(*mld)) {
 		if (!pskb_may_pull(skb, sizeof(*mld))) {
 			err = -EINVAL;
@@ -1357,14 +1341,6 @@
 
 	is_general_query = group && ipv6_addr_any(group);
 
-	/* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer
-	 * all-nodes destination address (ff02::1) for general queries
-	 */
-	if (is_general_query && !ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) {
-		err = -EINVAL;
-		goto out;
-	}
-
 	if (is_general_query) {
 		saddr.proto = htons(ETH_P_IPV6);
 		saddr.u.ip6 = ip6h->saddr;
@@ -1556,74 +1532,22 @@
 				 struct sk_buff *skb,
 				 u16 vid)
 {
-	struct sk_buff *skb2 = skb;
-	const struct iphdr *iph;
+	struct sk_buff *skb_trimmed = NULL;
 	struct igmphdr *ih;
-	unsigned int len;
-	unsigned int offset;
 	int err;
 
-	/* We treat OOM as packet loss for now. */
-	if (!pskb_may_pull(skb, sizeof(*iph)))
-		return -EINVAL;
+	err = ip_mc_check_igmp(skb, &skb_trimmed);
 
-	iph = ip_hdr(skb);
-
-	if (iph->ihl < 5 || iph->version != 4)
-		return -EINVAL;
-
-	if (!pskb_may_pull(skb, ip_hdrlen(skb)))
-		return -EINVAL;
-
-	iph = ip_hdr(skb);
-
-	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
-		return -EINVAL;
-
-	if (iph->protocol != IPPROTO_IGMP) {
-		if (!ipv4_is_local_multicast(iph->daddr))
+	if (err == -ENOMSG) {
+		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr))
 			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
 		return 0;
+	} else if (err < 0) {
+		return err;
 	}
 
-	len = ntohs(iph->tot_len);
-	if (skb->len < len || len < ip_hdrlen(skb))
-		return -EINVAL;
-
-	if (skb->len > len) {
-		skb2 = skb_clone(skb, GFP_ATOMIC);
-		if (!skb2)
-			return -ENOMEM;
-
-		err = pskb_trim_rcsum(skb2, len);
-		if (err)
-			goto err_out;
-	}
-
-	len -= ip_hdrlen(skb2);
-	offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
-	__skb_pull(skb2, offset);
-	skb_reset_transport_header(skb2);
-
-	err = -EINVAL;
-	if (!pskb_may_pull(skb2, sizeof(*ih)))
-		goto out;
-
-	switch (skb2->ip_summed) {
-	case CHECKSUM_COMPLETE:
-		if (!csum_fold(skb2->csum))
-			break;
-		/* fall through */
-	case CHECKSUM_NONE:
-		skb2->csum = 0;
-		if (skb_checksum_complete(skb2))
-			goto out;
-	}
-
-	err = 0;
-
 	BR_INPUT_SKB_CB(skb)->igmp = 1;
-	ih = igmp_hdr(skb2);
+	ih = igmp_hdr(skb);
 
 	switch (ih->type) {
 	case IGMP_HOST_MEMBERSHIP_REPORT:
@@ -1632,21 +1556,19 @@
 		err = br_ip4_multicast_add_group(br, port, ih->group, vid);
 		break;
 	case IGMPV3_HOST_MEMBERSHIP_REPORT:
-		err = br_ip4_multicast_igmp3_report(br, port, skb2, vid);
+		err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
 		break;
 	case IGMP_HOST_MEMBERSHIP_QUERY:
-		err = br_ip4_multicast_query(br, port, skb2, vid);
+		err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
 		break;
 	case IGMP_HOST_LEAVE_MESSAGE:
 		br_ip4_multicast_leave_group(br, port, ih->group, vid);
 		break;
 	}
 
-out:
-	__skb_push(skb2, offset);
-err_out:
-	if (skb2 != skb)
-		kfree_skb(skb2);
+	if (skb_trimmed)
+		kfree_skb(skb_trimmed);
+
 	return err;
 }
 
@@ -1656,138 +1578,42 @@
 				 struct sk_buff *skb,
 				 u16 vid)
 {
-	struct sk_buff *skb2;
-	const struct ipv6hdr *ip6h;
-	u8 icmp6_type;
-	u8 nexthdr;
-	__be16 frag_off;
-	unsigned int len;
-	int offset;
+	struct sk_buff *skb_trimmed = NULL;
+	struct mld_msg *mld;
 	int err;
 
-	if (!pskb_may_pull(skb, sizeof(*ip6h)))
-		return -EINVAL;
+	err = ipv6_mc_check_mld(skb, &skb_trimmed);
 
-	ip6h = ipv6_hdr(skb);
-
-	/*
-	 * We're interested in MLD messages only.
-	 *  - Version is 6
-	 *  - MLD has always Router Alert hop-by-hop option
-	 *  - But we do not support jumbrograms.
-	 */
-	if (ip6h->version != 6)
+	if (err == -ENOMSG) {
+		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
+			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
 		return 0;
-
-	/* Prevent flooding this packet if there is no listener present */
-	if (!ipv6_addr_is_ll_all_nodes(&ip6h->daddr))
-		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
-
-	if (ip6h->nexthdr != IPPROTO_HOPOPTS ||
-	    ip6h->payload_len == 0)
-		return 0;
-
-	len = ntohs(ip6h->payload_len) + sizeof(*ip6h);
-	if (skb->len < len)
-		return -EINVAL;
-
-	nexthdr = ip6h->nexthdr;
-	offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off);
-
-	if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
-		return 0;
-
-	/* Okay, we found ICMPv6 header */
-	skb2 = skb_clone(skb, GFP_ATOMIC);
-	if (!skb2)
-		return -ENOMEM;
-
-	err = -EINVAL;
-	if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr)))
-		goto out;
-
-	len -= offset - skb_network_offset(skb2);
-
-	__skb_pull(skb2, offset);
-	skb_reset_transport_header(skb2);
-	skb_postpull_rcsum(skb2, skb_network_header(skb2),
-			   skb_network_header_len(skb2));
-
-	icmp6_type = icmp6_hdr(skb2)->icmp6_type;
-
-	switch (icmp6_type) {
-	case ICMPV6_MGM_QUERY:
-	case ICMPV6_MGM_REPORT:
-	case ICMPV6_MGM_REDUCTION:
-	case ICMPV6_MLD2_REPORT:
-		break;
-	default:
-		err = 0;
-		goto out;
+	} else if (err < 0) {
+		return err;
 	}
 
-	/* Okay, we found MLD message. Check further. */
-	if (skb2->len > len) {
-		err = pskb_trim_rcsum(skb2, len);
-		if (err)
-			goto out;
-		err = -EINVAL;
-	}
-
-	ip6h = ipv6_hdr(skb2);
-
-	switch (skb2->ip_summed) {
-	case CHECKSUM_COMPLETE:
-		if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
-					IPPROTO_ICMPV6, skb2->csum))
-			break;
-		/*FALLTHROUGH*/
-	case CHECKSUM_NONE:
-		skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
-							&ip6h->daddr,
-							skb2->len,
-							IPPROTO_ICMPV6, 0));
-		if (__skb_checksum_complete(skb2))
-			goto out;
-	}
-
-	err = 0;
-
 	BR_INPUT_SKB_CB(skb)->igmp = 1;
+	mld = (struct mld_msg *)skb_transport_header(skb);
 
-	switch (icmp6_type) {
+	switch (mld->mld_type) {
 	case ICMPV6_MGM_REPORT:
-	    {
-		struct mld_msg *mld;
-		if (!pskb_may_pull(skb2, sizeof(*mld))) {
-			err = -EINVAL;
-			goto out;
-		}
-		mld = (struct mld_msg *)skb_transport_header(skb2);
 		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
 		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
 		break;
-	    }
 	case ICMPV6_MLD2_REPORT:
-		err = br_ip6_multicast_mld2_report(br, port, skb2, vid);
+		err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
 		break;
 	case ICMPV6_MGM_QUERY:
-		err = br_ip6_multicast_query(br, port, skb2, vid);
+		err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
 		break;
 	case ICMPV6_MGM_REDUCTION:
-	    {
-		struct mld_msg *mld;
-		if (!pskb_may_pull(skb2, sizeof(*mld))) {
-			err = -EINVAL;
-			goto out;
-		}
-		mld = (struct mld_msg *)skb_transport_header(skb2);
 		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
-	    }
+		break;
 	}
 
-out:
-	kfree_skb(skb2);
+	if (skb_trimmed)
+		kfree_skb(skb_trimmed);
+
 	return err;
 }
 #endif
@@ -1949,11 +1775,9 @@
 
 int br_multicast_set_router(struct net_bridge *br, unsigned long val)
 {
-	int err = -ENOENT;
+	int err = -EINVAL;
 
 	spin_lock_bh(&br->multicast_lock);
-	if (!netif_running(br->dev))
-		goto unlock;
 
 	switch (val) {
 	case 0:
@@ -1964,13 +1788,8 @@
 		br->multicast_router = val;
 		err = 0;
 		break;
-
-	default:
-		err = -EINVAL;
-		break;
 	}
 
-unlock:
 	spin_unlock_bh(&br->multicast_lock);
 
 	return err;
@@ -1979,11 +1798,9 @@
 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
 {
 	struct net_bridge *br = p->br;
-	int err = -ENOENT;
+	int err = -EINVAL;
 
 	spin_lock(&br->multicast_lock);
-	if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
-		goto unlock;
 
 	switch (val) {
 	case 0:
@@ -2005,13 +1822,8 @@
 
 		br_multicast_add_router(br, p);
 		break;
-
-	default:
-		err = -EINVAL;
-		break;
 	}
 
-unlock:
 	spin_unlock(&br->multicast_lock);
 
 	return err;
@@ -2116,15 +1928,11 @@
 
 int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
 {
-	int err = -ENOENT;
+	int err = -EINVAL;
 	u32 old;
 	struct net_bridge_mdb_htable *mdb;
 
 	spin_lock_bh(&br->multicast_lock);
-	if (!netif_running(br->dev))
-		goto unlock;
-
-	err = -EINVAL;
 	if (!is_power_of_2(val))
 		goto unlock;
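The snooping rewrite above leans on a tri-state convention: ip_mc_check_igmp()/ipv6_mc_check_mld() return 0 for a valid IGMP/MLD packet (setting skb_trimmed when trailing data had to be dropped), -ENOMSG for a well-formed IP packet that simply carries no IGMP/MLD message, and another negative errno for malformed input. A self-contained sketch of dispatching on that contract (check_igmp() is a hypothetical stand-in for the real helper):

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in mirroring ip_mc_check_igmp()'s return contract. */
static int check_igmp(int kind)
{
	if (kind == 0)
		return 0;	/* valid IGMP message */
	if (kind == 1)
		return -ENOMSG;	/* valid IP, no IGMP payload */
	return -EINVAL;		/* malformed packet */
}

int main(void)
{
	for (int kind = 0; kind < 3; kind++) {
		int err = check_igmp(kind);

		if (err == -ENOMSG)
			puts("flood as ordinary multicast");
		else if (err < 0)
			puts("drop: malformed");
		else
			puts("parse IGMP header");
	}
	return 0;
}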
 
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter_hooks.c
similarity index 82%
rename from net/bridge/br_netfilter.c
rename to net/bridge/br_netfilter_hooks.c
index 60ddfbe..d89f4fa 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -34,6 +34,7 @@
 
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <net/addrconf.h>
 #include <net/route.h>
 #include <net/netfilter/br_netfilter.h>
 
@@ -115,22 +116,19 @@
 	char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
 	u8 encap_size;
 	u8 size;
+	u16 vlan_tci;
+	__be16 vlan_proto;
 };
 
 static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);
 #endif
 
-static struct nf_bridge_info *nf_bridge_info_get(const struct sk_buff *skb)
+static void nf_bridge_info_free(struct sk_buff *skb)
 {
-	return skb->nf_bridge;
-}
-
-static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
-{
-	struct net_bridge_port *port;
-
-	port = br_port_get_rcu(dev);
-	return port ? &port->br->fake_rtable : NULL;
+	if (skb->nf_bridge) {
+		nf_bridge_put(skb->nf_bridge);
+		skb->nf_bridge = NULL;
+	}
 }
 
 static inline struct net_device *bridge_parent(const struct net_device *dev)
@@ -141,15 +139,6 @@
 	return port ? port->br->dev : NULL;
 }
 
-static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
-{
-	skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
-	if (likely(skb->nf_bridge))
-		atomic_set(&(skb->nf_bridge->use), 1);
-
-	return skb->nf_bridge;
-}
-
 static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
 {
 	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
@@ -167,7 +156,7 @@
 	return nf_bridge;
 }
 
-static unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
+unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
 {
 	switch (skb->protocol) {
 	case __cpu_to_be16(ETH_P_8021Q):
@@ -179,14 +168,6 @@
 	}
 }
 
-static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
-{
-	unsigned int len = nf_bridge_encap_header_len(skb);
-
-	skb_push(skb, len);
-	skb->network_header -= len;
-}
-
 static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
 {
 	unsigned int len = nf_bridge_encap_header_len(skb);
@@ -208,7 +189,7 @@
  * expected format
  */
 
-static int br_parse_ip_options(struct sk_buff *skb)
+static int br_validate_ipv4(struct sk_buff *skb)
 {
 	const struct iphdr *iph;
 	struct net_device *dev = skb->dev;
@@ -256,7 +237,7 @@
 	return -1;
 }
 
-static void nf_bridge_update_protocol(struct sk_buff *skb)
+void nf_bridge_update_protocol(struct sk_buff *skb)
 {
 	switch (skb->nf_bridge->orig_proto) {
 	case BRNF_PROTO_8021Q:
@@ -270,43 +251,12 @@
 	}
 }
 
-/* PF_BRIDGE/PRE_ROUTING *********************************************/
-/* Undo the changes made for ip6tables PREROUTING and continue the
- * bridge PRE_ROUTING hook. */
-static int br_nf_pre_routing_finish_ipv6(struct sock *sk, struct sk_buff *skb)
-{
-	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
-	struct rtable *rt;
-
-	if (nf_bridge->pkt_otherhost) {
-		skb->pkt_type = PACKET_OTHERHOST;
-		nf_bridge->pkt_otherhost = false;
-	}
-	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
-
-	rt = bridge_parent_rtable(nf_bridge->physindev);
-	if (!rt) {
-		kfree_skb(skb);
-		return 0;
-	}
-	skb_dst_set_noref(skb, &rt->dst);
-
-	skb->dev = nf_bridge->physindev;
-	nf_bridge_update_protocol(skb);
-	nf_bridge_push_encap_header(skb);
-	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, sk, skb,
-		       skb->dev, NULL,
-		       br_handle_frame_finish, 1);
-
-	return 0;
-}
-
 /* Obtain the correct destination MAC address, while preserving the original
  * source MAC address. If we already know this address, we just copy it. If we
  * don't, we use the neighbour framework to find out. In both cases, we make
  * sure that br_handle_frame_finish() is called afterwards.
  */
-static int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb)
+int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb)
 {
 	struct neighbour *neigh;
 	struct dst_entry *dst;
@@ -346,8 +296,9 @@
 	return 0;
 }
 
-static bool daddr_was_changed(const struct sk_buff *skb,
-			      const struct nf_bridge_info *nf_bridge)
+static inline bool
+br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
+			     const struct nf_bridge_info *nf_bridge)
 {
 	return ip_hdr(skb)->daddr != nf_bridge->ipv4_daddr;
 }
@@ -398,17 +349,15 @@
 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
 	struct rtable *rt;
 	int err;
-	int frag_max_size;
 
-	frag_max_size = IPCB(skb)->frag_max_size;
-	BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
+	nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;
 
 	if (nf_bridge->pkt_otherhost) {
 		skb->pkt_type = PACKET_OTHERHOST;
 		nf_bridge->pkt_otherhost = false;
 	}
-	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
-	if (daddr_was_changed(skb, nf_bridge)) {
+	nf_bridge->mask &= ~BRNF_NF_BRIDGE_PREROUTING;
+	if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
 		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
 			struct in_device *in_dev = __in_dev_get_rcu(dev);
 
@@ -486,7 +435,7 @@
 }
 
 /* Some common code for IPv4/IPv6 */
-static struct net_device *setup_pre_routing(struct sk_buff *skb)
+struct net_device *setup_pre_routing(struct sk_buff *skb)
 {
 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
 
@@ -509,106 +458,6 @@
 	return skb->dev;
 }
 
-/* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
-static int check_hbh_len(struct sk_buff *skb)
-{
-	unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1);
-	u32 pkt_len;
-	const unsigned char *nh = skb_network_header(skb);
-	int off = raw - nh;
-	int len = (raw[1] + 1) << 3;
-
-	if ((raw + len) - skb->data > skb_headlen(skb))
-		goto bad;
-
-	off += 2;
-	len -= 2;
-
-	while (len > 0) {
-		int optlen = nh[off + 1] + 2;
-
-		switch (nh[off]) {
-		case IPV6_TLV_PAD1:
-			optlen = 1;
-			break;
-
-		case IPV6_TLV_PADN:
-			break;
-
-		case IPV6_TLV_JUMBO:
-			if (nh[off + 1] != 4 || (off & 3) != 2)
-				goto bad;
-			pkt_len = ntohl(*(__be32 *) (nh + off + 2));
-			if (pkt_len <= IPV6_MAXPLEN ||
-			    ipv6_hdr(skb)->payload_len)
-				goto bad;
-			if (pkt_len > skb->len - sizeof(struct ipv6hdr))
-				goto bad;
-			if (pskb_trim_rcsum(skb,
-					    pkt_len + sizeof(struct ipv6hdr)))
-				goto bad;
-			nh = skb_network_header(skb);
-			break;
-		default:
-			if (optlen > len)
-				goto bad;
-			break;
-		}
-		off += optlen;
-		len -= optlen;
-	}
-	if (len == 0)
-		return 0;
-bad:
-	return -1;
-
-}
-
-/* Replicate the checks that IPv6 does on packet reception and pass the packet
- * to ip6tables, which doesn't support NAT, so things are fairly simple. */
-static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
-					   struct sk_buff *skb,
-					   const struct nf_hook_state *state)
-{
-	const struct ipv6hdr *hdr;
-	u32 pkt_len;
-
-	if (skb->len < sizeof(struct ipv6hdr))
-		return NF_DROP;
-
-	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
-		return NF_DROP;
-
-	hdr = ipv6_hdr(skb);
-
-	if (hdr->version != 6)
-		return NF_DROP;
-
-	pkt_len = ntohs(hdr->payload_len);
-
-	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
-		if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
-			return NF_DROP;
-		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
-			return NF_DROP;
-	}
-	if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
-		return NF_DROP;
-
-	nf_bridge_put(skb->nf_bridge);
-	if (!nf_bridge_alloc(skb))
-		return NF_DROP;
-	if (!setup_pre_routing(skb))
-		return NF_DROP;
-
-	skb->protocol = htons(ETH_P_IPV6);
-	NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->sk, skb,
-		skb->dev, NULL,
-		br_nf_pre_routing_finish_ipv6);
-
-	return NF_STOLEN;
-}
-
 /* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
  * Replicate the checks that IPv4 does on packet reception.
  * Set skb->dev to the bridge device (i.e. parent of the
@@ -648,7 +497,7 @@
 
 	nf_bridge_pull_encap_header_rcsum(skb);
 
-	if (br_parse_ip_options(skb))
+	if (br_validate_ipv4(skb))
 		return NF_DROP;
 
 	nf_bridge_put(skb->nf_bridge);
@@ -692,12 +541,12 @@
 	struct net_device *in;
 
 	if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
-		int frag_max_size;
 
-		if (skb->protocol == htons(ETH_P_IP)) {
-			frag_max_size = IPCB(skb)->frag_max_size;
-			BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
-		}
+		if (skb->protocol == htons(ETH_P_IP))
+			nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;
+
+		if (skb->protocol == htons(ETH_P_IPV6))
+			nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;
 
 		in = nf_bridge->physindev;
 		if (nf_bridge->pkt_otherhost) {
@@ -760,12 +609,15 @@
 	}
 
 	if (pf == NFPROTO_IPV4) {
-		int frag_max = BR_INPUT_SKB_CB(skb)->frag_max_size;
-
-		if (br_parse_ip_options(skb))
+		if (br_validate_ipv4(skb))
 			return NF_DROP;
+		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
+	}
 
-		IPCB(skb)->frag_max_size = frag_max;
+	if (pf == NFPROTO_IPV6) {
+		if (br_validate_ipv6(skb))
+			return NF_DROP;
+		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
 	}
 
 	nf_bridge->physoutdev = skb->dev;
@@ -815,7 +667,7 @@
 	return NF_STOLEN;
 }
 
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
 static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb)
 {
 	struct brnf_frag_data *data;
@@ -829,33 +681,95 @@
 		return 0;
 	}
 
+	if (data->vlan_tci) {
+		skb->vlan_tci = data->vlan_tci;
+		skb->vlan_proto = data->vlan_proto;
+	}
+
 	skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
 	__skb_push(skb, data->encap_size);
 
+	nf_bridge_info_free(skb);
 	return br_dev_queue_push_xmit(sk, skb);
 }
+#endif
+
+static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb,
+			     int (*output)(struct sock *, struct sk_buff *))
+{
+	unsigned int mtu = ip_skb_dst_mtu(skb);
+	struct iphdr *iph = ip_hdr(skb);
+	struct rtable *rt = skb_rtable(skb);
+	struct net_device *dev = rt->dst.dev;
+
+	if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
+		     (IPCB(skb)->frag_max_size &&
+		      IPCB(skb)->frag_max_size > mtu))) {
+		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
+		kfree_skb(skb);
+		return -EMSGSIZE;
+	}
+
+	return ip_do_fragment(sk, skb, output);
+}
+
+static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
+{
+	if (skb->nf_bridge->orig_proto == BRNF_PROTO_PPPOE)
+		return PPPOE_SES_HLEN;
+	return 0;
+}
 
 static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
 {
-	int ret;
-	int frag_max_size;
+	struct nf_bridge_info *nf_bridge;
 	unsigned int mtu_reserved;
 
-	if (skb_is_gso(skb) || skb->protocol != htons(ETH_P_IP))
-		return br_dev_queue_push_xmit(sk, skb);
-
 	mtu_reserved = nf_bridge_mtu_reduction(skb);
+
+	if (skb_is_gso(skb) || skb->len + mtu_reserved <= skb->dev->mtu) {
+		nf_bridge_info_free(skb);
+		return br_dev_queue_push_xmit(sk, skb);
+	}
+
+	nf_bridge = nf_bridge_info_get(skb);
+
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
 	/* This is wrong! We should preserve the original fragment
 	 * boundaries by preserving frag_list rather than refragmenting.
 	 */
-	if (skb->len + mtu_reserved > skb->dev->mtu) {
+	if (skb->protocol == htons(ETH_P_IP)) {
 		struct brnf_frag_data *data;
 
-		frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
-		if (br_parse_ip_options(skb))
-			/* Drop invalid packet */
+		if (br_validate_ipv4(skb))
 			return NF_DROP;
-		IPCB(skb)->frag_max_size = frag_max_size;
+
+		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
+
+		nf_bridge_update_protocol(skb);
+
+		data = this_cpu_ptr(&brnf_frag_data_storage);
+
+		data->vlan_tci = skb->vlan_tci;
+		data->vlan_proto = skb->vlan_proto;
+		data->encap_size = nf_bridge_encap_header_len(skb);
+		data->size = ETH_HLEN + data->encap_size;
+
+		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
+						 data->size);
+
+		return br_nf_ip_fragment(sk, skb, br_nf_push_frag_xmit);
+	}
+#endif
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+	if (skb->protocol == htons(ETH_P_IPV6)) {
+		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
+		struct brnf_frag_data *data;
+
+		if (br_validate_ipv6(skb))
+			return NF_DROP;
+
+		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
 
 		nf_bridge_update_protocol(skb);
 
@@ -866,19 +780,15 @@
 		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
 						 data->size);
 
-		ret = ip_fragment(sk, skb, br_nf_push_frag_xmit);
-	} else {
-		ret = br_dev_queue_push_xmit(sk, skb);
+		if (v6ops)
+			return v6ops->fragment(sk, skb, br_nf_push_frag_xmit);
+		else
+			return -EMSGSIZE;
 	}
-
-	return ret;
-}
-#else
-static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
-{
-        return br_dev_queue_push_xmit(sk, skb);
-}
 #endif
+	nf_bridge_info_free(skb);
+	return br_dev_queue_push_xmit(sk, skb);
+}
 
 /* PF_BRIDGE/POST_ROUTING ********************************************/
 static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
@@ -964,6 +874,8 @@
 				       nf_bridge->neigh_header,
 				       ETH_HLEN - ETH_ALEN);
 	skb->dev = nf_bridge->physindev;
+
+	nf_bridge->physoutdev = NULL;
 	br_handle_frame_finish(NULL, skb);
 }
 
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
new file mode 100644
index 0000000..6d12d26
--- /dev/null
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -0,0 +1,245 @@
+/*
+ *	Handle firewalling
+ *	Linux ethernet bridge
+ *
+ *	Authors:
+ *	Lennert Buytenhek		<buytenh@gnu.org>
+ *	Bart De Schuymer		<bdschuym@pandora.be>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	Lennert dedicates this file to Kerstin Wurdinger.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
+#include <linux/netfilter_bridge.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_arp.h>
+#include <linux/in_route.h>
+#include <linux/inetdevice.h>
+
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#include <net/route.h>
+#include <net/netfilter/br_netfilter.h>
+
+#include <asm/uaccess.h>
+#include "br_private.h"
+#ifdef CONFIG_SYSCTL
+#include <linux/sysctl.h>
+#endif
+
+/* We only check the length. A bridge shouldn't do any hop-by-hop stuff
+ * anyway
+ */
+static int br_nf_check_hbh_len(struct sk_buff *skb)
+{
+	unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1);
+	u32 pkt_len;
+	const unsigned char *nh = skb_network_header(skb);
+	int off = raw - nh;
+	int len = (raw[1] + 1) << 3;
+
+	if ((raw + len) - skb->data > skb_headlen(skb))
+		goto bad;
+
+	off += 2;
+	len -= 2;
+
+	while (len > 0) {
+		int optlen = nh[off + 1] + 2;
+
+		switch (nh[off]) {
+		case IPV6_TLV_PAD1:
+			optlen = 1;
+			break;
+
+		case IPV6_TLV_PADN:
+			break;
+
+		case IPV6_TLV_JUMBO:
+			if (nh[off + 1] != 4 || (off & 3) != 2)
+				goto bad;
+			pkt_len = ntohl(*(__be32 *)(nh + off + 2));
+			if (pkt_len <= IPV6_MAXPLEN ||
+			    ipv6_hdr(skb)->payload_len)
+				goto bad;
+			if (pkt_len > skb->len - sizeof(struct ipv6hdr))
+				goto bad;
+			if (pskb_trim_rcsum(skb,
+					    pkt_len + sizeof(struct ipv6hdr)))
+				goto bad;
+			nh = skb_network_header(skb);
+			break;
+		default:
+			if (optlen > len)
+				goto bad;
+			break;
+		}
+		off += optlen;
+		len -= optlen;
+	}
+	if (len == 0)
+		return 0;
+bad:
+	return -1;
+}
+
+int br_validate_ipv6(struct sk_buff *skb)
+{
+	const struct ipv6hdr *hdr;
+	struct net_device *dev = skb->dev;
+	struct inet6_dev *idev = in6_dev_get(skb->dev);
+	u32 pkt_len;
+	u8 ip6h_len = sizeof(struct ipv6hdr);
+
+	if (!pskb_may_pull(skb, ip6h_len))
+		goto inhdr_error;
+
+	if (skb->len < ip6h_len)
+		goto drop;
+
+	hdr = ipv6_hdr(skb);
+
+	if (hdr->version != 6)
+		goto inhdr_error;
+
+	pkt_len = ntohs(hdr->payload_len);
+
+	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
+		if (pkt_len + ip6h_len > skb->len) {
+			IP6_INC_STATS_BH(dev_net(dev), idev,
+					 IPSTATS_MIB_INTRUNCATEDPKTS);
+			goto drop;
+		}
+		if (pskb_trim_rcsum(skb, pkt_len + ip6h_len)) {
+			IP6_INC_STATS_BH(dev_net(dev), idev,
+					 IPSTATS_MIB_INDISCARDS);
+			goto drop;
+		}
+	}
+	if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
+		goto drop;
+
+	memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
+	/* No IP options in the IPv6 header; however, it should be
+	 * checked whether some next headers need special treatment
+	 */
+	return 0;
+
+inhdr_error:
+	IP6_INC_STATS_BH(dev_net(dev), idev, IPSTATS_MIB_INHDRERRORS);
+drop:
+	return -1;
+}
+
+static inline bool
+br_nf_ipv6_daddr_was_changed(const struct sk_buff *skb,
+			     const struct nf_bridge_info *nf_bridge)
+{
+	return memcmp(&nf_bridge->ipv6_daddr, &ipv6_hdr(skb)->daddr,
+		      sizeof(ipv6_hdr(skb)->daddr)) != 0;
+}
+
+/* PF_BRIDGE/PRE_ROUTING: Undo the changes made for ip6tables
+ * PREROUTING and continue the bridge PRE_ROUTING hook. See comment
+ * for br_nf_pre_routing_finish(), same logic is used here but
+ * equivalent IPv6 function ip6_route_input() called indirectly.
+ */
+static int br_nf_pre_routing_finish_ipv6(struct sock *sk, struct sk_buff *skb)
+{
+	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+	struct rtable *rt;
+	struct net_device *dev = skb->dev;
+	const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
+
+	nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;
+
+	if (nf_bridge->pkt_otherhost) {
+		skb->pkt_type = PACKET_OTHERHOST;
+		nf_bridge->pkt_otherhost = false;
+	}
+	nf_bridge->mask &= ~BRNF_NF_BRIDGE_PREROUTING;
+	if (br_nf_ipv6_daddr_was_changed(skb, nf_bridge)) {
+		skb_dst_drop(skb);
+		v6ops->route_input(skb);
+
+		if (skb_dst(skb)->error) {
+			kfree_skb(skb);
+			return 0;
+		}
+
+		if (skb_dst(skb)->dev == dev) {
+			skb->dev = nf_bridge->physindev;
+			nf_bridge_update_protocol(skb);
+			nf_bridge_push_encap_header(skb);
+			NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING,
+				       sk, skb, skb->dev, NULL,
+				       br_nf_pre_routing_finish_bridge,
+				       1);
+			return 0;
+		}
+		ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
+		skb->pkt_type = PACKET_HOST;
+	} else {
+		rt = bridge_parent_rtable(nf_bridge->physindev);
+		if (!rt) {
+			kfree_skb(skb);
+			return 0;
+		}
+		skb_dst_set_noref(skb, &rt->dst);
+	}
+
+	skb->dev = nf_bridge->physindev;
+	nf_bridge_update_protocol(skb);
+	nf_bridge_push_encap_header(skb);
+	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, sk, skb,
+		       skb->dev, NULL,
+		       br_handle_frame_finish, 1);
+
+	return 0;
+}
+
+/* Replicate the checks that IPv6 does on packet reception and pass the packet
+ * to ip6tables.
+ */
+unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
+				    struct sk_buff *skb,
+				    const struct nf_hook_state *state)
+{
+	struct nf_bridge_info *nf_bridge;
+
+	if (br_validate_ipv6(skb))
+		return NF_DROP;
+
+	nf_bridge_put(skb->nf_bridge);
+	if (!nf_bridge_alloc(skb))
+		return NF_DROP;
+	if (!setup_pre_routing(skb))
+		return NF_DROP;
+
+	nf_bridge = nf_bridge_info_get(skb);
+	nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr;
+
+	skb->protocol = htons(ETH_P_IPV6);
+	NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->sk, skb,
+		skb->dev, NULL,
+		br_nf_pre_routing_finish_ipv6);
+
+	return NF_STOLEN;
+}
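The IPv6 path above detects ip6tables DNAT by memcmp()-ing the daddr recorded at PRE_ROUTING time against the current header, since a 128-bit address cannot be compared with the single integer test the IPv4 code uses. In miniature (the address struct is an illustrative stand-in for struct in6_addr):

#include <stdbool.h>
#include <string.h>

struct in6_addr_like {			/* stand-in for struct in6_addr */
	unsigned char s6_addr[16];
};

static bool daddr_was_changed(const struct in6_addr_like *saved,
			      const struct in6_addr_like *cur)
{
	/* Full 16-byte compare; a scalar != only works for IPv4. */
	return memcmp(saved, cur, sizeof(*saved)) != 0;
}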
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 4b5c236..6b67ed3 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -586,7 +586,7 @@
 	struct nlattr *afspec;
 	struct net_bridge_port *p;
 	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
-	int err = 0, ret_offload = 0;
+	int err = 0;
 
 	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
 	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
@@ -628,16 +628,6 @@
 				afspec, RTM_SETLINK);
 	}
 
-	if (p && !(flags & BRIDGE_FLAGS_SELF)) {
-		/* set bridge attributes in hardware if supported
-		 */
-		ret_offload = netdev_switch_port_bridge_setlink(dev, nlh,
-								flags);
-		if (ret_offload && ret_offload != -EOPNOTSUPP)
-			br_warn(p->br, "error setting attrs on port %u(%s)\n",
-				(unsigned int)p->port_no, p->dev->name);
-	}
-
 	if (err == 0)
 		br_ifinfo_notify(RTM_NEWLINK, p);
 out:
@@ -649,7 +639,7 @@
 {
 	struct nlattr *afspec;
 	struct net_bridge_port *p;
-	int err = 0, ret_offload = 0;
+	int err = 0;
 
 	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
 	if (!afspec)
@@ -668,16 +658,6 @@
 		 */
 		br_ifinfo_notify(RTM_NEWLINK, p);
 
-	if (p && !(flags & BRIDGE_FLAGS_SELF)) {
-		/* del bridge attributes in hardware
-		 */
-		ret_offload = netdev_switch_port_bridge_dellink(dev, nlh,
-								flags);
-		if (ret_offload && ret_offload != -EOPNOTSUPP)
-			br_warn(p->br, "error deleting attrs on port %u (%s)\n",
-				(unsigned int)p->port_no, p->dev->name);
-	}
-
 	return err;
 }
 static int br_validate(struct nlattr *tb[], struct nlattr *data[])
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 3362c29..8b21146 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -18,6 +18,7 @@
 #include <linux/netpoll.h>
 #include <linux/u64_stats_sync.h>
 #include <net/route.h>
+#include <net/ip6_fib.h>
 #include <linux/if_vlan.h>
 
 #define BR_HASH_BITS 8
@@ -33,8 +34,8 @@
 
 /* Control of forwarding link local multicast */
 #define BR_GROUPFWD_DEFAULT	0
-/* Don't allow forwarding control protocols like STP and LLDP */
-#define BR_GROUPFWD_RESTRICTED	0x4007u
+/* Don't allow forwarding of control protocols like STP, MAC PAUSE and LACP */
+#define BR_GROUPFWD_RESTRICTED	0x0007u
 /* The Nearest Customer Bridge Group Address, 01-80-C2-00-00-[00,0B,0C,0D,0F] */
 #define BR_GROUPFWD_8021AD	0xB801u
 
@@ -214,7 +215,10 @@
 	spinlock_t			hash_lock;
 	struct hlist_head		hash[BR_HASH_SIZE];
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-	struct rtable 			fake_rtable;
+	union {
+		struct rtable		fake_rtable;
+		struct rt6_info		fake_rt6_info;
+	};
 	bool				nf_call_iptables;
 	bool				nf_call_ip6tables;
 	bool				nf_call_arptables;
@@ -304,7 +308,6 @@
 	int mrouters_only;
 #endif
 
-	u16 frag_max_size;
 	bool proxyarp_replied;
 
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
@@ -384,7 +387,7 @@
 void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
 void br_fdb_cleanup(unsigned long arg);
 void br_fdb_delete_by_port(struct net_bridge *br,
-			   const struct net_bridge_port *p, int do_all);
+			   const struct net_bridge_port *p, u16 vid, int do_all);
 struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
 					  const unsigned char *addr, __u16 vid);
 int br_fdb_test_addr(struct net_device *dev, unsigned char *addr);
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index fb3ebe6..b4b6dab 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -39,10 +39,14 @@
 
 void br_set_state(struct net_bridge_port *p, unsigned int state)
 {
+	struct switchdev_attr attr = {
+		.id = SWITCHDEV_ATTR_PORT_STP_STATE,
+		.u.stp_state = state,
+	};
 	int err;
 
 	p->state = state;
-	err = netdev_switch_port_stp_update(p->dev, state);
+	err = switchdev_port_attr_set(p->dev, &attr);
 	if (err && err != -EOPNOTSUPP)
 		br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
 				(unsigned int) p->port_no, p->dev->name);
@@ -424,7 +428,6 @@
 	else
 		br_set_state(p, BR_STATE_LEARNING);
 
-	br_multicast_enable_port(p);
 	br_log_state(p);
 	br_ifinfo_notify(RTM_NEWLINK, p);
 
@@ -458,6 +461,12 @@
 			}
 		}
 
+		if (p->state != BR_STATE_BLOCKING)
+			br_multicast_enable_port(p);
+		/* Multicast is not disabled for the port when it goes into
+		 * the blocking state, because the timers will expire and
+		 * stop by themselves without sending more queries.
+		 */
 		if (p->state == BR_STATE_FORWARDING)
 			++liveports;
 	}
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 4114687..a2730e7 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -111,7 +111,7 @@
 	del_timer(&p->forward_delay_timer);
 	del_timer(&p->hold_timer);
 
-	br_fdb_delete_by_port(br, p, 0);
+	br_fdb_delete_by_port(br, p, 0, 0);
 	br_multicast_disable_port(p);
 
 	br_configuration_update(br);
@@ -243,12 +243,13 @@
 	return true;
 }
 
-/* called under bridge lock */
+/* Acquires and releases bridge lock */
 void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
 {
 	struct net_bridge_port *p;
 	int wasroot;
 
+	spin_lock_bh(&br->lock);
 	wasroot = br_is_root_bridge(br);
 
 	list_for_each_entry(p, &br->port_list, list) {
@@ -266,6 +267,7 @@
 	br_port_state_selection(br);
 	if (br_is_root_bridge(br) && !wasroot)
 		br_become_root_bridge(br);
+	spin_unlock_bh(&br->lock);
 }
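
Since the function now takes br->lock itself, callers are expected to stop
wrapping the call; a caller-side sketch (assumed, the companion hunks are not
shown in this diff):

	/* sketch: the sysfs/ioctl store paths can now simply do */
	br_stp_set_bridge_priority(br, newprio);
	/* instead of taking spin_lock_bh(&br->lock) around the call */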
 
 /* called under bridge lock */
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 4905845..efe415a 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -160,7 +160,7 @@
 
 static int store_flush(struct net_bridge_port *p, unsigned long v)
 {
-	br_fdb_delete_by_port(p->br, p, 0); // Don't delete local entry
+	br_fdb_delete_by_port(p->br, p, 0, 0); // Don't delete local entry
 	return 0;
 }
 static BRPORT_ATTR(flush, S_IWUSR, NULL, store_flush);
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 13013fe..0d41f81 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -2,6 +2,7 @@
 #include <linux/netdevice.h>
 #include <linux/rtnetlink.h>
 #include <linux/slab.h>
+#include <net/switchdev.h>
 
 #include "br_private.h"
 
@@ -36,6 +37,36 @@
 		clear_bit(vid, v->untagged_bitmap);
 }
 
+static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
+			  u16 vid, u16 flags)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+	int err;
+
+	/* If driver uses VLAN ndo ops, use 8021q to install vid
+	 * on device, otherwise try switchdev ops to install vid.
+	 */
+
+	if (ops->ndo_vlan_rx_add_vid) {
+		err = vlan_vid_add(dev, br->vlan_proto, vid);
+	} else {
+		struct switchdev_obj vlan_obj = {
+			.id = SWITCHDEV_OBJ_PORT_VLAN,
+			.u.vlan = {
+				.flags = flags,
+				.vid_begin = vid,
+				.vid_end = vid,
+			},
+		};
+
+		err = switchdev_port_obj_add(dev, &vlan_obj);
+		if (err == -EOPNOTSUPP)
+			err = 0;
+	}
+
+	return err;
+}
+
 static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
 {
 	struct net_bridge_port *p = NULL;
@@ -62,7 +93,7 @@
 		 * This ensures tagged traffic enters the bridge when
 		 * promiscuous mode is disabled by br_manage_promisc().
 		 */
-		err = vlan_vid_add(dev, br->vlan_proto, vid);
+		err = __vlan_vid_add(dev, br, vid, flags);
 		if (err)
 			return err;
 	}
@@ -86,6 +117,30 @@
 	return err;
 }
 
+static void __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
+			   u16 vid)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	/* If driver uses VLAN ndo ops, use 8021q to delete vid
+	 * on device, otherwise try switchdev ops to delete vid.
+	 */
+
+	if (ops->ndo_vlan_rx_kill_vid) {
+		vlan_vid_del(dev, br->vlan_proto, vid);
+	} else {
+		struct switchdev_obj vlan_obj = {
+			.id = SWITCHDEV_OBJ_PORT_VLAN,
+			.u.vlan = {
+				.vid_begin = vid,
+				.vid_end = vid,
+			},
+		};
+
+		switchdev_port_obj_del(dev, &vlan_obj);
+	}
+}
+
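A summary of the error-handling semantics of the two helpers above, derived
from the code itself:

	/* add:    -EOPNOTSUPP from switchdev_port_obj_add() is squashed to 0,
	 *         i.e. a port with no switchdev support stays a plain
	 *         software bridge port; any other error aborts the vlan add.
	 * delete: best-effort, the switchdev return value is ignored.
	 */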
 static int __vlan_del(struct net_port_vlans *v, u16 vid)
 {
 	if (!test_bit(vid, v->vlan_bitmap))
@@ -96,7 +151,7 @@
 
 	if (v->port_idx) {
 		struct net_bridge_port *p = v->parent.port;
-		vlan_vid_del(p->dev, p->br->vlan_proto, vid);
+		__vlan_vid_del(p->dev, p->br, vid);
 	}
 
 	clear_bit(vid, v->vlan_bitmap);
@@ -686,6 +741,7 @@
 		return -EINVAL;
 
 	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
+	br_fdb_delete_by_port(port->br, port, vid, 0);
 
 	return __vlan_del(pv, vid);
 }
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 071d872..0c40570 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -164,8 +164,10 @@
 	    !(info->bitmask & EBT_STP_MASK))
 		return -EINVAL;
 	/* Make sure the match only receives stp frames */
-	if (!ether_addr_equal(e->destmac, bridge_ula) ||
-	    !ether_addr_equal(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC))
+	if (!par->nft_compat &&
+	    (!ether_addr_equal(e->destmac, bridge_ula) ||
+	     !ether_addr_equal(e->destmsk, msk) ||
+	     !(e->bitmask & EBT_DESTMAC)))
 		return -EINVAL;
 
 	return 0;
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index c2cabbe..18ca4b2 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -139,7 +139,7 @@
 		ethproto = h->h_proto;
 
 	if (e->bitmask & EBT_802_3) {
-		if (FWINV2(ntohs(ethproto) >= ETH_P_802_3_MIN, EBT_IPROTO))
+		if (FWINV2(eth_proto_is_802_3(ethproto), EBT_IPROTO))
 			return 1;
 	} else if (!(e->bitmask & EBT_NOPROTO) &&
 	   FWINV2(e->ethproto != ethproto, EBT_IPROTO))
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 112ad78..3cc71b9 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -1055,7 +1055,7 @@
 	 * is really not used at all in the net/core or socket.c but the
 	 * initialization makes sure that sock->state is not uninitialized.
 	 */
-	sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
+	sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot, kern);
 	if (!sk)
 		return -ENOMEM;
 
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 32d710e..7933e62 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -179,7 +179,7 @@
 
 	sock->ops = cp->ops;
 
-	sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot);
+	sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot, kern);
 	if (!sk) {
 		err = -ENOMEM;
 		goto errout;
@@ -310,8 +310,12 @@
 		return err;
 	}
 
-	if (newskb)
+	if (newskb) {
+		if (!(newskb->tstamp.tv64))
+			__net_timestamp(newskb);
+
 		netif_rx_ni(newskb);
+	}
 
 	/* update statistics */
 	can_stats.tx_frames++;
diff --git a/net/can/gw.c b/net/can/gw.c
index a6f448e..4551687 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -110,6 +110,7 @@
 		void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
 		void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
 	} csumfunc;
+	u32 uid;
 };
 
 
@@ -548,6 +549,11 @@
 			goto cancel;
 	}
 
+	if (gwj->mod.uid) {
+		if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0)
+			goto cancel;
+	}
+
 	if (gwj->mod.csumfunc.crc8) {
 		if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
 			    &gwj->mod.csum.crc8) < 0)
@@ -619,6 +625,7 @@
 	[CGW_DST_IF]	= { .type = NLA_U32 },
 	[CGW_FILTER]	= { .len = sizeof(struct can_filter) },
 	[CGW_LIM_HOPS]	= { .type = NLA_U8 },
+	[CGW_MOD_UID]	= { .type = NLA_U32 },
 };
 
 /* check for common and gwtype specific attributes */
@@ -761,6 +768,10 @@
 			else
 				mod->csumfunc.xor = cgw_csum_xor_neg;
 		}
+
+		if (tb[CGW_MOD_UID]) {
+			nla_memcpy(&mod->uid, tb[CGW_MOD_UID], sizeof(u32));
+		}
 	}
 
 	if (gwtype == CGW_TYPE_CAN_CAN) {
@@ -802,6 +813,8 @@
 {
 	struct rtcanmsg *r;
 	struct cgw_job *gwj;
+	struct cf_mod mod;
+	struct can_can_gw ccgw;
 	u8 limhops = 0;
 	int err = 0;
 
@@ -819,6 +832,36 @@
 	if (r->gwtype != CGW_TYPE_CAN_CAN)
 		return -EINVAL;
 
+	err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
+	if (err < 0)
+		return err;
+
+	if (mod.uid) {
+
+		ASSERT_RTNL();
+
+		/* check for updating an existing job with identical uid */
+		hlist_for_each_entry(gwj, &cgw_list, list) {
+
+			if (gwj->mod.uid != mod.uid)
+				continue;
+
+			/* interfaces & filters must be identical */
+			if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
+				return -EINVAL;
+
+			/* update modifications with disabled softirq & quit */
+			local_bh_disable();
+			memcpy(&gwj->mod, &mod, sizeof(mod));
+			local_bh_enable();
+			return 0;
+		}
+	}
+
+	/* ifindex == 0 is not allowed for job creation */
+	if (!ccgw.src_idx || !ccgw.dst_idx)
+		return -ENODEV;
+
 	gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
 	if (!gwj)
 		return -ENOMEM;
@@ -828,18 +871,14 @@
 	gwj->deleted_frames = 0;
 	gwj->flags = r->flags;
 	gwj->gwtype = r->gwtype;
+	gwj->limit_hops = limhops;
 
-	err = cgw_parse_attr(nlh, &gwj->mod, CGW_TYPE_CAN_CAN, &gwj->ccgw,
-			     &limhops);
-	if (err < 0)
-		goto out;
+	/* insert already parsed information */
+	memcpy(&gwj->mod, &mod, sizeof(mod));
+	memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw));
 
 	err = -ENODEV;
 
-	/* ifindex == 0 is not allowed for job creation */
-	if (!gwj->ccgw.src_idx || !gwj->ccgw.dst_idx)
-		goto out;
-
 	gwj->src.dev = __dev_get_by_index(&init_net, gwj->ccgw.src_idx);
 
 	if (!gwj->src.dev)
@@ -856,8 +895,6 @@
 	if (gwj->dst.dev->type != ARPHRD_CAN)
 		goto out;
 
-	gwj->limit_hops = limhops;
-
 	ASSERT_RTNL();
 
 	err = cgw_register_filter(gwj);
@@ -931,8 +968,15 @@
 		if (gwj->limit_hops != limhops)
 			continue;
 
-		if (memcmp(&gwj->mod, &mod, sizeof(mod)))
-			continue;
+		/* we have a match when uid is enabled and identical */
+		if (gwj->mod.uid || mod.uid) {
+			if (gwj->mod.uid != mod.uid)
+				continue;
+		} else {
+			/* no uid => check for identical modifications */
+			if (memcmp(&gwj->mod, &mod, sizeof(mod)))
+				continue;
+		}
 
 		/* if (r->gwtype == CGW_TYPE_CAN_CAN) - is made sure here */
 		if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 967080a..073262f 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -480,8 +480,8 @@
 	int ret;
 
 	BUG_ON(con->sock);
-	ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
-			       IPPROTO_TCP, &sock);
+	ret = sock_create_kern(&init_net, con->peer_addr.in_addr.ss_family,
+			       SOCK_STREAM, IPPROTO_TCP, &sock);
 	if (ret)
 		return ret;
 	sock->sk->sk_allocation = GFP_NOFS;
diff --git a/net/core/dev.c b/net/core/dev.c
index aa82f9a..6778a99 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -135,6 +135,7 @@
 #include <linux/if_macvlan.h>
 #include <linux/errqueue.h>
 #include <linux/hrtimer.h>
+#include <linux/netfilter_ingress.h>
 
 #include "net-sysfs.h"
 
@@ -468,10 +469,14 @@
  */
 void dev_add_offload(struct packet_offload *po)
 {
-	struct list_head *head = &offload_base;
+	struct packet_offload *elem;
 
 	spin_lock(&offload_lock);
-	list_add_rcu(&po->list, head);
+	list_for_each_entry(elem, &offload_base, list) {
+		if (po->priority < elem->priority)
+			break;
+	}
+	list_add_rcu(&po->list, elem->list.prev);
 	spin_unlock(&offload_lock);
 }
 EXPORT_SYMBOL(dev_add_offload);
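
An ordering sketch for the insertion loop above: the offload list is kept
sorted by ascending priority, and list_add_rcu() on elem->list.prev places the
new entry before the first element with a higher priority value:

	/* list: [prio 10] -> [prio 20] -> [prio 30]
	 * dev_add_offload(po with po->priority == 15) yields
	 * list: [prio 10] -> [prio 15] -> [prio 20] -> [prio 30]
	 * so lower-priority handlers keep matching first.
	 */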
@@ -1630,7 +1635,7 @@
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
 
-#ifdef CONFIG_NET_CLS_ACT
+#ifdef CONFIG_NET_INGRESS
 static struct static_key ingress_needed __read_mostly;
 
 void net_inc_ingress_queue(void)
@@ -2343,6 +2348,34 @@
 }
 EXPORT_SYMBOL(netif_device_attach);
 
+/*
+ * Returns a Tx hash based on the given packet descriptor and the number
+ * of Tx queues to be used as a distribution range.
+ */
+u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
+		  unsigned int num_tx_queues)
+{
+	u32 hash;
+	u16 qoffset = 0;
+	u16 qcount = num_tx_queues;
+
+	if (skb_rx_queue_recorded(skb)) {
+		hash = skb_get_rx_queue(skb);
+		while (unlikely(hash >= num_tx_queues))
+			hash -= num_tx_queues;
+		return hash;
+	}
+
+	if (dev->num_tc) {
+		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+		qoffset = dev->tc_to_txq[tc].offset;
+		qcount = dev->tc_to_txq[tc].count;
+	}
+
+	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
+}
+EXPORT_SYMBOL(__skb_tx_hash);
+
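The spreading above leans on reciprocal_scale(); a minimal sketch of the
multiply-shift trick it uses (matching the helper's shape in
include/linux/kernel.h):

	/* Map a 32-bit hash into [0, ep_ro) without a division: the
	 * product val * ep_ro lies in [0, ep_ro << 32), so its top 32
	 * bits are uniformly distributed over [0, ep_ro).
	 */
	static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
	{
		return (u32)(((u64)val * ep_ro) >> 32);
	}

The queue finally picked is therefore qoffset + reciprocal_scale(hash, qcount),
a slot within the traffic class's own queue range.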
 static void skb_warn_bad_offload(const struct sk_buff *skb)
 {
 	static const netdev_features_t null_features = 0;
@@ -2901,6 +2934,84 @@
 }
 EXPORT_SYMBOL(dev_loopback_xmit);
 
+static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+	struct xps_dev_maps *dev_maps;
+	struct xps_map *map;
+	int queue_index = -1;
+
+	rcu_read_lock();
+	dev_maps = rcu_dereference(dev->xps_maps);
+	if (dev_maps) {
+		map = rcu_dereference(
+		    dev_maps->cpu_map[skb->sender_cpu - 1]);
+		if (map) {
+			if (map->len == 1)
+				queue_index = map->queues[0];
+			else
+				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
+									   map->len)];
+			if (unlikely(queue_index >= dev->real_num_tx_queues))
+				queue_index = -1;
+		}
+	}
+	rcu_read_unlock();
+
+	return queue_index;
+#else
+	return -1;
+#endif
+}
+
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+	int queue_index = sk_tx_queue_get(sk);
+
+	if (queue_index < 0 || skb->ooo_okay ||
+	    queue_index >= dev->real_num_tx_queues) {
+		int new_index = get_xps_queue(dev, skb);
+		if (new_index < 0)
+			new_index = skb_tx_hash(dev, skb);
+
+		if (queue_index != new_index && sk &&
+		    rcu_access_pointer(sk->sk_dst_cache))
+			sk_tx_queue_set(sk, new_index);
+
+		queue_index = new_index;
+	}
+
+	return queue_index;
+}
+
+struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+				    struct sk_buff *skb,
+				    void *accel_priv)
+{
+	int queue_index = 0;
+
+#ifdef CONFIG_XPS
+	if (skb->sender_cpu == 0)
+		skb->sender_cpu = raw_smp_processor_id() + 1;
+#endif
+
+	if (dev->real_num_tx_queues != 1) {
+		const struct net_device_ops *ops = dev->netdev_ops;
+		if (ops->ndo_select_queue)
+			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
+							    __netdev_pick_tx);
+		else
+			queue_index = __netdev_pick_tx(dev, skb);
+
+		if (!accel_priv)
+			queue_index = netdev_cap_txqueue(dev, queue_index);
+	}
+
+	skb_set_queue_mapping(skb, queue_index);
+	return netdev_get_tx_queue(dev, queue_index);
+}
+
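One encoding detail worth spelling out for the XPS path above:

	/* skb->sender_cpu == 0 means "not recorded", so netdev_pick_tx()
	 * stores cpu + 1, and get_xps_queue() indexes
	 * cpu_map[skb->sender_cpu - 1] to recover the real CPU id.
	 */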
 /**
  *	__dev_queue_xmit - transmit a buffer
  *	@skb: buffer to transmit
@@ -3513,66 +3624,47 @@
 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
 #endif
 
-#ifdef CONFIG_NET_CLS_ACT
-/* TODO: Maybe we should just force sch_ingress to be compiled in
- * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
- * a compare and 2 stores extra right now if we dont have it on
- * but have CONFIG_NET_CLS_ACT
- * NOTE: This doesn't stop any functionality; if you dont have
- * the ingress scheduler, you just can't add policies on ingress.
- *
- */
-static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
-{
-	struct net_device *dev = skb->dev;
-	u32 ttl = G_TC_RTTL(skb->tc_verd);
-	int result = TC_ACT_OK;
-	struct Qdisc *q;
-
-	if (unlikely(MAX_RED_LOOP < ttl++)) {
-		net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
-				     skb->skb_iif, dev->ifindex);
-		return TC_ACT_SHOT;
-	}
-
-	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
-	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
-
-	q = rcu_dereference(rxq->qdisc);
-	if (q != &noop_qdisc) {
-		spin_lock(qdisc_lock(q));
-		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
-			result = qdisc_enqueue_root(skb, q);
-		spin_unlock(qdisc_lock(q));
-	}
-
-	return result;
-}
-
 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 					 struct packet_type **pt_prev,
 					 int *ret, struct net_device *orig_dev)
 {
-	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
+#ifdef CONFIG_NET_CLS_ACT
+	struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
+	struct tcf_result cl_res;
 
-	if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
+	/* If there's at least one ingress present somewhere (so
+	 * we get here via the enabled static key), remaining devices
+	 * that are not configured with an ingress qdisc will bail
+	 * out here.
+	 */
+	if (!cl)
 		return skb;
-
 	if (*pt_prev) {
 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
 		*pt_prev = NULL;
 	}
 
-	switch (ing_filter(skb, rxq)) {
+	qdisc_skb_cb(skb)->pkt_len = skb->len;
+	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
+	qdisc_bstats_update_cpu(cl->q, skb);
+
+	switch (tc_classify(skb, cl, &cl_res)) {
+	case TC_ACT_OK:
+	case TC_ACT_RECLASSIFY:
+		skb->tc_index = TC_H_MIN(cl_res.classid);
+		break;
 	case TC_ACT_SHOT:
+		qdisc_qstats_drop_cpu(cl->q);
 	case TC_ACT_STOLEN:
+	case TC_ACT_QUEUED:
 		kfree_skb(skb);
 		return NULL;
+	default:
+		break;
 	}
-
+#endif /* CONFIG_NET_CLS_ACT */
 	return skb;
 }
-#endif
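
The rewritten ingress path is gated behind a static branch, so devices with no
classifier attached pay nearly nothing. A minimal sketch of the pattern,
mirroring ingress_needed and net_inc_ingress_queue() above (my_needed and the
my_user_* helpers are placeholder names):

	static struct static_key my_needed __read_mostly;

	void my_user_add(void)    { static_key_slow_inc(&my_needed); }
	void my_user_remove(void) { static_key_slow_dec(&my_needed); }

	/* fast path: compiled as a patched-out branch until the first
	 * user calls my_user_add()
	 */
	if (static_key_false(&my_needed))
		do_slow_path();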
 
 /**
  *	netdev_rx_handler_register - register receive handler
@@ -3645,6 +3737,22 @@
 	}
 }
 
+static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
+			     int *ret, struct net_device *orig_dev)
+{
+#ifdef CONFIG_NETFILTER_INGRESS
+	if (nf_hook_ingress_active(skb)) {
+		if (*pt_prev) {
+			*ret = deliver_skb(skb, *pt_prev, orig_dev);
+			*pt_prev = NULL;
+		}
+
+		return nf_hook_ingress(skb);
+	}
+#endif /* CONFIG_NETFILTER_INGRESS */
+	return 0;
+}
+
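A hedged sketch of how a module would attach to this new hook point, assuming
the NFPROTO_NETDEV family and the per-device ops->dev registration introduced
by this series (the hook prototype follows the one visible elsewhere in this
diff; my_ingress_* are placeholder names):

	static unsigned int my_ingress_fn(const struct nf_hook_ops *ops,
					  struct sk_buff *skb,
					  const struct nf_hook_state *state)
	{
		return NF_ACCEPT;	/* pass everything through */
	}

	static struct nf_hook_ops my_ingress_ops = {
		.hook	  = my_ingress_fn,
		.pf	  = NFPROTO_NETDEV,
		.hooknum  = NF_NETDEV_INGRESS,
		.priority = 0,
		/* .dev = <target net_device>, set before registration */
	};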
 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 {
 	struct packet_type *ptype, *pt_prev;
@@ -3704,13 +3812,17 @@
 	}
 
 skip_taps:
-#ifdef CONFIG_NET_CLS_ACT
+#ifdef CONFIG_NET_INGRESS
 	if (static_key_false(&ingress_needed)) {
 		skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
 		if (!skb)
 			goto unlock;
-	}
 
+		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
+			goto unlock;
+	}
+#endif
+#ifdef CONFIG_NET_CLS_ACT
 	skb->tc_verd = 0;
 ncls:
 #endif
@@ -6313,6 +6425,17 @@
 	return 0;
 }
 
+void netif_tx_stop_all_queues(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_stop_queue(txq);
+	}
+}
+EXPORT_SYMBOL(netif_tx_stop_all_queues);
+
 /**
  *	register_netdevice	- register a network device
  *	@dev: device to register
@@ -6862,6 +6985,9 @@
 	dev->group = INIT_NETDEV_GROUP;
 	if (!dev->ethtool_ops)
 		dev->ethtool_ops = &default_ethtool_ops;
+
+	nf_hook_ingress_init(dev);
+
 	return dev;
 
 free_all:
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 1d00b89..b495ab1 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -98,7 +98,6 @@
 	[NETIF_F_RXALL_BIT] =            "rx-all",
 	[NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
 	[NETIF_F_BUSY_POLL_BIT] =        "busy-poll",
-	[NETIF_F_HW_SWITCH_OFFLOAD_BIT] = "hw-switch-offload",
 };
 
 static const char
@@ -107,6 +106,13 @@
 	[ETH_RSS_HASH_XOR_BIT] =	"xor",
 };
 
+static const char
+tunable_strings[__ETHTOOL_TUNABLE_COUNT][ETH_GSTRING_LEN] = {
+	[ETHTOOL_ID_UNSPEC]     = "Unspec",
+	[ETHTOOL_RX_COPYBREAK]	= "rx-copybreak",
+	[ETHTOOL_TX_COPYBREAK]	= "tx-copybreak",
+};
+
 static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
 {
 	struct ethtool_gfeatures cmd = {
@@ -195,6 +201,9 @@
 	if (sset == ETH_SS_RSS_HASH_FUNCS)
 		return ARRAY_SIZE(rss_hash_func_strings);
 
+	if (sset == ETH_SS_TUNABLES)
+		return ARRAY_SIZE(tunable_strings);
+
 	if (ops->get_sset_count && ops->get_strings)
 		return ops->get_sset_count(dev, sset);
 	else
@@ -212,6 +221,8 @@
 	else if (stringset == ETH_SS_RSS_HASH_FUNCS)
 		memcpy(data, rss_hash_func_strings,
 		       sizeof(rss_hash_func_strings));
+	else if (stringset == ETH_SS_TUNABLES)
+		memcpy(data, tunable_strings, sizeof(tunable_strings));
 	else
 		/* ops->get_strings is valid because checked earlier */
 		ops->get_strings(dev, stringset, data);
diff --git a/net/core/filter.c b/net/core/filter.c
index bf831a8..be3098f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -36,6 +36,7 @@
 #include <net/netlink.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
+#include <net/flow_dissector.h>
 #include <linux/errno.h>
 #include <linux/timer.h>
 #include <asm/uaccess.h>
@@ -45,6 +46,7 @@
 #include <linux/seccomp.h>
 #include <linux/if_vlan.h>
 #include <linux/bpf.h>
+#include <net/sch_generic.h>
 
 /**
  *	sk_filter - run a packet through a socket filter
@@ -355,8 +357,8 @@
  * for socket filters: ctx == 'struct sk_buff *', for seccomp:
  * ctx == 'struct seccomp_data *'.
  */
-int bpf_convert_filter(struct sock_filter *prog, int len,
-		       struct bpf_insn *new_prog, int *new_len)
+static int bpf_convert_filter(struct sock_filter *prog, int len,
+			      struct bpf_insn *new_prog, int *new_len)
 {
 	int new_flen = 0, pass = 0, target, i;
 	struct bpf_insn *new_insn;
@@ -371,7 +373,8 @@
 		return -EINVAL;
 
 	if (new_prog) {
-		addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
+		addrs = kcalloc(len, sizeof(*addrs),
+				GFP_KERNEL | __GFP_NOWARN);
 		if (!addrs)
 			return -ENOMEM;
 	}
@@ -751,7 +754,8 @@
  *
  * Returns 0 if the rule set is legal or -EINVAL if not.
  */
-int bpf_check_classic(const struct sock_filter *filter, unsigned int flen)
+static int bpf_check_classic(const struct sock_filter *filter,
+			     unsigned int flen)
 {
 	bool anc_found;
 	int pc;
@@ -825,7 +829,6 @@
 
 	return -EINVAL;
 }
-EXPORT_SYMBOL(bpf_check_classic);
 
 static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
 				      const struct sock_fprog *fprog)
@@ -839,7 +842,9 @@
 
 	fkprog = fp->orig_prog;
 	fkprog->len = fprog->len;
-	fkprog->filter = kmemdup(fp->insns, fsize, GFP_KERNEL);
+
+	fkprog->filter = kmemdup(fp->insns, fsize,
+				 GFP_KERNEL | __GFP_NOWARN);
 	if (!fkprog->filter) {
 		kfree(fp->orig_prog);
 		return -ENOMEM;
@@ -941,7 +946,7 @@
 	 * pass. At this time, the user BPF is stored in fp->insns.
 	 */
 	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
-			   GFP_KERNEL);
+			   GFP_KERNEL | __GFP_NOWARN);
 	if (!old_prog) {
 		err = -ENOMEM;
 		goto out_err;
@@ -988,7 +993,8 @@
 	return ERR_PTR(err);
 }
 
-static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp)
+static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
+					   bpf_aux_classic_check_t trans)
 {
 	int err;
 
@@ -1001,6 +1007,17 @@
 		return ERR_PTR(err);
 	}
 
+	/* There might be additional checks and transformations
+	 * needed on classic filters, e.g. in the case of seccomp.
+	 */
+	if (trans) {
+		err = trans(fp->insns, fp->len);
+		if (err) {
+			__bpf_prog_release(fp);
+			return ERR_PTR(err);
+		}
+	}
+
 	/* Probe if we can JIT compile the filter and if so, do
 	 * the compilation of the filter.
 	 */
@@ -1050,7 +1067,7 @@
 	/* bpf_prepare_filter() already takes care of freeing
 	 * memory in case something goes wrong.
 	 */
-	fp = bpf_prepare_filter(fp);
+	fp = bpf_prepare_filter(fp, NULL);
 	if (IS_ERR(fp))
 		return PTR_ERR(fp);
 
@@ -1059,6 +1076,53 @@
 }
 EXPORT_SYMBOL_GPL(bpf_prog_create);
 
+/**
+ *	bpf_prog_create_from_user - create an unattached filter from user buffer
+ *	@pfp: the unattached filter that is created
+ *	@fprog: the filter program
+ *	@trans: post-classic verifier transformation handler
+ *
+ * This function effectively does the same as bpf_prog_create(), except
+ * that it builds up its insns buffer from a user-space-provided buffer.
+ * It also allows passing a bpf_aux_classic_check_t handler.
+ */
+int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
+			      bpf_aux_classic_check_t trans)
+{
+	unsigned int fsize = bpf_classic_proglen(fprog);
+	struct bpf_prog *fp;
+
+	/* Make sure the new filter is there and of the right size. */
+	if (fprog->filter == NULL)
+		return -EINVAL;
+
+	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
+	if (!fp)
+		return -ENOMEM;
+
+	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
+		__bpf_prog_free(fp);
+		return -EFAULT;
+	}
+
+	fp->len = fprog->len;
+	/* Since unattached filters are not copied back to user
+	 * space through sk_get_filter(), we do not need to hold
+	 * a copy here and can spare ourselves the work.
+	 */
+	fp->orig_prog = NULL;
+
+	/* bpf_prepare_filter() already takes care of freeing
+	 * memory in case something goes wrong.
+	 */
+	fp = bpf_prepare_filter(fp, trans);
+	if (IS_ERR(fp))
+		return PTR_ERR(fp);
+
+	*pfp = fp;
+	return 0;
+}
+
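The in-tree consumer of this interface is seccomp; a sketch of the expected
call pattern (user_insns/len are caller-supplied, and seccomp_check_filter
stands in for whatever bpf_aux_classic_check_t the subsystem provides):

	struct sock_fprog fprog = { .len = len, .filter = user_insns };
	struct bpf_prog *prog;
	int err;

	err = bpf_prog_create_from_user(&prog, &fprog, seccomp_check_filter);
	if (err)
		return err;
	/* ... use prog, later bpf_prog_destroy(prog) ... */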
 void bpf_prog_destroy(struct bpf_prog *fp)
 {
 	__bpf_prog_release(fp);
@@ -1135,7 +1199,7 @@
 	/* bpf_prepare_filter() already takes care of freeing
 	 * memory in case something goes wrong.
 	 */
-	prog = bpf_prepare_filter(prog);
+	prog = bpf_prepare_filter(prog, NULL);
 	if (IS_ERR(prog))
 		return PTR_ERR(prog);
 
@@ -1175,21 +1239,6 @@
 	return 0;
 }
 
-/**
- *	bpf_skb_clone_not_writable - is the header of a clone not writable
- *	@skb: buffer to check
- *	@len: length up to which to write, can be negative
- *
- *	Returns true if modifying the header part of the cloned buffer
- *	does require the data to be copied. I.e. this version works with
- *	negative lengths needed for eBPF case!
- */
-static bool bpf_skb_clone_unwritable(const struct sk_buff *skb, int len)
-{
-	return skb_header_cloned(skb) ||
-	       (int) skb_headroom(skb) + len > skb->hdr_len;
-}
-
 #define BPF_RECOMPUTE_CSUM(flags)	((flags) & 1)
 
 static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
@@ -1212,9 +1261,8 @@
 	if (unlikely((u32) offset > 0xffff || len > sizeof(buf)))
 		return -EFAULT;
 
-	offset -= skb->data - skb_mac_header(skb);
 	if (unlikely(skb_cloned(skb) &&
-		     bpf_skb_clone_unwritable(skb, offset + len)))
+		     !skb_clone_writable(skb, offset + len)))
 		return -EFAULT;
 
 	ptr = skb_header_pointer(skb, offset, len, buf);
@@ -1258,9 +1306,8 @@
 	if (unlikely((u32) offset > 0xffff))
 		return -EFAULT;
 
-	offset -= skb->data - skb_mac_header(skb);
 	if (unlikely(skb_cloned(skb) &&
-		     bpf_skb_clone_unwritable(skb, offset + sizeof(sum))))
+		     !skb_clone_writable(skb, offset + sizeof(sum))))
 		return -EFAULT;
 
 	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1306,9 +1353,8 @@
 	if (unlikely((u32) offset > 0xffff))
 		return -EFAULT;
 
-	offset -= skb->data - skb_mac_header(skb);
 	if (unlikely(skb_cloned(skb) &&
-		     bpf_skb_clone_unwritable(skb, offset + sizeof(sum))))
+		     !skb_clone_writable(skb, offset + sizeof(sum))))
 		return -EFAULT;
 
 	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1344,6 +1390,40 @@
 	.arg5_type	= ARG_ANYTHING,
 };
 
+#define BPF_IS_REDIRECT_INGRESS(flags)	((flags) & 1)
+
+static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2;
+	struct net_device *dev;
+
+	dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
+	if (unlikely(!dev))
+		return -EINVAL;
+
+	if (unlikely(!(dev->flags & IFF_UP)))
+		return -EINVAL;
+
+	skb2 = skb_clone(skb, GFP_ATOMIC);
+	if (unlikely(!skb2))
+		return -ENOMEM;
+
+	if (BPF_IS_REDIRECT_INGRESS(flags))
+		return dev_forward_skb(dev, skb2);
+
+	skb2->dev = dev;
+	return dev_queue_xmit(skb2);
+}
+
+const struct bpf_func_proto bpf_clone_redirect_proto = {
+	.func           = bpf_clone_redirect,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+	.arg2_type      = ARG_ANYTHING,
+	.arg3_type      = ARG_ANYTHING,
+};
+
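An illustrative restricted-C tc program using the new helper; the function
pointer declaration is the usual samples/bpf idiom, and the ifindex and flag
values are made up (bit 0 selects an ingress redirect, per
BPF_IS_REDIRECT_INGRESS above):

	static int (*bpf_clone_redirect)(struct __sk_buff *skb, int ifindex,
					 int flags) =
		(void *) BPF_FUNC_clone_redirect;

	int cls_mirror(struct __sk_buff *skb)
	{
		bpf_clone_redirect(skb, 2, 0);	/* egress clone to ifindex 2 */
		return 0;
	}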
 static const struct bpf_func_proto *
 sk_filter_func_proto(enum bpf_func_id func_id)
 {
@@ -1358,6 +1438,12 @@
 		return &bpf_get_prandom_u32_proto;
 	case BPF_FUNC_get_smp_processor_id:
 		return &bpf_get_smp_processor_id_proto;
+	case BPF_FUNC_tail_call:
+		return &bpf_tail_call_proto;
+	case BPF_FUNC_ktime_get_ns:
+		return &bpf_ktime_get_ns_proto;
+	case BPF_FUNC_trace_printk:
+		return bpf_get_trace_printk_proto();
 	default:
 		return NULL;
 	}
@@ -1373,18 +1459,15 @@
 		return &bpf_l3_csum_replace_proto;
 	case BPF_FUNC_l4_csum_replace:
 		return &bpf_l4_csum_replace_proto;
+	case BPF_FUNC_clone_redirect:
+		return &bpf_clone_redirect_proto;
 	default:
 		return sk_filter_func_proto(func_id);
 	}
 }
 
-static bool sk_filter_is_valid_access(int off, int size,
-				      enum bpf_access_type type)
+static bool __is_valid_access(int off, int size, enum bpf_access_type type)
 {
-	/* only read is allowed */
-	if (type != BPF_READ)
-		return false;
-
 	/* check bounds */
 	if (off < 0 || off >= sizeof(struct __sk_buff))
 		return false;
@@ -1400,8 +1483,42 @@
 	return true;
 }
 
-static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off,
-					struct bpf_insn *insn_buf)
+static bool sk_filter_is_valid_access(int off, int size,
+				      enum bpf_access_type type)
+{
+	if (type == BPF_WRITE) {
+		switch (off) {
+		case offsetof(struct __sk_buff, cb[0]) ...
+			offsetof(struct __sk_buff, cb[4]):
+			break;
+		default:
+			return false;
+		}
+	}
+
+	return __is_valid_access(off, size, type);
+}
+
+static bool tc_cls_act_is_valid_access(int off, int size,
+				       enum bpf_access_type type)
+{
+	if (type == BPF_WRITE) {
+		switch (off) {
+		case offsetof(struct __sk_buff, mark):
+		case offsetof(struct __sk_buff, tc_index):
+		case offsetof(struct __sk_buff, cb[0]) ...
+			offsetof(struct __sk_buff, cb[4]):
+			break;
+		default:
+			return false;
+		}
+	}
+	return __is_valid_access(off, size, type);
+}
+
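Taken together, the two validators mean socket filters may write only the cb[]
scratch area, while tc classifier/action programs may additionally write mark
and tc_index; an illustrative tc-side snippet (values arbitrary):

	int cls_touch(struct __sk_buff *skb)
	{
		skb->cb[0] = 0x1;	/* scratch, backed by qdisc_skb_cb */
		skb->mark = 0xbeef;	/* rejected for plain socket filters */
		skb->tc_index = 4;
		return 0;
	}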
+static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
+				      int src_reg, int ctx_off,
+				      struct bpf_insn *insn_buf)
 {
 	struct bpf_insn *insn = insn_buf;
 
@@ -1434,8 +1551,34 @@
 				      offsetof(struct sk_buff, priority));
 		break;
 
+	case offsetof(struct __sk_buff, ingress_ifindex):
+		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4);
+
+		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+				      offsetof(struct sk_buff, skb_iif));
+		break;
+
+	case offsetof(struct __sk_buff, ifindex):
+		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
+
+		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
+				      dst_reg, src_reg,
+				      offsetof(struct sk_buff, dev));
+		*insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1);
+		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg,
+				      offsetof(struct net_device, ifindex));
+		break;
+
 	case offsetof(struct __sk_buff, mark):
-		return convert_skb_access(SKF_AD_MARK, dst_reg, src_reg, insn);
+		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
+
+		if (type == BPF_WRITE)
+			*insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
+					      offsetof(struct sk_buff, mark));
+		else
+			*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+					      offsetof(struct sk_buff, mark));
+		break;
 
 	case offsetof(struct __sk_buff, pkt_type):
 		return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn);
@@ -1450,6 +1593,38 @@
 	case offsetof(struct __sk_buff, vlan_tci):
 		return convert_skb_access(SKF_AD_VLAN_TAG,
 					  dst_reg, src_reg, insn);
+
+	case offsetof(struct __sk_buff, cb[0]) ...
+		offsetof(struct __sk_buff, cb[4]):
+		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
+
+		ctx_off -= offsetof(struct __sk_buff, cb[0]);
+		ctx_off += offsetof(struct sk_buff, cb);
+		ctx_off += offsetof(struct qdisc_skb_cb, data);
+		if (type == BPF_WRITE)
+			*insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
+		else
+			*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
+		break;
+
+	case offsetof(struct __sk_buff, tc_index):
+#ifdef CONFIG_NET_SCHED
+		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);
+
+		if (type == BPF_WRITE)
+			*insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg,
+					      offsetof(struct sk_buff, tc_index));
+		else
+			*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+					      offsetof(struct sk_buff, tc_index));
+		break;
+#else
+		if (type == BPF_WRITE)
+			*insn++ = BPF_MOV64_REG(dst_reg, dst_reg);
+		else
+			*insn++ = BPF_MOV64_IMM(dst_reg, 0);
+		break;
+#endif
 	}
 
 	return insn - insn_buf;
@@ -1458,13 +1633,13 @@
 static const struct bpf_verifier_ops sk_filter_ops = {
 	.get_func_proto = sk_filter_func_proto,
 	.is_valid_access = sk_filter_is_valid_access,
-	.convert_ctx_access = sk_filter_convert_ctx_access,
+	.convert_ctx_access = bpf_net_convert_ctx_access,
 };
 
 static const struct bpf_verifier_ops tc_cls_act_ops = {
 	.get_func_proto = tc_cls_act_func_proto,
-	.is_valid_access = sk_filter_is_valid_access,
-	.convert_ctx_access = sk_filter_convert_ctx_access,
+	.is_valid_access = tc_cls_act_is_valid_access,
+	.convert_ctx_access = bpf_net_convert_ctx_access,
 };
 
 static struct bpf_prog_type_list sk_filter_type __read_mostly = {
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 2c35c02..476e5dd 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1,3 +1,4 @@
+#include <linux/kernel.h>
 #include <linux/skbuff.h>
 #include <linux/export.h>
 #include <linux/ip.h>
@@ -12,20 +13,61 @@
 #include <linux/if_tunnel.h>
 #include <linux/if_pppox.h>
 #include <linux/ppp_defs.h>
-#include <net/flow_keys.h>
+#include <linux/stddef.h>
+#include <linux/if_ether.h>
+#include <linux/mpls.h>
+#include <net/flow_dissector.h>
 #include <scsi/fc/fc_fcoe.h>
 
-/* copy saddr & daddr, possibly using 64bit load/store
- * Equivalent to :	flow->src = iph->saddr;
- *			flow->dst = iph->daddr;
- */
-static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
+static bool skb_flow_dissector_uses_key(struct flow_dissector *flow_dissector,
+					enum flow_dissector_key_id key_id)
 {
-	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
-		     offsetof(typeof(*flow), src) + sizeof(flow->src));
-	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
+	return flow_dissector->used_keys & (1 << key_id);
 }
 
+static void skb_flow_dissector_set_key(struct flow_dissector *flow_dissector,
+				       enum flow_dissector_key_id key_id)
+{
+	flow_dissector->used_keys |= (1 << key_id);
+}
+
+static void *skb_flow_dissector_target(struct flow_dissector *flow_dissector,
+				       enum flow_dissector_key_id key_id,
+				       void *target_container)
+{
+	return ((char *) target_container) + flow_dissector->offset[key_id];
+}
+
+void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
+			     const struct flow_dissector_key *key,
+			     unsigned int key_count)
+{
+	unsigned int i;
+
+	memset(flow_dissector, 0, sizeof(*flow_dissector));
+
+	for (i = 0; i < key_count; i++, key++) {
+		/* User should make sure that every key target offset is within
+		 * the boundaries of an unsigned short.
+		 */
+		BUG_ON(key->offset > USHRT_MAX);
+		BUG_ON(skb_flow_dissector_uses_key(flow_dissector,
+						   key->key_id));
+
+		skb_flow_dissector_set_key(flow_dissector, key->key_id);
+		flow_dissector->offset[key->key_id] = key->offset;
+	}
+
+	/* Ensure that the dissector always includes the control and basic
+	 * keys. That way we avoid having to handle their absence in the
+	 * fast path.
+	 */
+	BUG_ON(!skb_flow_dissector_uses_key(flow_dissector,
+					    FLOW_DISSECTOR_KEY_CONTROL));
+	BUG_ON(!skb_flow_dissector_uses_key(flow_dissector,
+					    FLOW_DISSECTOR_KEY_BASIC));
+}
+EXPORT_SYMBOL(skb_flow_dissector_init);
+
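A sketch of the consumer side, under the rules enforced above (control and
basic are mandatory, every offset must fit in an unsigned short); my_keys and
my_dissector are placeholder names, and the flow_keys_dissector_keys array
later in this file follows exactly this shape:

	struct my_keys {
		struct flow_dissector_key_control control;
		struct flow_dissector_key_basic basic;
		struct flow_dissector_key_ports ports;
	};

	static const struct flow_dissector_key my_dissector_keys[] = {
		{
			.key_id = FLOW_DISSECTOR_KEY_CONTROL,
			.offset = offsetof(struct my_keys, control),
		},
		{
			.key_id = FLOW_DISSECTOR_KEY_BASIC,
			.offset = offsetof(struct my_keys, basic),
		},
		{
			.key_id = FLOW_DISSECTOR_KEY_PORTS,
			.offset = offsetof(struct my_keys, ports),
		},
	};

	static struct flow_dissector my_dissector;

	/* once, at init time: */
	skb_flow_dissector_init(&my_dissector, my_dissector_keys,
				ARRAY_SIZE(my_dissector_keys));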
 /**
  * __skb_flow_get_ports - extract the upper layer ports and return them
  * @skb: sk_buff to extract the ports from
@@ -63,17 +105,30 @@
 /**
  * __skb_flow_dissect - extract the flow_keys struct and return it
  * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
+ * @flow_dissector: list of keys to dissect
+ * @target_container: target structure to put dissected values into
  * @data: raw buffer pointer to the packet, if NULL use skb->data
  * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
  * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
  * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
  *
- * The function will try to retrieve the struct flow_keys from either the skbuff
- * or a raw buffer specified by the rest parameters
+ * The function will try to retrieve the individual keys into the target
+ * specified by flow_dissector, from either the skbuff or a raw buffer
+ * specified by the remaining parameters.
+ *
+ * The caller must take care of zeroing the target container memory.
  */
-bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
+bool __skb_flow_dissect(const struct sk_buff *skb,
+			struct flow_dissector *flow_dissector,
+			void *target_container,
 			void *data, __be16 proto, int nhoff, int hlen)
 {
+	struct flow_dissector_key_control *key_control;
+	struct flow_dissector_key_basic *key_basic;
+	struct flow_dissector_key_addrs *key_addrs;
+	struct flow_dissector_key_ports *key_ports;
+	struct flow_dissector_key_tags *key_tags;
+	struct flow_dissector_key_keyid *key_keyid;
 	u8 ip_proto;
 
 	if (!data) {
@@ -83,7 +138,30 @@
 		hlen = skb_headlen(skb);
 	}
 
-	memset(flow, 0, sizeof(*flow));
+	/* It is ensured by skb_flow_dissector_init() that the control key
+	 * will always be present.
+	 */
+	key_control = skb_flow_dissector_target(flow_dissector,
+						FLOW_DISSECTOR_KEY_CONTROL,
+						target_container);
+
+	/* It is ensured by skb_flow_dissector_init() that the basic key
+	 * will always be present.
+	 */
+	key_basic = skb_flow_dissector_target(flow_dissector,
+					      FLOW_DISSECTOR_KEY_BASIC,
+					      target_container);
+
+	if (skb_flow_dissector_uses_key(flow_dissector,
+					FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+		struct ethhdr *eth = eth_hdr(skb);
+		struct flow_dissector_key_eth_addrs *key_eth_addrs;
+
+		key_eth_addrs = skb_flow_dissector_target(flow_dissector,
+							  FLOW_DISSECTOR_KEY_ETH_ADDRS,
+							  target_container);
+		memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
+	}
 
 again:
 	switch (proto) {
@@ -100,14 +178,15 @@
 		if (ip_is_fragment(iph))
 			ip_proto = 0;
 
-		/* skip the address processing if skb is NULL.  The assumption
-		 * here is that if there is no skb we are not looking for flow
-		 * info but lengths and protocols.
-		 */
-		if (!skb)
+		if (!skb_flow_dissector_uses_key(flow_dissector,
+						 FLOW_DISSECTOR_KEY_IPV4_ADDRS))
 			break;
 
-		iph_to_flow_copy_addrs(flow, iph);
+		key_addrs = skb_flow_dissector_target(flow_dissector,
+			      FLOW_DISSECTOR_KEY_IPV4_ADDRS, target_container);
+		memcpy(&key_addrs->v4addrs, &iph->saddr,
+		       sizeof(key_addrs->v4addrs));
+		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 		break;
 	}
 	case htons(ETH_P_IPV6): {
@@ -123,25 +202,27 @@
 		ip_proto = iph->nexthdr;
 		nhoff += sizeof(struct ipv6hdr);
 
-		/* see comment above in IPv4 section */
-		if (!skb)
-			break;
+		if (skb_flow_dissector_uses_key(flow_dissector,
+						FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+			struct flow_dissector_key_ipv6_addrs *key_ipv6_addrs;
 
-		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
-		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
+			key_ipv6_addrs = skb_flow_dissector_target(flow_dissector,
+								   FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+								   target_container);
+
+			memcpy(key_ipv6_addrs, &iph->saddr, sizeof(*key_ipv6_addrs));
+			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+		}
 
 		flow_label = ip6_flowlabel(iph);
 		if (flow_label) {
-			/* Awesome, IPv6 packet has a flow label so we can
-			 * use that to represent the ports without any
-			 * further dissection.
-			 */
-			flow->n_proto = proto;
-			flow->ip_proto = ip_proto;
-			flow->ports = flow_label;
-			flow->thoff = (u16)nhoff;
-
-			return true;
+			if (skb_flow_dissector_uses_key(flow_dissector,
+				FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
+				key_tags = skb_flow_dissector_target(flow_dissector,
+								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
+								     target_container);
+				key_tags->flow_label = ntohl(flow_label);
+			}
 		}
 
 		break;
@@ -155,6 +236,15 @@
 		if (!vlan)
 			return false;
 
+		if (skb_flow_dissector_uses_key(flow_dissector,
+						FLOW_DISSECTOR_KEY_VLANID)) {
+			key_tags = skb_flow_dissector_target(flow_dissector,
+							     FLOW_DISSECTOR_KEY_VLANID,
+							     target_container);
+
+			key_tags->vlan_id = skb_vlan_tag_get_id(skb);
+		}
+
 		proto = vlan->h_vlan_encapsulated_proto;
 		nhoff += sizeof(*vlan);
 		goto again;
@@ -186,19 +276,58 @@
 		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
 		if (!hdr)
 			return false;
-		flow->src = hdr->srcnode;
-		flow->dst = 0;
-		flow->n_proto = proto;
-		flow->thoff = (u16)nhoff;
+		key_basic->n_proto = proto;
+		key_control->thoff = (u16)nhoff;
+
+		if (skb_flow_dissector_uses_key(flow_dissector,
+						FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
+			key_addrs = skb_flow_dissector_target(flow_dissector,
+							      FLOW_DISSECTOR_KEY_TIPC_ADDRS,
+							      target_container);
+			key_addrs->tipcaddrs.srcnode = hdr->srcnode;
+			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
+		}
 		return true;
 	}
+
+	case htons(ETH_P_MPLS_UC):
+	case htons(ETH_P_MPLS_MC): {
+		struct mpls_label *hdr, _hdr[2];
+mpls:
+		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
+					   hlen, &_hdr);
+		if (!hdr)
+			return false;
+
+		if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
+		     MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
+			if (skb_flow_dissector_uses_key(flow_dissector,
+							FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
+				key_keyid = skb_flow_dissector_target(flow_dissector,
+								      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
+								      target_container);
+				key_keyid->keyid = hdr[1].entry &
+					htonl(MPLS_LS_LABEL_MASK);
+			}
+
+			key_basic->n_proto = proto;
+			key_basic->ip_proto = ip_proto;
+			key_control->thoff = (u16)nhoff;
+
+			return true;
+		}
+
+		return true;
+	}
+
 	case htons(ETH_P_FCOE):
-		flow->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
+		key_control->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
 		/* fall through */
 	default:
 		return false;
 	}
 
+ip_proto_again:
 	switch (ip_proto) {
 	case IPPROTO_GRE: {
 		struct gre_hdr {
@@ -213,30 +342,65 @@
 		 * Only look inside GRE if version zero and no
 		 * routing
 		 */
-		if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
-			proto = hdr->proto;
-			nhoff += 4;
-			if (hdr->flags & GRE_CSUM)
-				nhoff += 4;
-			if (hdr->flags & GRE_KEY)
-				nhoff += 4;
-			if (hdr->flags & GRE_SEQ)
-				nhoff += 4;
-			if (proto == htons(ETH_P_TEB)) {
-				const struct ethhdr *eth;
-				struct ethhdr _eth;
+		if (hdr->flags & (GRE_VERSION | GRE_ROUTING))
+			break;
 
-				eth = __skb_header_pointer(skb, nhoff,
-							   sizeof(_eth),
-							   data, hlen, &_eth);
-				if (!eth)
-					return false;
-				proto = eth->h_proto;
-				nhoff += sizeof(*eth);
+		proto = hdr->proto;
+		nhoff += 4;
+		if (hdr->flags & GRE_CSUM)
+			nhoff += 4;
+		if (hdr->flags & GRE_KEY) {
+			const __be32 *keyid;
+			__be32 _keyid;
+
+			keyid = __skb_header_pointer(skb, nhoff, sizeof(_keyid),
+						     data, hlen, &_keyid);
+
+			if (!keyid)
+				return false;
+
+			if (skb_flow_dissector_uses_key(flow_dissector,
+							FLOW_DISSECTOR_KEY_GRE_KEYID)) {
+				key_keyid = skb_flow_dissector_target(flow_dissector,
+								      FLOW_DISSECTOR_KEY_GRE_KEYID,
+								      target_container);
+				key_keyid->keyid = *keyid;
 			}
-			goto again;
+			nhoff += 4;
 		}
-		break;
+		if (hdr->flags & GRE_SEQ)
+			nhoff += 4;
+		if (proto == htons(ETH_P_TEB)) {
+			const struct ethhdr *eth;
+			struct ethhdr _eth;
+
+			eth = __skb_header_pointer(skb, nhoff,
+						   sizeof(_eth),
+						   data, hlen, &_eth);
+			if (!eth)
+				return false;
+			proto = eth->h_proto;
+			nhoff += sizeof(*eth);
+		}
+		goto again;
+	}
+	case NEXTHDR_HOP:
+	case NEXTHDR_ROUTING:
+	case NEXTHDR_DEST: {
+		u8 _opthdr[2], *opthdr;
+
+		if (proto != htons(ETH_P_IPV6))
+			break;
+
+		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
+					      data, hlen, &_opthdr);
+		if (!opthdr)
+			return false;
+
+		ip_proto = opthdr[0];
+		nhoff += (opthdr[1] + 1) << 3;
+
+		goto ip_proto_again;
 	}
 	case IPPROTO_IPIP:
 		proto = htons(ETH_P_IP);
@@ -244,18 +408,25 @@
 	case IPPROTO_IPV6:
 		proto = htons(ETH_P_IPV6);
 		goto ipv6;
+	case IPPROTO_MPLS:
+		proto = htons(ETH_P_MPLS_UC);
+		goto mpls;
 	default:
 		break;
 	}
 
-	flow->n_proto = proto;
-	flow->ip_proto = ip_proto;
-	flow->thoff = (u16) nhoff;
+	key_basic->n_proto = proto;
+	key_basic->ip_proto = ip_proto;
+	key_control->thoff = (u16)nhoff;
 
-	/* unless skb is set we don't need to record port info */
-	if (skb)
-		flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
-						   data, hlen);
+	if (skb_flow_dissector_uses_key(flow_dissector,
+					FLOW_DISSECTOR_KEY_PORTS)) {
+		key_ports = skb_flow_dissector_target(flow_dissector,
+						      FLOW_DISSECTOR_KEY_PORTS,
+						      target_container);
+		key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
+							data, hlen);
+	}
 
 	return true;
 }
@@ -267,27 +438,109 @@
 	net_get_random_once(&hashrnd, sizeof(hashrnd));
 }
 
-static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
+static __always_inline u32 __flow_hash_words(u32 *words, u32 length, u32 keyval)
 {
-	__flow_hash_secret_init();
-	return jhash_3words(a, b, c, hashrnd);
+	return jhash2(words, length, keyval);
 }
 
-static inline u32 __flow_hash_from_keys(struct flow_keys *keys)
+static inline void *flow_keys_hash_start(struct flow_keys *flow)
+{
+	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
+	return (void *)flow + FLOW_KEYS_HASH_OFFSET;
+}
+
+static inline size_t flow_keys_hash_length(struct flow_keys *flow)
+{
+	size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
+	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
+	BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
+		     sizeof(*flow) - sizeof(flow->addrs));
+
+	switch (flow->control.addr_type) {
+	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+		diff -= sizeof(flow->addrs.v4addrs);
+		break;
+	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+		diff -= sizeof(flow->addrs.v6addrs);
+		break;
+	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
+		diff -= sizeof(flow->addrs.tipcaddrs);
+		break;
+	}
+	return (sizeof(*flow) - diff) / sizeof(u32);
+}
+
+__be32 flow_get_u32_src(const struct flow_keys *flow)
+{
+	switch (flow->control.addr_type) {
+	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+		return flow->addrs.v4addrs.src;
+	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+		return (__force __be32)ipv6_addr_hash(
+			&flow->addrs.v6addrs.src);
+	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
+		return flow->addrs.tipcaddrs.srcnode;
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL(flow_get_u32_src);
+
+__be32 flow_get_u32_dst(const struct flow_keys *flow)
+{
+	switch (flow->control.addr_type) {
+	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+		return flow->addrs.v4addrs.dst;
+	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+		return (__force __be32)ipv6_addr_hash(
+			&flow->addrs.v6addrs.dst);
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL(flow_get_u32_dst);
+
+static inline void __flow_hash_consistentify(struct flow_keys *keys)
+{
+	int addr_diff, i;
+
+	switch (keys->control.addr_type) {
+	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+		addr_diff = (__force u32)keys->addrs.v4addrs.dst -
+			    (__force u32)keys->addrs.v4addrs.src;
+		if ((addr_diff < 0) ||
+		    (addr_diff == 0 &&
+		     ((__force u16)keys->ports.dst <
+		      (__force u16)keys->ports.src))) {
+			swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
+			swap(keys->ports.src, keys->ports.dst);
+		}
+		break;
+	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+		addr_diff = memcmp(&keys->addrs.v6addrs.dst,
+				   &keys->addrs.v6addrs.src,
+				   sizeof(keys->addrs.v6addrs.dst));
+		if ((addr_diff < 0) ||
+		    (addr_diff == 0 &&
+		     ((__force u16)keys->ports.dst <
+		      (__force u16)keys->ports.src))) {
+			for (i = 0; i < 4; i++)
+				swap(keys->addrs.v6addrs.src.s6_addr32[i],
+				     keys->addrs.v6addrs.dst.s6_addr32[i]);
+			swap(keys->ports.src, keys->ports.dst);
+		}
+		break;
+	}
+}
+
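A worked example of the canonicalization: both directions of a flow must
produce the same key material before jhash2() runs:

	/* forward:  src 10.0.0.2:80,    dst 10.0.0.1:12345  -> swapped
	 * reply:    src 10.0.0.1:12345, dst 10.0.0.2:80     -> untouched
	 *
	 * both end up as src 10.0.0.1:12345, dst 10.0.0.2:80,
	 * so flow_hash_from_keys() sees identical input either way.
	 */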
+static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
 {
 	u32 hash;
 
-	/* get a consistent hash (same value on both flow directions) */
-	if (((__force u32)keys->dst < (__force u32)keys->src) ||
-	    (((__force u32)keys->dst == (__force u32)keys->src) &&
-	     ((__force u16)keys->port16[1] < (__force u16)keys->port16[0]))) {
-		swap(keys->dst, keys->src);
-		swap(keys->port16[0], keys->port16[1]);
-	}
+	__flow_hash_consistentify(keys);
 
-	hash = __flow_hash_3words((__force u32)keys->dst,
-				  (__force u32)keys->src,
-				  (__force u32)keys->ports);
+	hash = __flow_hash_words((u32 *)flow_keys_hash_start(keys),
+				 flow_keys_hash_length(keys), keyval);
 	if (!hash)
 		hash = 1;
 
@@ -296,12 +549,52 @@
 
 u32 flow_hash_from_keys(struct flow_keys *keys)
 {
-	return __flow_hash_from_keys(keys);
+	__flow_hash_secret_init();
+	return __flow_hash_from_keys(keys, hashrnd);
 }
 EXPORT_SYMBOL(flow_hash_from_keys);
 
-/*
- * __skb_get_hash: calculate a flow hash based on src/dst addresses
+static inline u32 ___skb_get_hash(const struct sk_buff *skb,
+				  struct flow_keys *keys, u32 keyval)
+{
+	if (!skb_flow_dissect_flow_keys(skb, keys))
+		return 0;
+
+	return __flow_hash_from_keys(keys, keyval);
+}
+
+struct _flow_keys_digest_data {
+	__be16	n_proto;
+	u8	ip_proto;
+	u8	padding;
+	__be32	ports;
+	__be32	src;
+	__be32	dst;
+};
+
+void make_flow_keys_digest(struct flow_keys_digest *digest,
+			   const struct flow_keys *flow)
+{
+	struct _flow_keys_digest_data *data =
+	    (struct _flow_keys_digest_data *)digest;
+
+	BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));
+
+	memset(digest, 0, sizeof(*digest));
+
+	data->n_proto = flow->basic.n_proto;
+	data->ip_proto = flow->basic.ip_proto;
+	data->ports = flow->ports.ports;
+	data->src = flow->addrs.v4addrs.src;
+	data->dst = flow->addrs.v4addrs.dst;
+}
+EXPORT_SYMBOL(make_flow_keys_digest);
+
+/**
+ * __skb_get_hash: calculate a flow hash
+ * @skb: sk_buff to calculate flow hash from
+ *
+ * This function calculates a flow hash based on src/dst addresses
  * and src/dst port numbers.  Sets hash in skb to non-zero hash value
  * on success, zero indicates no valid hash.  Also, sets l4_hash in skb
  * if hash is a canonical 4-tuple hash over transport ports.
@@ -309,53 +602,34 @@
 void __skb_get_hash(struct sk_buff *skb)
 {
 	struct flow_keys keys;
+	u32 hash;
 
-	if (!skb_flow_dissect(skb, &keys))
+	__flow_hash_secret_init();
+
+	hash = ___skb_get_hash(skb, &keys, hashrnd);
+	if (!hash)
 		return;
-
-	if (keys.ports)
+	if (keys.ports.ports)
 		skb->l4_hash = 1;
-
 	skb->sw_hash = 1;
-
-	skb->hash = __flow_hash_from_keys(&keys);
+	skb->hash = hash;
 }
 EXPORT_SYMBOL(__skb_get_hash);
 
-/*
- * Returns a Tx hash based on the given packet descriptor a Tx queues' number
- * to be used as a distribution range.
- */
-u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
-		  unsigned int num_tx_queues)
+__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
 {
-	u32 hash;
-	u16 qoffset = 0;
-	u16 qcount = num_tx_queues;
+	struct flow_keys keys;
 
-	if (skb_rx_queue_recorded(skb)) {
-		hash = skb_get_rx_queue(skb);
-		while (unlikely(hash >= num_tx_queues))
-			hash -= num_tx_queues;
-		return hash;
-	}
-
-	if (dev->num_tc) {
-		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
-		qoffset = dev->tc_to_txq[tc].offset;
-		qcount = dev->tc_to_txq[tc].count;
-	}
-
-	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
+	return ___skb_get_hash(skb, &keys, perturb);
 }
-EXPORT_SYMBOL(__skb_tx_hash);
+EXPORT_SYMBOL(skb_get_hash_perturb);
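
A sketch of the intended consumer pattern for the perturbed variant, e.g. a
flow-bucketing qdisc that must not share the globally cached skb->hash
(q_perturb and nr_buckets are assumed per-qdisc state):

	static u32 flow_bucket(const struct sk_buff *skb, u32 q_perturb,
			       u32 nr_buckets)
	{
		return reciprocal_scale(skb_get_hash_perturb(skb, q_perturb),
					nr_buckets);
	}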
 
 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
 		   const struct flow_keys *keys, int hlen)
 {
-	u32 poff = keys->thoff;
+	u32 poff = keys->control.thoff;
 
-	switch (keys->ip_proto) {
+	switch (keys->basic.ip_proto) {
 	case IPPROTO_TCP: {
 		/* access doff as u8 to avoid unaligned access */
 		const u8 *doff;
@@ -396,8 +670,12 @@
 	return poff;
 }
 
-/* skb_get_poff() returns the offset to the payload as far as it could
- * be dissected. The main user is currently BPF, so that we can dynamically
+/**
+ * skb_get_poff - get the offset to the payload
+ * @skb: sk_buff to get the payload offset from
+ *
+ * The function will get the offset to the payload as far as it could
+ * be dissected.  The main user is currently BPF, so that we can dynamically
  * truncate packets without needing to push actual payload to the user
  * space and can analyze headers only, instead.
  */
@@ -405,86 +683,76 @@
 {
 	struct flow_keys keys;
 
-	if (!skb_flow_dissect(skb, &keys))
+	if (!skb_flow_dissect_flow_keys(skb, &keys))
 		return 0;
 
 	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
 }
 
-static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+static const struct flow_dissector_key flow_keys_dissector_keys[] = {
+	{
+		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
+		.offset = offsetof(struct flow_keys, control),
+	},
+	{
+		.key_id = FLOW_DISSECTOR_KEY_BASIC,
+		.offset = offsetof(struct flow_keys, basic),
+	},
+	{
+		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+		.offset = offsetof(struct flow_keys, addrs.v4addrs),
+	},
+	{
+		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+		.offset = offsetof(struct flow_keys, addrs.v6addrs),
+	},
+	{
+		.key_id = FLOW_DISSECTOR_KEY_TIPC_ADDRS,
+		.offset = offsetof(struct flow_keys, addrs.tipcaddrs),
+	},
+	{
+		.key_id = FLOW_DISSECTOR_KEY_PORTS,
+		.offset = offsetof(struct flow_keys, ports),
+	},
+	{
+		.key_id = FLOW_DISSECTOR_KEY_VLANID,
+		.offset = offsetof(struct flow_keys, tags),
+	},
+	{
+		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
+		.offset = offsetof(struct flow_keys, tags),
+	},
+	{
+		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
+		.offset = offsetof(struct flow_keys, keyid),
+	},
+};
+
+static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
+	{
+		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
+		.offset = offsetof(struct flow_keys, control),
+	},
+	{
+		.key_id = FLOW_DISSECTOR_KEY_BASIC,
+		.offset = offsetof(struct flow_keys, basic),
+	},
+};
+
+struct flow_dissector flow_keys_dissector __read_mostly;
+EXPORT_SYMBOL(flow_keys_dissector);
+
+struct flow_dissector flow_keys_buf_dissector __read_mostly;
+
+static int __init init_default_flow_dissectors(void)
 {
-#ifdef CONFIG_XPS
-	struct xps_dev_maps *dev_maps;
-	struct xps_map *map;
-	int queue_index = -1;
-
-	rcu_read_lock();
-	dev_maps = rcu_dereference(dev->xps_maps);
-	if (dev_maps) {
-		map = rcu_dereference(
-		    dev_maps->cpu_map[skb->sender_cpu - 1]);
-		if (map) {
-			if (map->len == 1)
-				queue_index = map->queues[0];
-			else
-				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
-									   map->len)];
-			if (unlikely(queue_index >= dev->real_num_tx_queues))
-				queue_index = -1;
-		}
-	}
-	rcu_read_unlock();
-
-	return queue_index;
-#else
-	return -1;
-#endif
+	skb_flow_dissector_init(&flow_keys_dissector,
+				flow_keys_dissector_keys,
+				ARRAY_SIZE(flow_keys_dissector_keys));
+	skb_flow_dissector_init(&flow_keys_buf_dissector,
+				flow_keys_buf_dissector_keys,
+				ARRAY_SIZE(flow_keys_buf_dissector_keys));
+	return 0;
 }
 
-static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
-{
-	struct sock *sk = skb->sk;
-	int queue_index = sk_tx_queue_get(sk);
-
-	if (queue_index < 0 || skb->ooo_okay ||
-	    queue_index >= dev->real_num_tx_queues) {
-		int new_index = get_xps_queue(dev, skb);
-		if (new_index < 0)
-			new_index = skb_tx_hash(dev, skb);
-
-		if (queue_index != new_index && sk &&
-		    rcu_access_pointer(sk->sk_dst_cache))
-			sk_tx_queue_set(sk, new_index);
-
-		queue_index = new_index;
-	}
-
-	return queue_index;
-}
-
-struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-				    struct sk_buff *skb,
-				    void *accel_priv)
-{
-	int queue_index = 0;
-
-#ifdef CONFIG_XPS
-	if (skb->sender_cpu == 0)
-		skb->sender_cpu = raw_smp_processor_id() + 1;
-#endif
-
-	if (dev->real_num_tx_queues != 1) {
-		const struct net_device_ops *ops = dev->netdev_ops;
-		if (ops->ndo_select_queue)
-			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
-							    __netdev_pick_tx);
-		else
-			queue_index = __netdev_pick_tx(dev, skb);
-
-		if (!accel_priv)
-			queue_index = netdev_cap_txqueue(dev, queue_index);
-	}
-
-	skb_set_queue_mapping(skb, queue_index);
-	return netdev_get_tx_queue(dev, queue_index);
-}
+late_initcall_sync(init_default_flow_dissectors);
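
For reference, the new dissector setup above boils down to a table of { key id, offsetof() slot } descriptors handed to skb_flow_dissector_init(), which then writes each dissected key straight into the caller-supplied struct. A minimal user-space analog of that descriptor-table pattern (all demo_* names are hypothetical, not kernel API):

#include <stddef.h>
#include <stdio.h>

enum demo_key_id { DEMO_KEY_BASIC, DEMO_KEY_PORTS };

struct demo_flow_keys {
	unsigned short proto;	/* filled for DEMO_KEY_BASIC */
	unsigned int ports;	/* filled for DEMO_KEY_PORTS */
};

struct demo_dissector_key {
	enum demo_key_id key_id;
	size_t offset;		/* where the dissector stores this key */
};

static const struct demo_dissector_key demo_keys[] = {
	{ DEMO_KEY_BASIC, offsetof(struct demo_flow_keys, proto) },
	{ DEMO_KEY_PORTS, offsetof(struct demo_flow_keys, ports) },
};

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(demo_keys) / sizeof(demo_keys[0]); i++)
		printf("key %d -> offset %zu\n",
		       demo_keys[i].key_id, demo_keys[i].offset);
	return 0;
}

The same idea lets a consumer (e.g. a classifier) declare only the keys it actually cares about, rather than paying for a full dissection.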
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 3de6542..84195da 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -913,6 +913,7 @@
 			neigh->nud_state = NUD_PROBE;
 			neigh->updated = jiffies;
 			atomic_set(&neigh->probes, 0);
+			notify = 1;
 			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
 		}
 	} else {
@@ -957,6 +958,8 @@
 	rc = 0;
 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
 		goto out_unlock_bh;
+	if (neigh->dead)
+		goto out_dead;
 
 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
 		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
@@ -1013,6 +1016,13 @@
 		write_unlock(&neigh->lock);
 	local_bh_enable();
 	return rc;
+
+out_dead:
+	if (neigh->nud_state & NUD_STALE)
+		goto out_unlock_bh;
+	write_unlock_bh(&neigh->lock);
+	kfree_skb(skb);
+	return 1;
 }
 EXPORT_SYMBOL(__neigh_event_send);
 
@@ -1076,6 +1086,8 @@
 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
 	    (old & (NUD_NOARP | NUD_PERMANENT)))
 		goto out;
+	if (neigh->dead)
+		goto out;
 
 	if (!(new & NUD_VALID)) {
 		neigh_del_timer(neigh);
@@ -1144,6 +1156,8 @@
 
 	if (new != old) {
 		neigh_del_timer(neigh);
+		if (new & NUD_PROBE)
+			atomic_set(&neigh->probes, 0);
 		if (new & NUD_IN_TIMER)
 			neigh_add_timer(neigh, (jiffies +
 						((new & NUD_REACHABLE) ?
@@ -1225,6 +1239,8 @@
  */
 void __neigh_set_probe_once(struct neighbour *neigh)
 {
+	if (neigh->dead)
+		return;
 	neigh->updated = jiffies;
 	if (!(neigh->nud_state & NUD_FAILED))
 		return;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 4238d6d..18b34d7 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -458,11 +458,15 @@
 		return restart_syscall();
 
 	if (dev_isalive(netdev)) {
-		struct netdev_phys_item_id ppid;
+		struct switchdev_attr attr = {
+			.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+			.flags = SWITCHDEV_F_NO_RECURSE,
+		};
 
-		ret = netdev_switch_parent_id_get(netdev, &ppid);
+		ret = switchdev_port_attr_get(netdev, &attr);
 		if (!ret)
-			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
+			ret = sprintf(buf, "%*phN\n", attr.u.ppid.id_len,
+				      attr.u.ppid.id);
 	}
 	rtnl_unlock();
 
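The attribute filled in above surfaces to user space as /sys/class/net/<dev>/phys_switch_id. A minimal sketch of a reader (the device name sw0p1 is purely illustrative):

#include <stdio.h>

int main(void)
{
	char id[64];
	FILE *f = fopen("/sys/class/net/sw0p1/phys_switch_id", "r");

	if (!f) {
		perror("phys_switch_id");
		return 1;
	}
	if (fgets(id, sizeof(id), f))
		printf("parent switch id: %s", id);
	fclose(f);
	return 0;
}
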
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 572af00..2c2eb1b 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -147,24 +147,17 @@
 	}
 }
 
-static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
-			      int id);
+/* should be called with nsid_lock held */
 static int alloc_netid(struct net *net, struct net *peer, int reqid)
 {
-	int min = 0, max = 0, id;
-
-	ASSERT_RTNL();
+	int min = 0, max = 0;
 
 	if (reqid >= 0) {
 		min = reqid;
 		max = reqid + 1;
 	}
 
-	id = idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL);
-	if (id >= 0)
-		rtnl_net_notifyid(net, peer, RTM_NEWNSID, id);
-
-	return id;
+	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
 }
 
 /* This function is used by idr_for_each(). If net is equal to peer, the
@@ -180,11 +173,16 @@
 	return 0;
 }
 
-static int __peernet2id(struct net *net, struct net *peer, bool alloc)
+/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
+ * is set to true, thus the caller knows that the new id must be notified via
+ * rtnl.
+ */
+static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
 {
 	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
+	bool alloc_it = *alloc;
 
-	ASSERT_RTNL();
+	*alloc = false;
 
 	/* Magic value for id 0. */
 	if (id == NET_ID_ZERO)
@@ -192,36 +190,77 @@
 	if (id > 0)
 		return id;
 
-	if (alloc)
-		return alloc_netid(net, peer, -1);
+	if (alloc_it) {
+		id = alloc_netid(net, peer, -1);
+		*alloc = true;
+		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
+	}
 
-	return -ENOENT;
+	return NETNSA_NSID_NOT_ASSIGNED;
 }
 
+/* should be called with nsid_lock held */
+static int __peernet2id(struct net *net, struct net *peer)
+{
+	bool no = false;
+
+	return __peernet2id_alloc(net, peer, &no);
+}
+
+static void rtnl_net_notifyid(struct net *net, int cmd, int id);
 /* This function returns the id of a peer netns. If no id is assigned, one will
  * be allocated and returned.
  */
-int peernet2id(struct net *net, struct net *peer)
+int peernet2id_alloc(struct net *net, struct net *peer)
 {
-	bool alloc = atomic_read(&peer->count) == 0 ? false : true;
+	unsigned long flags;
+	bool alloc;
 	int id;
 
-	id = __peernet2id(net, peer, alloc);
-	return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
+	spin_lock_irqsave(&net->nsid_lock, flags);
+	alloc = atomic_read(&peer->count) == 0 ? false : true;
+	id = __peernet2id_alloc(net, peer, &alloc);
+	spin_unlock_irqrestore(&net->nsid_lock, flags);
+	if (alloc && id >= 0)
+		rtnl_net_notifyid(net, RTM_NEWNSID, id);
+	return id;
 }
-EXPORT_SYMBOL(peernet2id);
+EXPORT_SYMBOL(peernet2id_alloc);
+
+/* This function returns, if assigned, the id of a peer netns. */
+int peernet2id(struct net *net, struct net *peer)
+{
+	unsigned long flags;
+	int id;
+
+	spin_lock_irqsave(&net->nsid_lock, flags);
+	id = __peernet2id(net, peer);
+	spin_unlock_irqrestore(&net->nsid_lock, flags);
+	return id;
+}
+
+/* This function returns true if the peer netns has an id assigned into the
+ * current netns.
+ */
+bool peernet_has_id(struct net *net, struct net *peer)
+{
+	return peernet2id(net, peer) >= 0;
+}
 
 struct net *get_net_ns_by_id(struct net *net, int id)
 {
+	unsigned long flags;
 	struct net *peer;
 
 	if (id < 0)
 		return NULL;
 
 	rcu_read_lock();
+	spin_lock_irqsave(&net->nsid_lock, flags);
 	peer = idr_find(&net->netns_ids, id);
 	if (peer)
 		get_net(peer);
+	spin_unlock_irqrestore(&net->nsid_lock, flags);
 	rcu_read_unlock();
 
 	return peer;
@@ -242,6 +281,7 @@
 	net->dev_base_seq = 1;
 	net->user_ns = user_ns;
 	idr_init(&net->netns_ids);
+	spin_lock_init(&net->nsid_lock);
 
 	list_for_each_entry(ops, &pernet_list, list) {
 		error = ops_init(ops, net);
@@ -362,14 +402,19 @@
 		list_del_rcu(&net->list);
 		list_add_tail(&net->exit_list, &net_exit_list);
 		for_each_net(tmp) {
-			int id = __peernet2id(tmp, net, false);
+			int id;
 
-			if (id >= 0) {
-				rtnl_net_notifyid(tmp, net, RTM_DELNSID, id);
+			spin_lock_irq(&tmp->nsid_lock);
+			id = __peernet2id(tmp, net);
+			if (id >= 0)
 				idr_remove(&tmp->netns_ids, id);
-			}
+			spin_unlock_irq(&tmp->nsid_lock);
+			if (id >= 0)
+				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
 		}
+		spin_lock_irq(&net->nsid_lock);
 		idr_destroy(&net->netns_ids);
+		spin_unlock_irq(&net->nsid_lock);
 
 	}
 	rtnl_unlock();
@@ -497,6 +542,7 @@
 {
 	struct net *net = sock_net(skb->sk);
 	struct nlattr *tb[NETNSA_MAX + 1];
+	unsigned long flags;
 	struct net *peer;
 	int nsid, err;
 
@@ -517,14 +563,19 @@
 	if (IS_ERR(peer))
 		return PTR_ERR(peer);
 
-	if (__peernet2id(net, peer, false) >= 0) {
+	spin_lock_irqsave(&net->nsid_lock, flags);
+	if (__peernet2id(net, peer) >= 0) {
+		spin_unlock_irqrestore(&net->nsid_lock, flags);
 		err = -EEXIST;
 		goto out;
 	}
 
 	err = alloc_netid(net, peer, nsid);
-	if (err > 0)
+	spin_unlock_irqrestore(&net->nsid_lock, flags);
+	if (err >= 0) {
+		rtnl_net_notifyid(net, RTM_NEWNSID, err);
 		err = 0;
+	}
 out:
 	put_net(peer);
 	return err;
@@ -538,14 +589,10 @@
 }
 
 static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
-			 int cmd, struct net *net, struct net *peer,
-			 int nsid)
+			 int cmd, struct net *net, int nsid)
 {
 	struct nlmsghdr *nlh;
 	struct rtgenmsg *rth;
-	int id;
-
-	ASSERT_RTNL();
 
 	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
 	if (!nlh)
@@ -554,14 +601,7 @@
 	rth = nlmsg_data(nlh);
 	rth->rtgen_family = AF_UNSPEC;
 
-	if (nsid >= 0) {
-		id = nsid;
-	} else {
-		id = __peernet2id(net, peer, false);
-		if  (id < 0)
-			id = NETNSA_NSID_NOT_ASSIGNED;
-	}
-	if (nla_put_s32(skb, NETNSA_NSID, id))
+	if (nla_put_s32(skb, NETNSA_NSID, nsid))
 		goto nla_put_failure;
 
 	nlmsg_end(skb, nlh);
@@ -578,7 +618,7 @@
 	struct nlattr *tb[NETNSA_MAX + 1];
 	struct sk_buff *msg;
 	struct net *peer;
-	int err;
+	int err, id;
 
 	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
 			  rtnl_net_policy);
@@ -600,8 +640,9 @@
 		goto out;
 	}
 
+	id = peernet2id(net, peer);
 	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
-			    RTM_NEWNSID, net, peer, -1);
+			    RTM_NEWNSID, net, id);
 	if (err < 0)
 		goto err_out;
 
@@ -633,7 +674,7 @@
 
 	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
 			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
-			    RTM_NEWNSID, net_cb->net, peer, id);
+			    RTM_NEWNSID, net_cb->net, id);
 	if (ret < 0)
 		return ret;
 
@@ -652,17 +693,17 @@
 		.idx = 0,
 		.s_idx = cb->args[0],
 	};
+	unsigned long flags;
 
-	ASSERT_RTNL();
-
+	spin_lock_irqsave(&net->nsid_lock, flags);
 	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
+	spin_unlock_irqrestore(&net->nsid_lock, flags);
 
 	cb->args[0] = net_cb.idx;
 	return skb->len;
 }
 
-static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
-			      int id)
+static void rtnl_net_notifyid(struct net *net, int cmd, int id)
 {
 	struct sk_buff *msg;
 	int err = -ENOMEM;
@@ -671,7 +712,7 @@
 	if (!msg)
 		goto out;
 
-	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, peer, id);
+	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
 	if (err < 0)
 		goto err_out;
 
diff --git a/net/core/netevent.c b/net/core/netevent.c
index f17ccd2..8b3bc4f 100644
--- a/net/core/netevent.c
+++ b/net/core/netevent.c
@@ -31,10 +31,7 @@
  */
 int register_netevent_notifier(struct notifier_block *nb)
 {
-	int err;
-
-	err = atomic_notifier_chain_register(&netevent_notif_chain, nb);
-	return err;
+	return atomic_notifier_chain_register(&netevent_notif_chain, nb);
 }
 EXPORT_SYMBOL_GPL(register_netevent_notifier);
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 54817d3..05badbb 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -177,7 +177,7 @@
 #include <asm/dma.h>
 #include <asm/div64.h>		/* do_div */
 
-#define VERSION	"2.74"
+#define VERSION	"2.75"
 #define IP_NAME_SZ 32
 #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
 #define MPLS_STACK_BOTTOM htonl(0x00000100)
@@ -210,6 +210,10 @@
 #define T_REMDEVALL   (1<<2)	/* Remove all devs */
 #define T_REMDEV      (1<<3)	/* Remove one dev */
 
+/* Xmit modes */
+#define M_START_XMIT		0	/* Default normal TX */
+#define M_NETIF_RECEIVE	1	/* Inject packets into stack */
+
 /* If lock -- protects updating of if_list */
 #define   if_lock(t)           spin_lock(&(t->if_lock));
 #define   if_unlock(t)           spin_unlock(&(t->if_lock));
@@ -251,13 +255,14 @@
 	 * we will do a random selection from within the range.
 	 */
 	__u32 flags;
-	int removal_mark;	/* non-zero => the device is marked for
-				 * removal by worker thread */
-
+	int xmit_mode;
 	int min_pkt_size;
 	int max_pkt_size;
 	int pkt_overhead;	/* overhead for MPLS, VLANs, IPSEC etc */
 	int nfrags;
+	int removal_mark;	/* non-zero => the device is marked for
+				 * removal by worker thread */
+
 	struct page *page;
 	u64 delay;		/* nano-seconds */
 
@@ -507,7 +512,7 @@
 		pktgen_reset_all_threads(pn);
 
 	else
-		pr_warn("Unknown command: %s\n", data);
+		return -EINVAL;
 
 	return count;
 }
@@ -567,7 +572,7 @@
 			   "     dst_min: %s  dst_max: %s\n",
 			   pkt_dev->dst_min, pkt_dev->dst_max);
 		seq_printf(seq,
-			   "        src_min: %s  src_max: %s\n",
+			   "     src_min: %s  src_max: %s\n",
 			   pkt_dev->src_min, pkt_dev->src_max);
 	}
 
@@ -620,6 +625,9 @@
 	if (pkt_dev->node >= 0)
 		seq_printf(seq, "     node: %d\n", pkt_dev->node);
 
+	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE)
+		seq_puts(seq, "     xmit_mode: netif_receive\n");
+
 	seq_puts(seq, "     Flags: ");
 
 	if (pkt_dev->flags & F_IPV6)
@@ -1081,7 +1089,8 @@
 		if (len < 0)
 			return len;
 		if ((value > 0) &&
-		    (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
+		    ((pkt_dev->xmit_mode == M_NETIF_RECEIVE) ||
+		     !(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
 			return -ENOTSUPP;
 		i += len;
 		pkt_dev->clone_skb = value;
@@ -1134,7 +1143,7 @@
 			return len;
 
 		i += len;
-		if ((value > 1) &&
+		if ((value > 1) && (pkt_dev->xmit_mode == M_START_XMIT) &&
 		    (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
 			return -ENOTSUPP;
 		pkt_dev->burst = value < 1 ? 1 : value;
@@ -1160,6 +1169,45 @@
 			sprintf(pg_result, "ERROR: node not possible");
 		return count;
 	}
+	if (!strcmp(name, "xmit_mode")) {
+		char f[32];
+
+		memset(f, 0, 32);
+		len = strn_len(&user_buffer[i], sizeof(f) - 1);
+		if (len < 0)
+			return len;
+
+		if (copy_from_user(f, &user_buffer[i], len))
+			return -EFAULT;
+		i += len;
+
+		if (strcmp(f, "start_xmit") == 0) {
+			pkt_dev->xmit_mode = M_START_XMIT;
+		} else if (strcmp(f, "netif_receive") == 0) {
+			/* clone_skb set earlier, not supported in this mode */
+			if (pkt_dev->clone_skb > 0)
+				return -ENOTSUPP;
+
+			pkt_dev->xmit_mode = M_NETIF_RECEIVE;
+
+			/* make sure new packet is allocated every time
+			 * pktgen_xmit() is called
+			 */
+			pkt_dev->last_ok = 1;
+
+			/* override clone_skb if user passed default value
+			 * at module loading time
+			 */
+			pkt_dev->clone_skb = 0;
+		} else {
+			sprintf(pg_result,
+				"xmit_mode -:%s:- unknown\nAvailable modes: %s",
+				f, "start_xmit, netif_receive\n");
+			return count;
+		}
+		sprintf(pg_result, "OK: xmit_mode=%s", f);
+		return count;
+	}
 	if (!strcmp(name, "flag")) {
 		char f[32];
 		memset(f, 0, 32);
@@ -1267,6 +1315,9 @@
 		else if (strcmp(f, "NO_TIMESTAMP") == 0)
 			pkt_dev->flags |= F_NO_TIMESTAMP;
 
+		else if (strcmp(f, "!NO_TIMESTAMP") == 0)
+			pkt_dev->flags &= ~F_NO_TIMESTAMP;
+
 		else {
 			sprintf(pg_result,
 				"Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
@@ -2592,9 +2643,9 @@
 		struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
 		int nhead = 0;
 		if (x) {
-			int ret;
-			__u8 *eth;
+			struct ethhdr *eth;
 			struct iphdr *iph;
+			int ret;
 
 			nhead = x->props.header_len - skb_headroom(skb);
 			if (nhead > 0) {
@@ -2614,9 +2665,9 @@
 				goto err;
 			}
 			/* restore ll */
-			eth = (__u8 *) skb_push(skb, ETH_HLEN);
-			memcpy(eth, pkt_dev->hh, 12);
-			*(u16 *) &eth[12] = protocol;
+			eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
+			memcpy(eth, pkt_dev->hh, 2 * ETH_ALEN);
+			eth->h_proto = protocol;
 
 			/* Update IPv4 header len as well as checksum value */
 			iph = ip_hdr(skb);
@@ -3315,6 +3366,7 @@
 	unsigned int burst = ACCESS_ONCE(pkt_dev->burst);
 	struct net_device *odev = pkt_dev->odev;
 	struct netdev_queue *txq;
+	struct sk_buff *skb;
 	int ret;
 
 	/* If device is offline, then don't send */
@@ -3352,6 +3404,37 @@
 	if (pkt_dev->delay && pkt_dev->last_ok)
 		spin(pkt_dev, pkt_dev->next_tx);
 
+	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
+		skb = pkt_dev->skb;
+		skb->protocol = eth_type_trans(skb, skb->dev);
+		atomic_add(burst, &skb->users);
+		local_bh_disable();
+		do {
+			ret = netif_receive_skb(skb);
+			if (ret == NET_RX_DROP)
+				pkt_dev->errors++;
+			pkt_dev->sofar++;
+			pkt_dev->seq_num++;
+			if (atomic_read(&skb->users) != burst) {
+				/* skb was queued by rps/rfs or taps,
+				 * so cannot reuse this skb
+				 */
+				atomic_sub(burst - 1, &skb->users);
+				/* get out of the loop and wait
+				 * until skb is consumed
+				 */
+				break;
+			}
+			/* skb was 'freed' by stack, so clean a few
+			 * bits and reuse it
+			 */
+#ifdef CONFIG_NET_CLS_ACT
+			skb->tc_verd = 0; /* reset reclass/redir ttl */
+#endif
+		} while (--burst > 0);
+		goto out; /* Skips xmit_mode M_START_XMIT */
+	}
+
 	txq = skb_get_tx_queue(odev, pkt_dev->skb);
 
 	local_bh_disable();
@@ -3399,6 +3482,7 @@
 unlock:
 	HARD_TX_UNLOCK(odev, txq);
 
+out:
 	local_bh_enable();
 
 	/* If pkt_dev->count is zero, then run forever */
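
Configuring the new mode from user space is a plain write to the pktgen proc interface. A minimal sketch (assumes the pktgen module is loaded and eth0 has already been added to a pktgen thread):

#include <stdio.h>

int main(void)
{
	/* pktgen parses one command per write() */
	FILE *f = fopen("/proc/net/pktgen/eth0", "w");

	if (!f) {
		perror("pktgen");
		return 1;
	}
	fprintf(f, "xmit_mode netif_receive\n");	/* vs. default start_xmit */
	fclose(f);		/* flush issues the single write */
	return 0;
}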
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8de3682..01ced4a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -819,7 +819,19 @@
 			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
 			 nla_total_size(sizeof(struct ifla_vf_rate)) +
 			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
-			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)));
+			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
+			 /* IFLA_VF_STATS_RX_PACKETS */
+			 nla_total_size(sizeof(__u64)) +
+			 /* IFLA_VF_STATS_TX_PACKETS */
+			 nla_total_size(sizeof(__u64)) +
+			 /* IFLA_VF_STATS_RX_BYTES */
+			 nla_total_size(sizeof(__u64)) +
+			 /* IFLA_VF_STATS_TX_BYTES */
+			 nla_total_size(sizeof(__u64)) +
+			 /* IFLA_VF_STATS_BROADCAST */
+			 nla_total_size(sizeof(__u64)) +
+			 /* IFLA_VF_STATS_MULTICAST */
+			 nla_total_size(sizeof(__u64)));
 		return size;
 	} else
 		return 0;
@@ -1004,16 +1016,20 @@
 static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
 {
 	int err;
-	struct netdev_phys_item_id psid;
+	struct switchdev_attr attr = {
+		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+		.flags = SWITCHDEV_F_NO_RECURSE,
+	};
 
-	err = netdev_switch_parent_id_get(dev, &psid);
+	err = switchdev_port_attr_get(dev, &attr);
 	if (err) {
 		if (err == -EOPNOTSUPP)
 			return 0;
 		return err;
 	}
 
-	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, psid.id_len, psid.id))
+	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len,
+		    attr.u.ppid.id))
 		return -EMSGSIZE;
 
 	return 0;
@@ -1119,7 +1135,7 @@
 	    && (ext_filter_mask & RTEXT_FILTER_VF)) {
 		int i;
 
-		struct nlattr *vfinfo, *vf;
+		struct nlattr *vfinfo, *vf, *vfstats;
 		int num_vfs = dev_num_vf(dev->dev.parent);
 
 		vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
@@ -1134,6 +1150,7 @@
 			struct ifla_vf_spoofchk vf_spoofchk;
 			struct ifla_vf_link_state vf_linkstate;
 			struct ifla_vf_rss_query_en vf_rss_query_en;
+			struct ifla_vf_stats vf_stats;
 
 			/*
 			 * Not all SR-IOV capable drivers support the
@@ -1186,6 +1203,30 @@
 				    sizeof(vf_rss_query_en),
 				    &vf_rss_query_en))
 				goto nla_put_failure;
+			memset(&vf_stats, 0, sizeof(vf_stats));
+			if (dev->netdev_ops->ndo_get_vf_stats)
+				dev->netdev_ops->ndo_get_vf_stats(dev, i,
+								  &vf_stats);
+			vfstats = nla_nest_start(skb, IFLA_VF_STATS);
+			if (!vfstats) {
+				nla_nest_cancel(skb, vf);
+				nla_nest_cancel(skb, vfinfo);
+				goto nla_put_failure;
+			}
+			if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
+					vf_stats.rx_packets) ||
+			    nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
+					vf_stats.tx_packets) ||
+			    nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
+					vf_stats.rx_bytes) ||
+			    nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
+					vf_stats.tx_bytes) ||
+			    nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
+					vf_stats.broadcast) ||
+			    nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
+					vf_stats.multicast))
+				goto nla_put_failure;
+			nla_nest_end(skb, vfstats);
 			nla_nest_end(skb, vf);
 		}
 		nla_nest_end(skb, vfinfo);
@@ -1204,7 +1245,7 @@
 		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
 
 		if (!net_eq(dev_net(dev), link_net)) {
-			int id = peernet2id(dev_net(dev), link_net);
+			int id = peernet2id_alloc(dev_net(dev), link_net);
 
 			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
 				goto nla_put_failure;
@@ -1299,6 +1340,16 @@
 	[IFLA_VF_RATE]		= { .len = sizeof(struct ifla_vf_rate) },
 	[IFLA_VF_LINK_STATE]	= { .len = sizeof(struct ifla_vf_link_state) },
 	[IFLA_VF_RSS_QUERY_EN]	= { .len = sizeof(struct ifla_vf_rss_query_en) },
+	[IFLA_VF_STATS]		= { .type = NLA_NESTED },
+};
+
+static const struct nla_policy ifla_vf_stats_policy[IFLA_VF_STATS_MAX + 1] = {
+	[IFLA_VF_STATS_RX_PACKETS]	= { .type = NLA_U64 },
+	[IFLA_VF_STATS_TX_PACKETS]	= { .type = NLA_U64 },
+	[IFLA_VF_STATS_RX_BYTES]	= { .type = NLA_U64 },
+	[IFLA_VF_STATS_TX_BYTES]	= { .type = NLA_U64 },
+	[IFLA_VF_STATS_BROADCAST]	= { .type = NLA_U64 },
+	[IFLA_VF_STATS_MULTICAST]	= { .type = NLA_U64 },
 };
 
 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
@@ -2857,7 +2908,11 @@
 
 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 			    struct net_device *dev, u16 mode,
-			    u32 flags, u32 mask, int nlflags)
+			    u32 flags, u32 mask, int nlflags,
+			    u32 filter_mask,
+			    int (*vlan_fill)(struct sk_buff *skb,
+					     struct net_device *dev,
+					     u32 filter_mask))
 {
 	struct nlmsghdr *nlh;
 	struct ifinfomsg *ifm;
@@ -2865,6 +2920,7 @@
 	struct nlattr *protinfo;
 	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
 	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
+	int err = 0;
 
 	nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
 	if (nlh == NULL)
@@ -2905,6 +2961,13 @@
 			goto nla_put_failure;
 		}
 	}
+	if (vlan_fill) {
+		err = vlan_fill(skb, dev, filter_mask);
+		if (err) {
+			nla_nest_cancel(skb, br_afspec);
+			goto nla_put_failure;
+		}
+	}
 	nla_nest_end(skb, br_afspec);
 
 	protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
@@ -2938,9 +3001,9 @@
 	return 0;
 nla_put_failure:
 	nlmsg_cancel(skb, nlh);
-	return -EMSGSIZE;
+	return err ? err : -EMSGSIZE;
 }
-EXPORT_SYMBOL(ndo_dflt_bridge_getlink);
+EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
 
 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 {
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 51dd319..fd3ce46 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -154,7 +154,7 @@
 	net_secret_init();
 	memcpy(hash, saddr, 16);
 	for (i = 0; i < 4; i++)
-		secret[i] = net_secret[i] + daddr[i];
+		secret[i] = net_secret[i] + (__force u32)daddr[i];
 	secret[4] = net_secret[4] +
 		(((__force u16)sport << 16) + (__force u16)dport);
 	for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 41ec022..b6a19ca 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -347,94 +347,18 @@
 }
 EXPORT_SYMBOL(build_skb);
 
-struct netdev_alloc_cache {
-	struct page_frag	frag;
-	/* we maintain a pagecount bias, so that we dont dirty cache line
-	 * containing page->_count every time we allocate a fragment.
-	 */
-	unsigned int		pagecnt_bias;
-};
-static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
-static DEFINE_PER_CPU(struct netdev_alloc_cache, napi_alloc_cache);
-
-static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
-				       gfp_t gfp_mask)
-{
-	const unsigned int order = NETDEV_FRAG_PAGE_MAX_ORDER;
-	struct page *page = NULL;
-	gfp_t gfp = gfp_mask;
-
-	if (order) {
-		gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
-			    __GFP_NOMEMALLOC;
-		page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
-		nc->frag.size = PAGE_SIZE << (page ? order : 0);
-	}
-
-	if (unlikely(!page))
-		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
-
-	nc->frag.page = page;
-
-	return page;
-}
-
-static void *__alloc_page_frag(struct netdev_alloc_cache __percpu *cache,
-			       unsigned int fragsz, gfp_t gfp_mask)
-{
-	struct netdev_alloc_cache *nc = this_cpu_ptr(cache);
-	struct page *page = nc->frag.page;
-	unsigned int size;
-	int offset;
-
-	if (unlikely(!page)) {
-refill:
-		page = __page_frag_refill(nc, gfp_mask);
-		if (!page)
-			return NULL;
-
-		/* if size can vary use frag.size else just use PAGE_SIZE */
-		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
-
-		/* Even if we own the page, we do not use atomic_set().
-		 * This would break get_page_unless_zero() users.
-		 */
-		atomic_add(size - 1, &page->_count);
-
-		/* reset page count bias and offset to start of new frag */
-		nc->pagecnt_bias = size;
-		nc->frag.offset = size;
-	}
-
-	offset = nc->frag.offset - fragsz;
-	if (unlikely(offset < 0)) {
-		if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
-			goto refill;
-
-		/* if size can vary use frag.size else just use PAGE_SIZE */
-		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
-
-		/* OK, page count is 0, we can safely set it */
-		atomic_set(&page->_count, size);
-
-		/* reset page count bias and offset to start of new frag */
-		nc->pagecnt_bias = size;
-		offset = size - fragsz;
-	}
-
-	nc->pagecnt_bias--;
-	nc->frag.offset = offset;
-
-	return page_address(page) + offset;
-}
+static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
+static DEFINE_PER_CPU(struct page_frag_cache, napi_alloc_cache);
 
 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
+	struct page_frag_cache *nc;
 	unsigned long flags;
 	void *data;
 
 	local_irq_save(flags);
-	data = __alloc_page_frag(&netdev_alloc_cache, fragsz, gfp_mask);
+	nc = this_cpu_ptr(&netdev_alloc_cache);
+	data = __alloc_page_frag(nc, fragsz, gfp_mask);
 	local_irq_restore(flags);
 	return data;
 }
@@ -454,7 +378,9 @@
 
 static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
-	return __alloc_page_frag(&napi_alloc_cache, fragsz, gfp_mask);
+	struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+
+	return __alloc_page_frag(nc, fragsz, gfp_mask);
 }
 
 void *napi_alloc_frag(unsigned int fragsz)
@@ -464,51 +390,6 @@
 EXPORT_SYMBOL(napi_alloc_frag);
 
 /**
- *	__alloc_rx_skb - allocate an skbuff for rx
- *	@length: length to allocate
- *	@gfp_mask: get_free_pages mask, passed to alloc_skb
- *	@flags:	If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
- *		allocations in case we have to fallback to __alloc_skb()
- *		If SKB_ALLOC_NAPI is set, page fragment will be allocated
- *		from napi_cache instead of netdev_cache.
- *
- *	Allocate a new &sk_buff and assign it a usage count of one. The
- *	buffer has unspecified headroom built in. Users should allocate
- *	the headroom they think they need without accounting for the
- *	built in space. The built in space is used for optimisations.
- *
- *	%NULL is returned if there is no free memory.
- */
-static struct sk_buff *__alloc_rx_skb(unsigned int length, gfp_t gfp_mask,
-				      int flags)
-{
-	struct sk_buff *skb = NULL;
-	unsigned int fragsz = SKB_DATA_ALIGN(length) +
-			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
-	if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
-		void *data;
-
-		if (sk_memalloc_socks())
-			gfp_mask |= __GFP_MEMALLOC;
-
-		data = (flags & SKB_ALLOC_NAPI) ?
-			__napi_alloc_frag(fragsz, gfp_mask) :
-			__netdev_alloc_frag(fragsz, gfp_mask);
-
-		if (likely(data)) {
-			skb = build_skb(data, fragsz);
-			if (unlikely(!skb))
-				put_page(virt_to_head_page(data));
-		}
-	} else {
-		skb = __alloc_skb(length, gfp_mask,
-				  SKB_ALLOC_RX, NUMA_NO_NODE);
-	}
-	return skb;
-}
-
-/**
  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
  *	@dev: network device to receive on
  *	@length: length to allocate
@@ -521,19 +402,58 @@
  *
  *	%NULL is returned if there is no free memory.
  */
-struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
-				   unsigned int length, gfp_t gfp_mask)
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
+				   gfp_t gfp_mask)
 {
+	struct page_frag_cache *nc;
+	unsigned long flags;
 	struct sk_buff *skb;
+	bool pfmemalloc;
+	void *data;
 
-	length += NET_SKB_PAD;
-	skb = __alloc_rx_skb(length, gfp_mask, 0);
+	len += NET_SKB_PAD;
 
-	if (likely(skb)) {
-		skb_reserve(skb, NET_SKB_PAD);
-		skb->dev = dev;
+	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
+	    (gfp_mask & (__GFP_WAIT | GFP_DMA))) {
+		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
+		if (!skb)
+			goto skb_fail;
+		goto skb_success;
 	}
 
+	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	len = SKB_DATA_ALIGN(len);
+
+	if (sk_memalloc_socks())
+		gfp_mask |= __GFP_MEMALLOC;
+
+	local_irq_save(flags);
+
+	nc = this_cpu_ptr(&netdev_alloc_cache);
+	data = __alloc_page_frag(nc, len, gfp_mask);
+	pfmemalloc = nc->pfmemalloc;
+
+	local_irq_restore(flags);
+
+	if (unlikely(!data))
+		return NULL;
+
+	skb = __build_skb(data, len);
+	if (unlikely(!skb)) {
+		skb_free_frag(data);
+		return NULL;
+	}
+
+	/* use OR instead of assignment to avoid clearing of bits in mask */
+	if (pfmemalloc)
+		skb->pfmemalloc = 1;
+	skb->head_frag = 1;
+
+skb_success:
+	skb_reserve(skb, NET_SKB_PAD);
+	skb->dev = dev;
+
+skb_fail:
 	return skb;
 }
 EXPORT_SYMBOL(__netdev_alloc_skb);
@@ -551,19 +471,49 @@
  *
  *	%NULL is returned if there is no free memory.
  */
-struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
-				 unsigned int length, gfp_t gfp_mask)
+struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
+				 gfp_t gfp_mask)
 {
+	struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 	struct sk_buff *skb;
+	void *data;
 
-	length += NET_SKB_PAD + NET_IP_ALIGN;
-	skb = __alloc_rx_skb(length, gfp_mask, SKB_ALLOC_NAPI);
+	len += NET_SKB_PAD + NET_IP_ALIGN;
 
-	if (likely(skb)) {
-		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-		skb->dev = napi->dev;
+	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
+	    (gfp_mask & (__GFP_WAIT | GFP_DMA))) {
+		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
+		if (!skb)
+			goto skb_fail;
+		goto skb_success;
 	}
 
+	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	len = SKB_DATA_ALIGN(len);
+
+	if (sk_memalloc_socks())
+		gfp_mask |= __GFP_MEMALLOC;
+
+	data = __alloc_page_frag(nc, len, gfp_mask);
+	if (unlikely(!data))
+		return NULL;
+
+	skb = __build_skb(data, len);
+	if (unlikely(!skb)) {
+		skb_free_frag(data);
+		return NULL;
+	}
+
+	/* use OR instead of assignment to avoid clearing of bits in mask */
+	if (nc->pfmemalloc)
+		skb->pfmemalloc = 1;
+	skb->head_frag = 1;
+
+skb_success:
+	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+	skb->dev = napi->dev;
+
+skb_fail:
 	return skb;
 }
 EXPORT_SYMBOL(__napi_alloc_skb);
@@ -611,10 +561,12 @@
 
 static void skb_free_head(struct sk_buff *skb)
 {
+	unsigned char *head = skb->head;
+
 	if (skb->head_frag)
-		put_page(virt_to_head_page(skb->head));
+		skb_free_frag(head);
 	else
-		kfree(skb->head);
+		kfree(head);
 }
 
 static void skb_release_data(struct sk_buff *skb)
@@ -1918,15 +1870,39 @@
 	return false;
 }
 
+ssize_t skb_socket_splice(struct sock *sk,
+			  struct pipe_inode_info *pipe,
+			  struct splice_pipe_desc *spd)
+{
+	int ret;
+
+	/* Drop the socket lock, otherwise we have reverse
+	 * locking dependencies between sk_lock and i_mutex
+	 * here as compared to sendfile(). We enter here
+	 * with the socket lock held, and splice_to_pipe() will
+	 * grab the pipe inode lock. For sendfile() emulation,
+	 * we call into ->sendpage() with the i_mutex lock held
+	 * and networking will grab the socket lock.
+	 */
+	release_sock(sk);
+	ret = splice_to_pipe(pipe, spd);
+	lock_sock(sk);
+
+	return ret;
+}
+
 /*
  * Map data from the skb to a pipe. Should handle both the linear part,
  * the fragments, and the frag list. It does NOT handle frag lists within
  * the frag list, if such a thing exists. We'd probably need to recurse to
  * handle that cleanly.
  */
-int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
 		    struct pipe_inode_info *pipe, unsigned int tlen,
-		    unsigned int flags)
+		    unsigned int flags,
+		    ssize_t (*splice_cb)(struct sock *,
+					 struct pipe_inode_info *,
+					 struct splice_pipe_desc *))
 {
 	struct partial_page partial[MAX_SKB_FRAGS];
 	struct page *pages[MAX_SKB_FRAGS];
@@ -1939,7 +1915,6 @@
 		.spd_release = sock_spd_release,
 	};
 	struct sk_buff *frag_iter;
-	struct sock *sk = skb->sk;
 	int ret = 0;
 
 	/*
@@ -1962,23 +1937,12 @@
 	}
 
 done:
-	if (spd.nr_pages) {
-		/*
-		 * Drop the socket lock, otherwise we have reverse
-		 * locking dependencies between sk_lock and i_mutex
-		 * here as compared to sendfile(). We enter here
-		 * with the socket lock held, and splice_to_pipe() will
-		 * grab the pipe inode lock. For sendfile() emulation,
-		 * we call into ->sendpage() with the i_mutex lock held
-		 * and networking will grab the socket lock.
-		 */
-		release_sock(sk);
-		ret = splice_to_pipe(pipe, &spd);
-		lock_sock(sk);
-	}
+	if (spd.nr_pages)
+		ret = splice_cb(sk, pipe, &spd);
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(skb_splice_bits);
 
 /**
  *	skb_store_bits - store bits from kernel buffer to skb
@@ -2963,6 +2927,24 @@
 }
 EXPORT_SYMBOL(skb_append_datato_frags);
 
+int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
+			 int offset, size_t size)
+{
+	int i = skb_shinfo(skb)->nr_frags;
+
+	if (skb_can_coalesce(skb, i, page, offset)) {
+		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
+	} else if (i < MAX_SKB_FRAGS) {
+		get_page(page);
+		skb_fill_page_desc(skb, i, page, offset, size);
+	} else {
+		return -EMSGSIZE;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(skb_append_pagefrags);
+
 /**
  *	skb_pull_rcsum - pull skb and update receive checksum
  *	@skb: buffer to update
@@ -4030,6 +4012,93 @@
 }
 EXPORT_SYMBOL(skb_checksum_setup);
 
+/**
+ * skb_checksum_maybe_trim - maybe trims the given skb
+ * @skb: the skb to check
+ * @transport_len: the data length beyond the network header
+ *
+ * Checks whether the given skb has data beyond the given transport length.
+ * If so, returns a cloned skb trimmed to this transport length.
+ * Otherwise returns the provided skb. Returns NULL in error cases
+ * (e.g. transport_len exceeds skb length or out-of-memory).
+ *
+ * Caller needs to set the skb transport header and release the returned skb.
+ * Provided skb is consumed.
+ */
+static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
+					       unsigned int transport_len)
+{
+	struct sk_buff *skb_chk;
+	unsigned int len = skb_transport_offset(skb) + transport_len;
+	int ret;
+
+	if (skb->len < len) {
+		kfree_skb(skb);
+		return NULL;
+	} else if (skb->len == len) {
+		return skb;
+	}
+
+	skb_chk = skb_clone(skb, GFP_ATOMIC);
+	kfree_skb(skb);
+
+	if (!skb_chk)
+		return NULL;
+
+	ret = pskb_trim_rcsum(skb_chk, len);
+	if (ret) {
+		kfree_skb(skb_chk);
+		return NULL;
+	}
+
+	return skb_chk;
+}
+
+/**
+ * skb_checksum_trimmed - validate checksum of an skb
+ * @skb: the skb to check
+ * @transport_len: the data length beyond the network header
+ * @skb_chkf: checksum function to use
+ *
+ * Applies the given checksum function skb_chkf to the provided skb.
+ * Returns a checked and maybe trimmed skb. Returns NULL on error.
+ *
+ * If the skb has data beyond the given transport length, then a
+ * trimmed & cloned skb is checked and returned.
+ *
+ * Caller needs to set the skb transport header and release the returned skb.
+ * Provided skb is consumed.
+ */
+struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
+				     unsigned int transport_len,
+				     __sum16(*skb_chkf)(struct sk_buff *skb))
+{
+	struct sk_buff *skb_chk;
+	unsigned int offset = skb_transport_offset(skb);
+	__sum16 ret;
+
+	skb_chk = skb_checksum_maybe_trim(skb, transport_len);
+	if (!skb_chk)
+		return NULL;
+
+	if (!pskb_may_pull(skb_chk, offset)) {
+		kfree_skb(skb_chk);
+		return NULL;
+	}
+
+	__skb_pull(skb_chk, offset);
+	ret = skb_chkf(skb_chk);
+	__skb_push(skb_chk, offset);
+
+	if (ret) {
+		kfree_skb(skb_chk);
+		return NULL;
+	}
+
+	return skb_chk;
+}
+EXPORT_SYMBOL(skb_checksum_trimmed);
+
 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
 {
 	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
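
A hypothetical caller of skb_checksum_trimmed(), sketched under a kernel-module context. The demo_* names are made up; the checksum function simply folds a ones-complement sum over the pulled transport data, and the input skb is consumed on every path, so only the returned clone may be touched afterwards:

#include <linux/skbuff.h>
#include <net/checksum.h>

static __sum16 demo_chkf(struct sk_buff *skb)
{
	/* 0 means the checksum over skb->len bytes verified */
	return csum_fold(skb_checksum(skb, 0, skb->len, 0));
}

static struct sk_buff *demo_validate(struct sk_buff *skb,
				     unsigned int transport_len)
{
	skb_reset_transport_header(skb);	/* caller's duty per the kernel-doc */
	return skb_checksum_trimmed(skb, transport_len, demo_chkf);
}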
diff --git a/net/core/sock.c b/net/core/sock.c
index dc30dc5..1e1fe9a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -131,6 +131,7 @@
 #include <linux/ipsec.h>
 #include <net/cls_cgroup.h>
 #include <net/netprio_cgroup.h>
+#include <linux/sock_diag.h>
 
 #include <linux/filter.h>
 
@@ -1393,9 +1394,10 @@
  *	@family: protocol family
  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
  *	@prot: struct proto associated with this new sock instance
+ *	@kern: is this to be a kernel socket?
  */
 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
-		      struct proto *prot)
+		      struct proto *prot, int kern)
 {
 	struct sock *sk;
 
@@ -1408,7 +1410,10 @@
 		 */
 		sk->sk_prot = sk->sk_prot_creator = prot;
 		sock_lock_init(sk);
-		sock_net_set(sk, get_net(net));
+		sk->sk_net_refcnt = kern ? 0 : 1;
+		if (likely(sk->sk_net_refcnt))
+			get_net(net);
+		sock_net_set(sk, net);
 		atomic_set(&sk->sk_wmem_alloc, 1);
 
 		sock_update_classid(sk);
@@ -1419,7 +1424,7 @@
 }
 EXPORT_SYMBOL(sk_alloc);
 
-static void __sk_free(struct sock *sk)
+void sk_destruct(struct sock *sk)
 {
 	struct sk_filter *filter;
 
@@ -1442,10 +1447,19 @@
 	if (sk->sk_peer_cred)
 		put_cred(sk->sk_peer_cred);
 	put_pid(sk->sk_peer_pid);
-	put_net(sock_net(sk));
+	if (likely(sk->sk_net_refcnt))
+		put_net(sock_net(sk));
 	sk_prot_free(sk->sk_prot_creator, sk);
 }
 
+static void __sk_free(struct sock *sk)
+{
+	if (unlikely(sock_diag_has_destroy_listeners(sk)))
+		sock_diag_broadcast_destroy(sk);
+	else
+		sk_destruct(sk);
+}
+
 void sk_free(struct sock *sk)
 {
 	/*
@@ -1458,25 +1472,6 @@
 }
 EXPORT_SYMBOL(sk_free);
 
-/*
- * Last sock_put should drop reference to sk->sk_net. It has already
- * been dropped in sk_change_net. Taking reference to stopping namespace
- * is not an option.
- * Take reference to a socket to remove it from hash _alive_ and after that
- * destroy it in the context of init_net.
- */
-void sk_release_kernel(struct sock *sk)
-{
-	if (sk == NULL || sk->sk_socket == NULL)
-		return;
-
-	sock_hold(sk);
-	sock_release(sk->sk_socket);
-	sock_net_set(sk, get_net(&init_net));
-	sock_put(sk);
-}
-EXPORT_SYMBOL(sk_release_kernel);
-
 static void sk_update_clone(const struct sock *sk, struct sock *newsk)
 {
 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
@@ -1592,6 +1587,8 @@
 
 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 {
+	u32 max_segs = 1;
+
 	__sk_dst_set(sk, dst);
 	sk->sk_route_caps = dst->dev->features;
 	if (sk->sk_route_caps & NETIF_F_GSO)
@@ -1603,9 +1600,10 @@
 		} else {
 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 			sk->sk_gso_max_size = dst->dev->gso_max_size;
-			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
+			max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
 		}
 	}
+	sk->sk_gso_max_segs = max_segs;
 }
 EXPORT_SYMBOL_GPL(sk_setup_caps);
 
@@ -2080,12 +2078,13 @@
 /**
 *	__sk_mem_reclaim - reclaim memory_allocated
  *	@sk: socket
+ *	@amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
  */
-void __sk_mem_reclaim(struct sock *sk)
+void __sk_mem_reclaim(struct sock *sk, int amount)
 {
-	sk_memory_allocated_sub(sk,
-				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
-	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
+	amount >>= SK_MEM_QUANTUM_SHIFT;
+	sk_memory_allocated_sub(sk, amount);
+	sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
 
 	if (sk_under_memory_pressure(sk) &&
 	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
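
The new amount parameter is rounded down to whole SK_MEM_QUANTUM units before being returned to memory_allocated, leaving the remainder in sk_forward_alloc. A user-space sketch of that arithmetic (SK_MEM_QUANTUM assumed to be 4096 for illustration):

#include <stdio.h>

#define SK_MEM_QUANTUM		4096	/* PAGE_SIZE on most builds */
#define SK_MEM_QUANTUM_SHIFT	12

int main(void)
{
	int forward_alloc = 10000;	/* bytes currently reserved */
	int amount = forward_alloc;	/* try to reclaim everything */

	amount >>= SK_MEM_QUANTUM_SHIFT;		 /* 2 whole quanta */
	forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT; /* 10000 - 8192 */

	printf("returned %d bytes, %d still reserved\n",
	       amount << SK_MEM_QUANTUM_SHIFT, forward_alloc);
	return 0;
}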
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 74dddf8..d79866c 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -5,6 +5,9 @@
 #include <net/net_namespace.h>
 #include <linux/module.h>
 #include <net/sock.h>
+#include <linux/kernel.h>
+#include <linux/tcp.h>
+#include <linux/workqueue.h>
 
 #include <linux/inet_diag.h>
 #include <linux/sock_diag.h>
@@ -12,6 +15,7 @@
 static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
 static DEFINE_MUTEX(sock_diag_table_mutex);
+static struct workqueue_struct *broadcast_wq;
 
 static u64 sock_gen_cookie(struct sock *sk)
 {
@@ -101,6 +105,62 @@
 }
 EXPORT_SYMBOL(sock_diag_put_filterinfo);
 
+struct broadcast_sk {
+	struct sock *sk;
+	struct work_struct work;
+};
+
+static size_t sock_diag_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct inet_diag_msg)
+	       + nla_total_size(sizeof(u8)) /* INET_DIAG_PROTOCOL */
+	       + nla_total_size(sizeof(struct tcp_info))); /* INET_DIAG_INFO */
+}
+
+static void sock_diag_broadcast_destroy_work(struct work_struct *work)
+{
+	struct broadcast_sk *bsk =
+		container_of(work, struct broadcast_sk, work);
+	struct sock *sk = bsk->sk;
+	const struct sock_diag_handler *hndl;
+	struct sk_buff *skb;
+	const enum sknetlink_groups group = sock_diag_destroy_group(sk);
+	int err = -1;
+
+	WARN_ON(group == SKNLGRP_NONE);
+
+	skb = nlmsg_new(sock_diag_nlmsg_size(), GFP_KERNEL);
+	if (!skb)
+		goto out;
+
+	mutex_lock(&sock_diag_table_mutex);
+	hndl = sock_diag_handlers[sk->sk_family];
+	if (hndl && hndl->get_info)
+		err = hndl->get_info(skb, sk);
+	mutex_unlock(&sock_diag_table_mutex);
+
+	if (!err)
+		nlmsg_multicast(sock_net(sk)->diag_nlsk, skb, 0, group,
+				GFP_KERNEL);
+	else
+		kfree_skb(skb);
+out:
+	sk_destruct(sk);
+	kfree(bsk);
+}
+
+void sock_diag_broadcast_destroy(struct sock *sk)
+{
+	/* Note, this function is often called from an interrupt context. */
+	struct broadcast_sk *bsk =
+		kmalloc(sizeof(struct broadcast_sk), GFP_ATOMIC);
+	if (!bsk)
+		return sk_destruct(sk);
+	bsk->sk = sk;
+	INIT_WORK(&bsk->work, sock_diag_broadcast_destroy_work);
+	queue_work(broadcast_wq, &bsk->work);
+}
+
 void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
 {
 	mutex_lock(&sock_diag_table_mutex);
@@ -211,10 +271,32 @@
 	mutex_unlock(&sock_diag_mutex);
 }
 
+static int sock_diag_bind(struct net *net, int group)
+{
+	switch (group) {
+	case SKNLGRP_INET_TCP_DESTROY:
+	case SKNLGRP_INET_UDP_DESTROY:
+		if (!sock_diag_handlers[AF_INET])
+			request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
+				       NETLINK_SOCK_DIAG, AF_INET);
+		break;
+	case SKNLGRP_INET6_TCP_DESTROY:
+	case SKNLGRP_INET6_UDP_DESTROY:
+		if (!sock_diag_handlers[AF_INET6])
+			request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
+				       NETLINK_SOCK_DIAG, AF_INET6);
+		break;
+	}
+	return 0;
+}
+
 static int __net_init diag_net_init(struct net *net)
 {
 	struct netlink_kernel_cfg cfg = {
+		.groups	= SKNLGRP_MAX,
 		.input	= sock_diag_rcv,
+		.bind	= sock_diag_bind,
+		.flags	= NL_CFG_F_NONROOT_RECV,
 	};
 
 	net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG, &cfg);
@@ -234,12 +316,15 @@
 
 static int __init sock_diag_init(void)
 {
+	broadcast_wq = alloc_workqueue("sock_diag_events", 0, 0);
+	BUG_ON(!broadcast_wq);
 	return register_pernet_subsys(&diag_net_ops);
 }
 
 static void __exit sock_diag_exit(void)
 {
 	unregister_pernet_subsys(&diag_net_ops);
+	destroy_workqueue(broadcast_wq);
 }
 
 module_init(sock_diag_init);
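
With NL_CFG_F_NONROOT_RECV set, any process can subscribe to the new destroy groups. A minimal user-space listener setup — the SOL_NETLINK fallback value and the group number are assumptions taken from the uapi headers of this era:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif
/* assumption: first real group in enum sknetlink_groups */
#define SKNLGRP_INET_TCP_DESTROY 1

int main(void)
{
	struct sockaddr_nl addr = { .nl_family = AF_NETLINK };
	int grp = SKNLGRP_INET_TCP_DESTROY;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

	if (fd < 0 ||
	    bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		       &grp, sizeof(grp)) < 0) {
		perror("sock_diag listener");
		return 1;
	}
	/* recv() now yields one inet_diag record per destroyed TCP
	 * socket; sock_diag_nlmsg_size() above bounds each message.
	 */
	return 0;
}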
diff --git a/net/core/stream.c b/net/core/stream.c
index 301c05f..d70f77a0 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -119,6 +119,7 @@
 	int err = 0;
 	long vm_wait = 0;
 	long current_timeo = *timeo_p;
+	bool noblock = (*timeo_p ? false : true);
 	DEFINE_WAIT(wait);
 
 	if (sk_stream_memory_free(sk))
@@ -131,8 +132,11 @@
 
 		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
 			goto do_error;
-		if (!*timeo_p)
+		if (!*timeo_p) {
+			if (noblock)
+				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 			goto do_nonblock;
+		}
 		if (signal_pending(current))
 			goto do_interrupted;
 		clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
diff --git a/net/core/utils.c b/net/core/utils.c
index 7b80388..a7732a0 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -304,13 +304,15 @@
 			      __be32 from, __be32 to, int pseudohdr)
 {
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
-		*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from),
-				 to));
+		csum_replace4(sum, from, to);
 		if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
-			skb->csum = ~csum_add(csum_sub(~(skb->csum), from), to);
+			skb->csum = ~csum_add(csum_sub(~(skb->csum),
+						       (__force __wsum)from),
+					      (__force __wsum)to);
 	} else if (pseudohdr)
-		*sum = ~csum_fold(csum_add(csum_sub(csum_unfold(*sum), from),
-				  to));
+		*sum = ~csum_fold(csum_add(csum_sub(csum_unfold(*sum),
+						    (__force __wsum)from),
+					   (__force __wsum)to));
 }
 EXPORT_SYMBOL(inet_proto_csum_replace4);
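
csum_replace4() performs the RFC 1624 incremental update HC' = ~(~HC + ~m + m'), sparing a full recomputation when one field changes. A self-contained user-space sketch over a 32-bit field, with end-around carry folding (values illustrative):

#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

static uint16_t csum_update32(uint16_t check, uint32_t from, uint32_t to)
{
	uint32_t sum = (uint16_t)~check;			/* ~HC */

	sum += (uint16_t)~(from >> 16) + (uint16_t)~(from & 0xffff); /* ~m */
	sum += (to >> 16) + (to & 0xffff);			/* m' */
	return ~fold(sum);					/* HC' */
}

int main(void)
{
	/* e.g. NAT rewriting 10.0.0.1 -> 192.168.0.1 in an IPv4 header */
	uint16_t check = 0x1c46;	/* illustrative old checksum */

	printf("new check = 0x%04x\n",
	       csum_update32(check, 0x0a000001, 0xc0a80001));
	return 0;
}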
 
diff --git a/net/dccp/diag.c b/net/dccp/diag.c
index 5a45f8d..2d84303 100644
--- a/net/dccp/diag.c
+++ b/net/dccp/diag.c
@@ -66,6 +66,7 @@
 	.dump_one	 = dccp_diag_dump_one,
 	.idiag_get_info	 = dccp_diag_get_info,
 	.idiag_type	 = IPPROTO_DCCP,
+	.idiag_info_size = sizeof(struct tcp_info),
 };
 
 static int __init dccp_diag_init(void)
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 754484b..675cf94 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -468,10 +468,10 @@
 	.obj_size		= sizeof(struct dn_sock),
 };
 
-static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
+static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp, int kern)
 {
 	struct dn_scp *scp;
-	struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto);
+	struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto, kern);
 
 	if  (!sk)
 		goto out;
@@ -693,7 +693,7 @@
 	}
 
 
-	if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL)) == NULL)
+	if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL, kern)) == NULL)
 		return -ENOBUFS;
 
 	sk->sk_protocol = protocol;
@@ -1096,7 +1096,7 @@
 
 	cb = DN_SKB_CB(skb);
 	sk->sk_ack_backlog--;
-	newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation);
+	newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, 0);
 	if (newsk == NULL) {
 		release_sock(sk);
 		kfree_skb(skb);
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 827cda56..04ffad3 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -345,6 +345,24 @@
 	return ret;
 }
 
+static int dsa_slave_port_attr_set(struct net_device *dev,
+				   struct switchdev_attr *attr)
+{
+	int ret = 0;
+
+	switch (attr->id) {
+	case SWITCHDEV_ATTR_PORT_STP_STATE:
+		if (attr->trans == SWITCHDEV_TRANS_COMMIT)
+			ret = dsa_slave_stp_update(dev, attr->u.stp_state);
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	return ret;
+}
+
 static int dsa_slave_bridge_port_join(struct net_device *dev,
 				      struct net_device *br)
 {
@@ -382,14 +400,20 @@
 	return ret;
 }
 
-static int dsa_slave_parent_id_get(struct net_device *dev,
-				   struct netdev_phys_item_id *psid)
+static int dsa_slave_port_attr_get(struct net_device *dev,
+				   struct switchdev_attr *attr)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
 
-	psid->id_len = sizeof(ds->index);
-	memcpy(&psid->id, &ds->index, psid->id_len);
+	switch (attr->id) {
+	case SWITCHDEV_ATTR_PORT_PARENT_ID:
+		attr->u.ppid.id_len = sizeof(ds->index);
+		memcpy(&attr->u.ppid.id, &ds->index, attr->u.ppid.id_len);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
 
 	return 0;
 }
@@ -675,9 +699,9 @@
 	.ndo_get_iflink		= dsa_slave_get_iflink,
 };
 
-static const struct swdev_ops dsa_slave_swdev_ops = {
-	.swdev_parent_id_get = dsa_slave_parent_id_get,
-	.swdev_port_stp_update = dsa_slave_stp_update,
+static const struct switchdev_ops dsa_slave_switchdev_ops = {
+	.switchdev_port_attr_get	= dsa_slave_port_attr_get,
+	.switchdev_port_attr_set	= dsa_slave_port_attr_set,
 };
 
 static void dsa_slave_adjust_link(struct net_device *dev)
@@ -810,12 +834,19 @@
 	return 0;
 }
 
+static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
+static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
+					    struct netdev_queue *txq,
+					    void *_unused)
+{
+	lockdep_set_class(&txq->_xmit_lock,
+			  &dsa_slave_netdev_xmit_lock_key);
+}
+
 int dsa_slave_suspend(struct net_device *slave_dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(slave_dev);
 
-	netif_device_detach(slave_dev);
-
 	if (p->phy) {
 		phy_stop(p->phy);
 		p->old_pause = -1;
@@ -859,7 +890,10 @@
 	eth_hw_addr_inherit(slave_dev, master);
 	slave_dev->tx_queue_len = 0;
 	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
-	slave_dev->swdev_ops = &dsa_slave_swdev_ops;
+	slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;
+
+	netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
+				 NULL);
 
 	SET_NETDEV_DEV(slave_dev, parent);
 	slave_dev->dev.of_node = ds->pd->port_dn[port];
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f3bad41..77e0f0e 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -58,6 +58,7 @@
 #include <net/ipv6.h>
 #include <net/ip.h>
 #include <net/dsa.h>
+#include <net/flow_dissector.h>
 #include <linux/uaccess.h>
 
 __setup("ether=", netdev_boot_setup);
@@ -130,9 +131,9 @@
 		return len;
 
 	/* parse any remaining L2/L3 headers, check for L4 */
-	if (!__skb_flow_dissect(NULL, &keys, data,
-				eth->h_proto, sizeof(*eth), len))
-		return max_t(u32, keys.thoff, sizeof(*eth));
+	if (!skb_flow_dissect_flow_keys_buf(&keys, data, eth->h_proto,
+					    sizeof(*eth), len))
+		return max_t(u32, keys.control.thoff, sizeof(*eth));
 
 	/* parse for any L4 headers */
 	return min_t(u32, __skb_get_poff(NULL, data, &keys, len), len);
@@ -156,10 +157,11 @@
 
 	skb->dev = dev;
 	skb_reset_mac_header(skb);
-	skb_pull_inline(skb, ETH_HLEN);
-	eth = eth_hdr(skb);
 
-	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
+	eth = (struct ethhdr *)skb->data;
+	skb_pull_inline(skb, ETH_HLEN);
+
+	if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
 		if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
 			skb->pkt_type = PACKET_BROADCAST;
 		else
@@ -178,7 +180,7 @@
 	if (unlikely(netdev_uses_dsa(dev)))
 		return htons(ETH_P_XDSA);
 
-	if (likely(ntohs(eth->h_proto) >= ETH_P_802_3_MIN))
+	if (likely(eth_proto_is_802_3(eth->h_proto)))
 		return eth->h_proto;
 
 	/*
@@ -468,6 +470,7 @@
 
 static struct packet_offload eth_packet_offload __read_mostly = {
 	.type = cpu_to_be16(ETH_P_TEB),
+	.priority = 10,
 	.callbacks = {
 		.gro_receive = eth_gro_receive,
 		.gro_complete = eth_gro_complete,
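
The eth_proto_is_802_3() helper adopted above avoids the ntohs() on the hot path by exploiting that ETH_P_802_3_MIN is 0x0600: whether h_proto names an Ethernet II protocol or an 802.3 length depends only on the first byte on the wire. A user-space analog of the idea:

#include <stdint.h>
#include <stdio.h>

static int proto_is_802_3(const uint8_t h_proto[2])
{
	return h_proto[0] >= 0x06;	/* high-order (wire-first) byte */
}

int main(void)
{
	const uint8_t ipv4[2] = { 0x08, 0x00 };	/* ETH_P_IP */
	const uint8_t llc[2]  = { 0x00, 0x2e };	/* an 802.3 length field */

	printf("ipv4: %d, llc: %d\n",
	       proto_is_802_3(ipv4), proto_is_802_3(llc));
	return 0;
}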
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 0ae5822..f20a387 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -55,27 +55,6 @@
 LIST_HEAD(lowpan_devices);
 static int lowpan_open_count;
 
-static __le16 lowpan_get_pan_id(const struct net_device *dev)
-{
-	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
-	return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
-}
-
-static __le16 lowpan_get_short_addr(const struct net_device *dev)
-{
-	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
-	return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
-}
-
-static u8 lowpan_get_dsn(const struct net_device *dev)
-{
-	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
-	return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
-}
-
 static struct header_ops lowpan_header_ops = {
 	.create	= lowpan_header_create,
 };
@@ -103,12 +82,6 @@
 	.ndo_start_xmit		= lowpan_xmit,
 };
 
-static struct ieee802154_mlme_ops lowpan_mlme = {
-	.get_pan_id = lowpan_get_pan_id,
-	.get_short_addr = lowpan_get_short_addr,
-	.get_dsn = lowpan_get_dsn,
-};
-
 static void lowpan_setup(struct net_device *dev)
 {
 	dev->addr_len		= IEEE802154_ADDR_LEN;
@@ -124,7 +97,6 @@
 
 	dev->netdev_ops		= &lowpan_netdev_ops;
 	dev->header_ops		= &lowpan_header_ops;
-	dev->ml_priv		= &lowpan_mlme;
 	dev->destructor		= free_netdev;
 	dev->features		|= NETIF_F_NETNS_LOCAL;
 }
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index 2349070..2597abb 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -190,6 +190,7 @@
 
 static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
 {
+	struct wpan_dev *wpan_dev = lowpan_dev_info(dev)->real_dev->ieee802154_ptr;
 	struct ieee802154_addr sa, da;
 	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
 	struct lowpan_addr_info info;
@@ -207,7 +208,7 @@
 
 	/* prepare wpan address data */
 	sa.mode = IEEE802154_ADDR_LONG;
-	sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+	sa.pan_id = wpan_dev->pan_id;
 	sa.extended_addr = ieee802154_devaddr_from_raw(saddr);
 
 	/* intra-PAN communications */
@@ -223,7 +224,7 @@
 	} else {
 		da.mode = IEEE802154_ADDR_LONG;
 		da.extended_addr = ieee802154_devaddr_from_raw(daddr);
-		cb->ackreq = true;
+		cb->ackreq = wpan_dev->frame_retries >= 0;
 	}
 
 	return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c
index 2ee00e8..b0248e9 100644
--- a/net/ieee802154/core.c
+++ b/net/ieee802154/core.c
@@ -121,8 +121,6 @@
 	/* atomic_inc_return makes it start at 1, make it start at 0 */
 	rdev->wpan_phy_idx--;
 
-	mutex_init(&rdev->wpan_phy.pib_lock);
-
 	INIT_LIST_HEAD(&rdev->wpan_dev_list);
 	device_initialize(&rdev->wpan_phy.dev);
 	dev_set_name(&rdev->wpan_phy.dev, PHY_NAME "%d", rdev->wpan_phy_idx);
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index 2b4955d..3503c38 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -97,8 +97,10 @@
 	BUG_ON(!phy);
 	get_device(&phy->dev);
 
-	short_addr = ops->get_short_addr(dev);
-	pan_id = ops->get_pan_id(dev);
+	rtnl_lock();
+	short_addr = dev->ieee802154_ptr->short_addr;
+	pan_id = dev->ieee802154_ptr->pan_id;
+	rtnl_unlock();
 
 	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
 	    nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
@@ -117,12 +119,12 @@
 		rtnl_unlock();
 
 		if (nla_put_s8(msg, IEEE802154_ATTR_TXPOWER,
-			       params.transmit_power) ||
+			       params.transmit_power / 100) ||
 		    nla_put_u8(msg, IEEE802154_ATTR_LBT_ENABLED, params.lbt) ||
 		    nla_put_u8(msg, IEEE802154_ATTR_CCA_MODE,
 			       params.cca.mode) ||
 		    nla_put_s32(msg, IEEE802154_ATTR_CCA_ED_LEVEL,
-				params.cca_ed_level) ||
+				params.cca_ed_level / 100) ||
 		    nla_put_u8(msg, IEEE802154_ATTR_CSMA_RETRIES,
 			       params.csma_retries) ||
 		    nla_put_u8(msg, IEEE802154_ATTR_CSMA_MIN_BE,
@@ -166,10 +168,7 @@
 	if (!dev)
 		return NULL;
 
-	/* Check on mtu is currently a hacked solution because lowpan
-	 * and wpan have the same ARPHRD type.
-	 */
-	if (dev->type != ARPHRD_IEEE802154 || dev->mtu != IEEE802154_MTU) {
+	if (dev->type != ARPHRD_IEEE802154) {
 		dev_put(dev);
 		return NULL;
 	}
@@ -244,7 +243,9 @@
 	addr.mode = IEEE802154_ADDR_LONG;
 	addr.extended_addr = nla_get_hwaddr(
 			info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]);
-	addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+	rtnl_lock();
+	addr.pan_id = dev->ieee802154_ptr->pan_id;
+	rtnl_unlock();
 
 	ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr,
 		nla_get_shortaddr(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]),
@@ -281,7 +282,9 @@
 		addr.short_addr = nla_get_shortaddr(
 				info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]);
 	}
-	addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+	rtnl_lock();
+	addr.pan_id = dev->ieee802154_ptr->pan_id;
+	rtnl_unlock();
 
 	ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr,
 			nla_get_u8(info->attrs[IEEE802154_ATTR_REASON]));
@@ -449,11 +452,7 @@
 
 	idx = 0;
 	for_each_netdev(net, dev) {
-		/* Check on mtu is currently a hacked solution because lowpan
-		 * and wpan have the same ARPHRD type.
-		 */
-		if (idx < s_idx || dev->type != ARPHRD_IEEE802154 ||
-		    dev->mtu != IEEE802154_MTU)
+		if (idx < s_idx || dev->type != ARPHRD_IEEE802154)
 			goto cont;
 
 		if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid,
@@ -510,7 +509,7 @@
 	ops->get_mac_params(dev, &params);
 
 	if (info->attrs[IEEE802154_ATTR_TXPOWER])
-		params.transmit_power = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]);
+		params.transmit_power = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]) * 100;
 
 	if (info->attrs[IEEE802154_ATTR_LBT_ENABLED])
 		params.lbt = nla_get_u8(info->attrs[IEEE802154_ATTR_LBT_ENABLED]);
@@ -519,7 +518,7 @@
 		params.cca.mode = nla_get_u8(info->attrs[IEEE802154_ATTR_CCA_MODE]);
 
 	if (info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL])
-		params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]);
+		params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]) * 100;
 
 	if (info->attrs[IEEE802154_ATTR_CSMA_RETRIES])
 		params.csma_retries = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_RETRIES]);
@@ -783,11 +782,7 @@
 	int rc;
 
 	for_each_netdev(net, dev) {
-		/* Check on mtu is currently a hacked solution because lowpan
-		 * and wpan have the same ARPHRD type.
-		 */
-		if (idx < first_dev || dev->type != ARPHRD_IEEE802154 ||
-		    dev->mtu != IEEE802154_MTU)
+		if (idx < first_dev || dev->type != ARPHRD_IEEE802154)
 			goto skip;
 
 		data.ops = ieee802154_mlme_ops(dev);
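
The "/ 100" and "* 100" above reflect a unit change: the legacy netlink attributes carry whole dBm, while the MAC parameters now store mBm (millibel-milliwatts). A trivial sketch of the conversion:

#include <stdio.h>

static int dbm_to_mbm(int dbm) { return dbm * 100; }
static int mbm_to_dbm(int mbm) { return mbm / 100; }

int main(void)
{
	printf("%d dBm = %d mBm\n", 8, dbm_to_mbm(8));
	printf("%d mBm = %d dBm\n", -2100, mbm_to_dbm(-2100));
	return 0;
}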
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index 346c666..77d7301 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -50,26 +50,26 @@
 	if (!hdr)
 		goto out;
 
-	mutex_lock(&phy->pib_lock);
+	rtnl_lock();
 	if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
 	    nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) ||
 	    nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel))
 		goto nla_put_failure;
 	for (i = 0; i < 32; i++) {
-		if (phy->channels_supported[i])
-			buf[pages++] = phy->channels_supported[i] | (i << 27);
+		if (phy->supported.channels[i])
+			buf[pages++] = phy->supported.channels[i] | (i << 27);
 	}
 	if (pages &&
 	    nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
 		    pages * sizeof(uint32_t), buf))
 		goto nla_put_failure;
-	mutex_unlock(&phy->pib_lock);
+	rtnl_unlock();
 	kfree(buf);
 	genlmsg_end(msg, hdr);
 	return 0;
 
 nla_put_failure:
-	mutex_unlock(&phy->pib_lock);
+	rtnl_unlock();
 	genlmsg_cancel(msg, hdr);
 out:
 	kfree(buf);
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index f3c12f6..68f2401 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -207,10 +207,11 @@
 	[NL802154_ATTR_PAGE] = { .type = NLA_U8, },
 	[NL802154_ATTR_CHANNEL] = { .type = NLA_U8, },
 
-	[NL802154_ATTR_TX_POWER] = { .type = NLA_S8, },
+	[NL802154_ATTR_TX_POWER] = { .type = NLA_S32, },
 
 	[NL802154_ATTR_CCA_MODE] = { .type = NLA_U32, },
 	[NL802154_ATTR_CCA_OPT] = { .type = NLA_U32, },
+	[NL802154_ATTR_CCA_ED_LEVEL] = { .type = NLA_S32, },
 
 	[NL802154_ATTR_SUPPORTED_CHANNEL] = { .type = NLA_U32, },
 
@@ -225,6 +226,10 @@
 	[NL802154_ATTR_MAX_FRAME_RETRIES] = { .type = NLA_S8, },
 
 	[NL802154_ATTR_LBT_MODE] = { .type = NLA_U8, },
+
+	[NL802154_ATTR_WPAN_PHY_CAPS] = { .type = NLA_NESTED },
+
+	[NL802154_ATTR_SUPPORTED_COMMANDS] = { .type = NLA_NESTED },
 };
 
 /* message building helper */
@@ -236,6 +241,28 @@
 }
 
 static int
+nl802154_put_flags(struct sk_buff *msg, int attr, u32 mask)
+{
+	struct nlattr *nl_flags = nla_nest_start(msg, attr);
+	int i;
+
+	if (!nl_flags)
+		return -ENOBUFS;
+
+	i = 0;
+	while (mask) {
+		if ((mask & 1) && nla_put_flag(msg, i))
+			return -ENOBUFS;
+
+		mask >>= 1;
+		i++;
+	}
+
+	nla_nest_end(msg, nl_flags);
+	return 0;
+}
+
+static int
 nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev,
 				struct sk_buff *msg)
 {
@@ -248,7 +275,7 @@
 
 	for (page = 0; page <= IEEE802154_MAX_PAGE; page++) {
 		if (nla_put_u32(msg, NL802154_ATTR_SUPPORTED_CHANNEL,
-				rdev->wpan_phy.channels_supported[page]))
+				rdev->wpan_phy.supported.channels[page]))
 			return -ENOBUFS;
 	}
 	nla_nest_end(msg, nl_page);
@@ -256,12 +283,100 @@
 	return 0;
 }
 
+static int
+nl802154_put_capabilities(struct sk_buff *msg,
+			  struct cfg802154_registered_device *rdev)
+{
+	const struct wpan_phy_supported *caps = &rdev->wpan_phy.supported;
+	struct nlattr *nl_caps, *nl_channels;
+	int i;
+
+	nl_caps = nla_nest_start(msg, NL802154_ATTR_WPAN_PHY_CAPS);
+	if (!nl_caps)
+		return -ENOBUFS;
+
+	nl_channels = nla_nest_start(msg, NL802154_CAP_ATTR_CHANNELS);
+	if (!nl_channels)
+		return -ENOBUFS;
+
+	for (i = 0; i <= IEEE802154_MAX_PAGE; i++) {
+		if (caps->channels[i]) {
+			if (nl802154_put_flags(msg, i, caps->channels[i]))
+				return -ENOBUFS;
+		}
+	}
+
+	nla_nest_end(msg, nl_channels);
+
+	if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) {
+		struct nlattr *nl_ed_lvls;
+
+		nl_ed_lvls = nla_nest_start(msg,
+					    NL802154_CAP_ATTR_CCA_ED_LEVELS);
+		if (!nl_ed_lvls)
+			return -ENOBUFS;
+
+		for (i = 0; i < caps->cca_ed_levels_size; i++) {
+			if (nla_put_s32(msg, i, caps->cca_ed_levels[i]))
+				return -ENOBUFS;
+		}
+
+		nla_nest_end(msg, nl_ed_lvls);
+	}
+
+	if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER) {
+		struct nlattr *nl_tx_pwrs;
+
+		nl_tx_pwrs = nla_nest_start(msg, NL802154_CAP_ATTR_TX_POWERS);
+		if (!nl_tx_pwrs)
+			return -ENOBUFS;
+
+		for (i = 0; i < caps->tx_powers_size; i++) {
+			if (nla_put_s32(msg, i, caps->tx_powers[i]))
+				return -ENOBUFS;
+		}
+
+		nla_nest_end(msg, nl_tx_pwrs);
+	}
+
+	if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE) {
+		if (nl802154_put_flags(msg, NL802154_CAP_ATTR_CCA_MODES,
+				       caps->cca_modes) ||
+		    nl802154_put_flags(msg, NL802154_CAP_ATTR_CCA_OPTS,
+				       caps->cca_opts))
+			return -ENOBUFS;
+	}
+
+	if (nla_put_u8(msg, NL802154_CAP_ATTR_MIN_MINBE, caps->min_minbe) ||
+	    nla_put_u8(msg, NL802154_CAP_ATTR_MAX_MINBE, caps->max_minbe) ||
+	    nla_put_u8(msg, NL802154_CAP_ATTR_MIN_MAXBE, caps->min_maxbe) ||
+	    nla_put_u8(msg, NL802154_CAP_ATTR_MAX_MAXBE, caps->max_maxbe) ||
+	    nla_put_u8(msg, NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS,
+		       caps->min_csma_backoffs) ||
+	    nla_put_u8(msg, NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS,
+		       caps->max_csma_backoffs) ||
+	    nla_put_s8(msg, NL802154_CAP_ATTR_MIN_FRAME_RETRIES,
+		       caps->min_frame_retries) ||
+	    nla_put_s8(msg, NL802154_CAP_ATTR_MAX_FRAME_RETRIES,
+		       caps->max_frame_retries) ||
+	    nl802154_put_flags(msg, NL802154_CAP_ATTR_IFTYPES,
+			       caps->iftypes) ||
+	    nla_put_u32(msg, NL802154_CAP_ATTR_LBT, caps->lbt))
+		return -ENOBUFS;
+
+	nla_nest_end(msg, nl_caps);
+
+	return 0;
+}
+
 static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev,
 				  enum nl802154_commands cmd,
 				  struct sk_buff *msg, u32 portid, u32 seq,
 				  int flags)
 {
+	struct nlattr *nl_cmds;
 	void *hdr;
+	int i;
 
 	hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
 	if (!hdr)
@@ -286,25 +401,76 @@
 		       rdev->wpan_phy.current_channel))
 		goto nla_put_failure;
 
-	/* supported channels array */
+	/* TODO remove this behaviour; we keep supporting it for a while
+	 * so users can migrate to the new behaviour.
+	 */
 	if (nl802154_send_wpan_phy_channels(rdev, msg))
 		goto nla_put_failure;
 
 	/* cca mode */
-	if (nla_put_u32(msg, NL802154_ATTR_CCA_MODE,
-			rdev->wpan_phy.cca.mode))
-		goto nla_put_failure;
+	if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE) {
+		if (nla_put_u32(msg, NL802154_ATTR_CCA_MODE,
+				rdev->wpan_phy.cca.mode))
+			goto nla_put_failure;
 
-	if (rdev->wpan_phy.cca.mode == NL802154_CCA_ENERGY_CARRIER) {
-		if (nla_put_u32(msg, NL802154_ATTR_CCA_OPT,
-				rdev->wpan_phy.cca.opt))
+		if (rdev->wpan_phy.cca.mode == NL802154_CCA_ENERGY_CARRIER) {
+			if (nla_put_u32(msg, NL802154_ATTR_CCA_OPT,
+					rdev->wpan_phy.cca.opt))
+				goto nla_put_failure;
+		}
+	}
+
+	if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER) {
+		if (nla_put_s32(msg, NL802154_ATTR_TX_POWER,
+				rdev->wpan_phy.transmit_power))
 			goto nla_put_failure;
 	}
 
-	if (nla_put_s8(msg, NL802154_ATTR_TX_POWER,
-		       rdev->wpan_phy.transmit_power))
+	if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) {
+		if (nla_put_s32(msg, NL802154_ATTR_CCA_ED_LEVEL,
+				rdev->wpan_phy.cca_ed_level))
+			goto nla_put_failure;
+	}
+
+	if (nl802154_put_capabilities(msg, rdev))
 		goto nla_put_failure;
 
+	nl_cmds = nla_nest_start(msg, NL802154_ATTR_SUPPORTED_COMMANDS);
+	if (!nl_cmds)
+		goto nla_put_failure;
+
+	i = 0;
+#define CMD(op, n)							\
+	do {								\
+		if (rdev->ops->op) {					\
+			i++;						\
+			if (nla_put_u32(msg, i, NL802154_CMD_ ## n))	\
+				goto nla_put_failure;			\
+		}							\
+	} while (0)
+
+	CMD(add_virtual_intf, NEW_INTERFACE);
+	CMD(del_virtual_intf, DEL_INTERFACE);
+	CMD(set_channel, SET_CHANNEL);
+	CMD(set_pan_id, SET_PAN_ID);
+	CMD(set_short_addr, SET_SHORT_ADDR);
+	CMD(set_backoff_exponent, SET_BACKOFF_EXPONENT);
+	CMD(set_max_csma_backoffs, SET_MAX_CSMA_BACKOFFS);
+	CMD(set_max_frame_retries, SET_MAX_FRAME_RETRIES);
+	CMD(set_lbt_mode, SET_LBT_MODE);
+
+	if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER)
+		CMD(set_tx_power, SET_TX_POWER);
+
+	if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL)
+		CMD(set_cca_ed_level, SET_CCA_ED_LEVEL);
+
+	if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE)
+		CMD(set_cca_mode, SET_CCA_MODE);
+
+#undef CMD
+	nla_nest_end(msg, nl_cmds);
+
 finish:
 	genlmsg_end(msg, hdr);
 	return 0;
@@ -575,7 +741,8 @@
 
 	if (info->attrs[NL802154_ATTR_IFTYPE]) {
 		type = nla_get_u32(info->attrs[NL802154_ATTR_IFTYPE]);
-		if (type > NL802154_IFTYPE_MAX)
+		if (type > NL802154_IFTYPE_MAX ||
+		    !(rdev->wpan_phy.supported.iftypes & BIT(type)))
 			return -EINVAL;
 	}
 
@@ -625,7 +792,8 @@
 	channel = nla_get_u8(info->attrs[NL802154_ATTR_CHANNEL]);
 
 	/* check 802.15.4 constraints */
-	if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL)
+	if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL ||
+	    !(rdev->wpan_phy.supported.channels[page] & BIT(channel)))
 		return -EINVAL;
 
 	return rdev_set_channel(rdev, page, channel);
@@ -636,12 +804,17 @@
 	struct cfg802154_registered_device *rdev = info->user_ptr[0];
 	struct wpan_phy_cca cca;
 
+	if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE))
+		return -EOPNOTSUPP;
+
 	if (!info->attrs[NL802154_ATTR_CCA_MODE])
 		return -EINVAL;
 
 	cca.mode = nla_get_u32(info->attrs[NL802154_ATTR_CCA_MODE]);
 	/* checking 802.15.4 constraints */
-	if (cca.mode < NL802154_CCA_ENERGY || cca.mode > NL802154_CCA_ATTR_MAX)
+	if (cca.mode < NL802154_CCA_ENERGY ||
+	    cca.mode > NL802154_CCA_ATTR_MAX ||
+	    !(rdev->wpan_phy.supported.cca_modes & BIT(cca.mode)))
 		return -EINVAL;
 
 	if (cca.mode == NL802154_CCA_ENERGY_CARRIER) {
@@ -649,13 +822,58 @@
 			return -EINVAL;
 
 		cca.opt = nla_get_u32(info->attrs[NL802154_ATTR_CCA_OPT]);
-		if (cca.opt > NL802154_CCA_OPT_ATTR_MAX)
+		if (cca.opt > NL802154_CCA_OPT_ATTR_MAX ||
+		    !(rdev->wpan_phy.supported.cca_opts & BIT(cca.opt)))
 			return -EINVAL;
 	}
 
 	return rdev_set_cca_mode(rdev, &cca);
 }
 
+static int nl802154_set_cca_ed_level(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	s32 ed_level;
+	int i;
+
+	if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL))
+		return -EOPNOTSUPP;
+
+	if (!info->attrs[NL802154_ATTR_CCA_ED_LEVEL])
+		return -EINVAL;
+
+	ed_level = nla_get_s32(info->attrs[NL802154_ATTR_CCA_ED_LEVEL]);
+
+	for (i = 0; i < rdev->wpan_phy.supported.cca_ed_levels_size; i++) {
+		if (ed_level == rdev->wpan_phy.supported.cca_ed_levels[i])
+			return rdev_set_cca_ed_level(rdev, ed_level);
+	}
+
+	return -EINVAL;
+}
+
+static int nl802154_set_tx_power(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	s32 power;
+	int i;
+
+	if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER))
+		return -EOPNOTSUPP;
+
+	if (!info->attrs[NL802154_ATTR_TX_POWER])
+		return -EINVAL;
+
+	power = nla_get_s32(info->attrs[NL802154_ATTR_TX_POWER]);
+
+	for (i = 0; i < rdev->wpan_phy.supported.tx_powers_size; i++) {
+		if (power == rdev->wpan_phy.supported.tx_powers[i])
+			return rdev_set_tx_power(rdev, power);
+	}
+
+	return -EINVAL;
+}
+
 static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg802154_registered_device *rdev = info->user_ptr[0];
@@ -668,14 +886,22 @@
 		return -EBUSY;
 
 	/* don't change address fields on monitor */
-	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
-		return -EINVAL;
-
-	if (!info->attrs[NL802154_ATTR_PAN_ID])
+	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR ||
+	    !info->attrs[NL802154_ATTR_PAN_ID])
 		return -EINVAL;
 
 	pan_id = nla_get_le16(info->attrs[NL802154_ATTR_PAN_ID]);
 
+	/* TODO
+	 * It is unclear whether the broadcast pan_id should be rejected
+	 * here. Broadcast is a valid setting; quoting 802.15.4:
+	 * "If this value is 0xffff, the device is not associated."
+	 *
+	 * Allowing it could be useful to simply disassociate a device.
+	 */
+	if (pan_id == cpu_to_le16(IEEE802154_PAN_ID_BROADCAST))
+		return -EINVAL;
+
 	return rdev_set_pan_id(rdev, wpan_dev, pan_id);
 }
 
@@ -691,14 +917,27 @@
 		return -EBUSY;
 
 	/* don't change address fields on monitor */
-	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
-		return -EINVAL;
-
-	if (!info->attrs[NL802154_ATTR_SHORT_ADDR])
+	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR ||
+	    !info->attrs[NL802154_ATTR_SHORT_ADDR])
 		return -EINVAL;
 
 	short_addr = nla_get_le16(info->attrs[NL802154_ATTR_SHORT_ADDR]);
 
+	/* TODO
+	 * It is unclear whether the broadcast short_addr should be
+	 * rejected here. Broadcast is a valid setting; quoting 802.15.4:
+	 * "A value of 0xfffe indicates that the device has
+	 * associated but has not been allocated an address. A
+	 * value of 0xffff indicates that the device does not
+	 * have a short address."
+	 *
+	 * Perhaps we should allow setting these values but
+	 * disallow socket communication while they are set.
+	 */
+	if (short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC) ||
+	    short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST))
+		return -EINVAL;
+
 	return rdev_set_short_addr(rdev, wpan_dev, short_addr);
 }
 
@@ -722,7 +961,11 @@
 	max_be = nla_get_u8(info->attrs[NL802154_ATTR_MAX_BE]);
 
 	/* check 802.15.4 constraints */
-	if (max_be < 3 || max_be > 8 || min_be > max_be)
+	if (min_be < rdev->wpan_phy.supported.min_minbe ||
+	    min_be > rdev->wpan_phy.supported.max_minbe ||
+	    max_be < rdev->wpan_phy.supported.min_maxbe ||
+	    max_be > rdev->wpan_phy.supported.max_maxbe ||
+	    min_be > max_be)
 		return -EINVAL;
 
 	return rdev_set_backoff_exponent(rdev, wpan_dev, min_be, max_be);
@@ -747,7 +990,8 @@
 			info->attrs[NL802154_ATTR_MAX_CSMA_BACKOFFS]);
 
 	/* check 802.15.4 constraints */
-	if (max_csma_backoffs > 5)
+	if (max_csma_backoffs < rdev->wpan_phy.supported.min_csma_backoffs ||
+	    max_csma_backoffs > rdev->wpan_phy.supported.max_csma_backoffs)
 		return -EINVAL;
 
 	return rdev_set_max_csma_backoffs(rdev, wpan_dev, max_csma_backoffs);
@@ -771,7 +1015,8 @@
 			info->attrs[NL802154_ATTR_MAX_FRAME_RETRIES]);
 
 	/* check 802.15.4 constraints */
-	if (max_frame_retries < -1 || max_frame_retries > 7)
+	if (max_frame_retries < rdev->wpan_phy.supported.min_frame_retries ||
+	    max_frame_retries > rdev->wpan_phy.supported.max_frame_retries)
 		return -EINVAL;
 
 	return rdev_set_max_frame_retries(rdev, wpan_dev, max_frame_retries);
@@ -791,6 +1036,9 @@
 		return -EINVAL;
 
 	mode = !!nla_get_u8(info->attrs[NL802154_ATTR_LBT_MODE]);
+	if (!wpan_phy_supported_bool(mode, rdev->wpan_phy.supported.lbt))
+		return -EINVAL;
+
 	return rdev_set_lbt_mode(rdev, wpan_dev, mode);
 }
 
@@ -937,6 +1185,22 @@
 				  NL802154_FLAG_NEED_RTNL,
 	},
 	{
+		.cmd = NL802154_CMD_SET_CCA_ED_LEVEL,
+		.doit = nl802154_set_cca_ed_level,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_SET_TX_POWER,
+		.doit = nl802154_set_tx_power,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
 		.cmd = NL802154_CMD_SET_PAN_ID,
 		.doit = nl802154_set_pan_id,
 		.policy = nl802154_policy,
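For reference, the nl802154_put_flags() helper added above serializes a
capability bitmask as one nested flag attribute per set bit, with the bit
index reused as the attribute type. A hedged sketch of the receiving side
(the 'nest' attribute and the decoding loop are assumptions, not part of
this patch):

	/* given mask = BIT(0) | BIT(2), the nest contains flag attrs of
	 * type 0 and type 2, so a dump consumer can rebuild the mask:
	 */
	struct nlattr *nl;
	u32 rebuilt = 0;
	int rem;

	nla_for_each_nested(nl, nest, rem)
		rebuilt |= BIT(nla_type(nl));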
diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h
index 7b5a9dd..b2155a1 100644
--- a/net/ieee802154/rdev-ops.h
+++ b/net/ieee802154/rdev-ops.h
@@ -75,6 +75,29 @@
 }
 
 static inline int
+rdev_set_cca_ed_level(struct cfg802154_registered_device *rdev, s32 ed_level)
+{
+	int ret;
+
+	trace_802154_rdev_set_cca_ed_level(&rdev->wpan_phy, ed_level);
+	ret = rdev->ops->set_cca_ed_level(&rdev->wpan_phy, ed_level);
+	trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+	return ret;
+}
+
+static inline int
+rdev_set_tx_power(struct cfg802154_registered_device *rdev,
+		  s32 power)
+{
+	int ret;
+
+	trace_802154_rdev_set_tx_power(&rdev->wpan_phy, power);
+	ret = rdev->ops->set_tx_power(&rdev->wpan_phy, power);
+	trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+	return ret;
+}
+
+static inline int
 rdev_set_pan_id(struct cfg802154_registered_device *rdev,
 		struct wpan_dev *wpan_dev, __le16 pan_id)
 {
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index b60c65f..b6eacf3 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -64,10 +64,8 @@
 			if (tmp->type != ARPHRD_IEEE802154)
 				continue;
 
-			pan_id = ieee802154_mlme_ops(tmp)->get_pan_id(tmp);
-			short_addr =
-				ieee802154_mlme_ops(tmp)->get_short_addr(tmp);
-
+			pan_id = tmp->ieee802154_ptr->pan_id;
+			short_addr = tmp->ieee802154_ptr->short_addr;
 			if (pan_id == addr->pan_id &&
 			    short_addr == addr->short_addr) {
 				dev = tmp;
@@ -228,15 +226,9 @@
 		goto out;
 	}
 
-	if (dev->type != ARPHRD_IEEE802154) {
-		err = -ENODEV;
-		goto out_put;
-	}
-
 	sk->sk_bound_dev_if = dev->ifindex;
 	sk_dst_reset(sk);
 
-out_put:
 	dev_put(dev);
 out:
 	release_sock(sk);
@@ -286,7 +278,7 @@
 
 	if (size > mtu) {
 		pr_debug("size = %Zu, mtu = %u\n", size, mtu);
-		err = -EINVAL;
+		err = -EMSGSIZE;
 		goto out_dev;
 	}
 
@@ -739,6 +731,12 @@
 	sock_recv_ts_and_drops(msg, sk, skb);
 
 	if (saddr) {
+		/* Clear the implicit padding in struct sockaddr_ieee802154
+		 * (16 bits between 'family' and 'addr') and in struct
+		 * ieee802154_addr_sa (16 bits at the end of the structure).
+		 */
+		memset(saddr, 0, sizeof(*saddr));
+
 		saddr->family = AF_IEEE802154;
 		ieee802154_addr_to_sa(&saddr->addr, &mac_cb(skb)->source);
 		*addr_len = sizeof(*saddr);
@@ -797,9 +795,9 @@
 	/* Data frame processing */
 	BUG_ON(dev->type != ARPHRD_IEEE802154);
 
-	pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
-	short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev);
-	hw_addr = ieee802154_devaddr_from_raw(dev->dev_addr);
+	pan_id = dev->ieee802154_ptr->pan_id;
+	short_addr = dev->ieee802154_ptr->short_addr;
+	hw_addr = dev->ieee802154_ptr->extended_addr;
 
 	read_lock(&dgram_lock);
 	sk_for_each(sk, &dgram_head) {
@@ -1014,7 +1012,7 @@
 	}
 
 	rc = -ENOMEM;
-	sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto);
+	sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto, kern);
 	if (!sk)
 		goto out;
 	rc = 0;
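The memset() added to the dgram recvmsg path above matters because the
whole sockaddr is copied to userspace: without it, the two padding regions
named in the comment would carry stale kernel stack bytes. The layout being
zeroed, with the padding locations inferred from that comment rather than
restated from the uapi header:

	struct sockaddr_ieee802154 {
		sa_family_t family;	/* 2 bytes */
		/* 2 bytes of implicit padding here -- previously leaked */
		struct ieee802154_addr_sa addr;
		/* ...and addr itself ends in 2 padding bytes, also leaked */
	};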
diff --git a/net/ieee802154/trace.h b/net/ieee802154/trace.h
index 5ac25eb..9b5f0eb 100644
--- a/net/ieee802154/trace.h
+++ b/net/ieee802154/trace.h
@@ -1,4 +1,4 @@
-/* Based on net/wireless/tracing.h */
+/* Based on net/wireless/trace.h */
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM cfg802154
@@ -56,7 +56,7 @@
 		__entry->type = type;
 		__entry->extended_addr = extended_addr;
 	),
-	TP_printk(WPAN_PHY_PR_FMT ", virtual intf name: %s, type: %d, ea %llx",
+	TP_printk(WPAN_PHY_PR_FMT ", virtual intf name: %s, type: %d, extended addr: 0x%llx",
 		  WPAN_PHY_PR_ARG, __get_str(vir_intf_name), __entry->type,
 		  __le64_to_cpu(__entry->extended_addr))
 );
@@ -93,6 +93,21 @@
 		  __entry->page, __entry->channel)
 );
 
+TRACE_EVENT(802154_rdev_set_tx_power,
+	TP_PROTO(struct wpan_phy *wpan_phy, s32 power),
+	TP_ARGS(wpan_phy, power),
+	TP_STRUCT__entry(
+		WPAN_PHY_ENTRY
+		__field(s32, power)
+	),
+	TP_fast_assign(
+		WPAN_PHY_ASSIGN;
+		__entry->power = power;
+	),
+	TP_printk(WPAN_PHY_PR_FMT ", mbm: %d", WPAN_PHY_PR_ARG,
+		  __entry->power)
+);
+
 TRACE_EVENT(802154_rdev_set_cca_mode,
 	TP_PROTO(struct wpan_phy *wpan_phy, const struct wpan_phy_cca *cca),
 	TP_ARGS(wpan_phy, cca),
@@ -108,6 +123,21 @@
 		  WPAN_CCA_PR_ARG)
 );
 
+TRACE_EVENT(802154_rdev_set_cca_ed_level,
+	TP_PROTO(struct wpan_phy *wpan_phy, s32 ed_level),
+	TP_ARGS(wpan_phy, ed_level),
+	TP_STRUCT__entry(
+		WPAN_PHY_ENTRY
+		__field(s32, ed_level)
+	),
+	TP_fast_assign(
+		WPAN_PHY_ASSIGN;
+		__entry->ed_level = ed_level;
+	),
+	TP_printk(WPAN_PHY_PR_FMT ", ed level: %d", WPAN_PHY_PR_ARG,
+		  __entry->ed_level)
+);
+
 DECLARE_EVENT_CLASS(802154_le16_template,
 	TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
 		 __le16 le16arg),
@@ -137,7 +167,7 @@
 	TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
 		 __le16 le16arg),
 	TP_ARGS(wpan_phy, wpan_dev, le16arg),
-	TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT ", sa: 0x%04x",
+	TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT ", short addr: 0x%04x",
 		  WPAN_PHY_PR_ARG, WPAN_DEV_PR_ARG,
 		  __le16_to_cpu(__entry->le16arg))
 );
@@ -160,7 +190,7 @@
 	),
 
 	TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
-		  ", min be: %d, max_be: %d", WPAN_PHY_PR_ARG,
+		  ", min be: %d, max be: %d", WPAN_PHY_PR_ARG,
 		  WPAN_DEV_PR_ARG, __entry->min_be, __entry->max_be)
 );
 
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index bd29016..6fb3c90 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -331,8 +331,8 @@
 	  When this option is enabled IP tunnels can be configured to use
 	  FOU or GUE encapsulation.
 
-config GENEVE
-	tristate "Generic Network Virtualization Encapsulation (Geneve)"
+config GENEVE_CORE
+	tristate "Generic Network Virtualization Encapsulation library"
 	depends on INET
 	select NET_UDP_TUNNEL
 	---help---
@@ -615,6 +615,22 @@
 	For further details see:
 	  http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp-final.pdf
 
+config TCP_CONG_CDG
+	tristate "CAIA Delay-Gradient (CDG)"
+	default n
+	---help---
+	CAIA Delay-Gradient (CDG) is a TCP congestion control that modifies
+	the TCP sender in order to:
+
+	  o Use the delay gradient as a congestion signal.
+	  o Back off with an average probability that is independent of the RTT.
+	  o Coexist with flows that use loss-based congestion control.
+	  o Tolerate packet loss unrelated to congestion.
+
+	For further details see:
+	  D.A. Hayes and G. Armitage. "Revisiting TCP congestion control using
+	  delay gradients." In Networking 2011. Preprint: http://goo.gl/No3vdg
+
 choice
 	prompt "Default TCP congestion control"
 	default DEFAULT_CUBIC
@@ -646,6 +662,9 @@
 	config DEFAULT_DCTCP
 		bool "DCTCP" if TCP_CONG_DCTCP=y
 
+	config DEFAULT_CDG
+		bool "CDG" if TCP_CONG_CDG=y
+
 	config DEFAULT_RENO
 		bool "Reno"
 endchoice
@@ -668,6 +687,7 @@
 	default "veno" if DEFAULT_VENO
 	default "reno" if DEFAULT_RENO
 	default "dctcp" if DEFAULT_DCTCP
+	default "cdg" if DEFAULT_CDG
 	default "cubic"
 
 config TCP_MD5SIG
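The CDG help text above compresses the algorithm quite a bit. Roughly, per
the cited Hayes/Armitage paper, the sender samples the minimum and maximum
RTT once per round, differences consecutive rounds to obtain delay
gradients, and backs off probabilistically as the smoothed gradient grows.
This is a sketch of the paper's model, not a restatement of tcp_cdg.c:

	/* per measurement round n (illustrative arrays; units are usec) */
	s32 g_min = rtt_min[n] - rtt_min[n - 1];
	s32 g_max = rtt_max[n] - rtt_max[n - 1];

	/* the gradients are smoothed over a moving window, and the sender
	 * backs off with probability
	 *
	 *	P(backoff) = 1 - exp(-g_smoothed / G)
	 *
	 * for a scaling constant G, which is what makes the average backoff
	 * probability independent of the RTT, as the help text says.
	 */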
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 518c04e..efc43f3 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -42,6 +42,7 @@
 obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
 obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o
 obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
+obj-$(CONFIG_TCP_CONG_CDG) += tcp_cdg.o
 obj-$(CONFIG_TCP_CONG_CUBIC) += tcp_cubic.o
 obj-$(CONFIG_TCP_CONG_DCTCP) += tcp_dctcp.o
 obj-$(CONFIG_TCP_CONG_WESTWOOD) += tcp_westwood.o
@@ -56,7 +57,7 @@
 obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
 obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o
 obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
-obj-$(CONFIG_GENEVE) += geneve.o
+obj-$(CONFIG_GENEVE_CORE) += geneve_core.o
 
 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
 		      xfrm4_output.o xfrm4_protocol.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 8b47a4d..9532ee8 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -228,6 +228,8 @@
 				err = 0;
 			if (err)
 				goto out;
+
+			tcp_fastopen_init_key_once(true);
 		}
 		err = inet_csk_listen_start(sk, backlog);
 		if (err)
@@ -317,7 +319,7 @@
 	WARN_ON(!answer_prot->slab);
 
 	err = -ENOBUFS;
-	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
+	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
 	if (!sk)
 		goto out;
 
@@ -488,7 +490,8 @@
 		inet->inet_saddr = 0;  /* Use device */
 
 	/* Make sure we are allowed to bind here. */
-	if (sk->sk_prot->get_port(sk, snum)) {
+	if ((snum || !inet->bind_address_no_port) &&
+	    sk->sk_prot->get_port(sk, snum)) {
 		inet->inet_saddr = inet->inet_rcv_saddr = 0;
 		err = -EADDRINUSE;
 		goto out_release_sock;
@@ -1430,7 +1433,7 @@
 			 struct net *net)
 {
 	struct socket *sock;
-	int rc = sock_create_kern(family, type, protocol, &sock);
+	int rc = sock_create_kern(net, family, type, protocol, &sock);
 
 	if (rc == 0) {
 		*sk = sock->sk;
@@ -1440,8 +1443,6 @@
 		 * we do not wish this socket to see incoming packets.
 		 */
 		(*sk)->sk_prot->unhash(*sk);
-
-		sk_change_net(*sk, net);
 	}
 	return rc;
 }
@@ -1597,7 +1598,7 @@
 	 */
 	seqlock_init(&net->ipv4.ip_local_ports.lock);
 	net->ipv4.ip_local_ports.range[0] =  32768;
-	net->ipv4.ip_local_ports.range[1] =  61000;
+	net->ipv4.ip_local_ports.range[1] =  60999;
 
 	seqlock_init(&net->ipv4.ping_group_range.lock);
 	/*
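The bind_address_no_port test added to the bind path above defers port
allocation when the socket opted in through the IP_BIND_ADDRESS_NO_PORT
socket option introduced in this same series. A hedged userspace sketch of
the intended pattern:

	int one = 1;
	struct sockaddr_in src = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),	/* or a specific source */
		.sin_port = 0,				/* no port yet */
	};

	setsockopt(fd, SOL_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one));
	bind(fd, (struct sockaddr *)&src, sizeof(src));
	/* the local port is only picked inside connect(), so many sockets
	 * bound to the same source address can still share the port space
	 */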
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 419d23c..7498716 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -2169,6 +2169,8 @@
 					"igmpv2_unsolicited_report_interval"),
 		DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
 					"igmpv3_unsolicited_report_interval"),
+		DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN,
+					"ignore_routes_with_linkdown"),
 
 		DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
 		DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 872494e..6bbc549 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -280,7 +280,7 @@
 		fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
 		fl4.flowi4_scope = scope;
 		fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
-		if (!fib_lookup(net, &fl4, &res))
+		if (!fib_lookup(net, &fl4, &res, 0))
 			return FIB_RES_PREFSRC(net, res);
 	} else {
 		scope = RT_SCOPE_LINK;
@@ -319,7 +319,7 @@
 	fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;
 
 	net = dev_net(dev);
-	if (fib_lookup(net, &fl4, &res))
+	if (fib_lookup(net, &fl4, &res, 0))
 		goto last_resort;
 	if (res.type != RTN_UNICAST &&
 	    (res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev)))
@@ -354,7 +354,7 @@
 	fl4.flowi4_oif = dev->ifindex;
 
 	ret = 0;
-	if (fib_lookup(net, &fl4, &res) == 0) {
+	if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) {
 		if (res.type == RTN_UNICAST)
 			ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
 	}
@@ -1063,9 +1063,9 @@
 	net->ipv4.fibnl = NULL;
 }
 
-static void fib_disable_ip(struct net_device *dev, int force)
+static void fib_disable_ip(struct net_device *dev, unsigned long event)
 {
-	if (fib_sync_down_dev(dev, force))
+	if (fib_sync_down_dev(dev, event))
 		fib_flush(dev_net(dev));
 	rt_cache_flush(dev_net(dev));
 	arp_ifdown(dev);
@@ -1081,7 +1081,7 @@
 	case NETDEV_UP:
 		fib_add_ifaddr(ifa);
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-		fib_sync_up(dev);
+		fib_sync_up(dev, RTNH_F_DEAD);
 #endif
 		atomic_inc(&net->ipv4.dev_addr_genid);
 		rt_cache_flush(dev_net(dev));
@@ -1093,7 +1093,7 @@
 			/* Last address was deleted from this interface.
 			 * Disable IP.
 			 */
-			fib_disable_ip(dev, 1);
+			fib_disable_ip(dev, event);
 		} else {
 			rt_cache_flush(dev_net(dev));
 		}
@@ -1107,9 +1107,10 @@
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct in_device *in_dev;
 	struct net *net = dev_net(dev);
+	unsigned int flags;
 
 	if (event == NETDEV_UNREGISTER) {
-		fib_disable_ip(dev, 2);
+		fib_disable_ip(dev, event);
 		rt_flush_dev(dev);
 		return NOTIFY_DONE;
 	}
@@ -1124,16 +1125,22 @@
 			fib_add_ifaddr(ifa);
 		} endfor_ifa(in_dev);
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-		fib_sync_up(dev);
+		fib_sync_up(dev, RTNH_F_DEAD);
 #endif
 		atomic_inc(&net->ipv4.dev_addr_genid);
 		rt_cache_flush(net);
 		break;
 	case NETDEV_DOWN:
-		fib_disable_ip(dev, 0);
+		fib_disable_ip(dev, event);
 		break;
-	case NETDEV_CHANGEMTU:
 	case NETDEV_CHANGE:
+		flags = dev_get_flags(dev);
+		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
+			fib_sync_up(dev, RTNH_F_LINKDOWN);
+		else
+			fib_sync_down_dev(dev, event);
+		/* fall through */
+	case NETDEV_CHANGEMTU:
 		rt_cache_flush(net);
 		break;
 	}
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 5615198..18123d5 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -47,11 +47,12 @@
 #endif
 };
 
-int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
+int __fib_lookup(struct net *net, struct flowi4 *flp,
+		 struct fib_result *res, unsigned int flags)
 {
 	struct fib_lookup_arg arg = {
 		.result = res,
-		.flags = FIB_LOOKUP_NOREF,
+		.flags = flags,
 	};
 	int err;
 
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 8d695b6..3bfccd8 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -266,7 +266,7 @@
 #ifdef CONFIG_IP_ROUTE_CLASSID
 		    nh->nh_tclassid != onh->nh_tclassid ||
 #endif
-		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
+		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_COMPARE_MASK))
 			return -1;
 		onh++;
 	} endfor_nexthops(fi);
@@ -318,7 +318,7 @@
 		    nfi->fib_type == fi->fib_type &&
 		    memcmp(nfi->fib_metrics, fi->fib_metrics,
 			   sizeof(u32) * RTAX_MAX) == 0 &&
-		    ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 &&
+		    !((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
 		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
 			return fi;
 	}
@@ -604,6 +604,8 @@
 				return -ENODEV;
 			if (!(dev->flags & IFF_UP))
 				return -ENETDOWN;
+			if (!netif_carrier_ok(dev))
+				nh->nh_flags |= RTNH_F_LINKDOWN;
 			nh->nh_dev = dev;
 			dev_hold(dev);
 			nh->nh_scope = RT_SCOPE_LINK;
@@ -621,7 +623,8 @@
 			/* It is not necessary, but requires a bit of thinking */
 			if (fl4.flowi4_scope < RT_SCOPE_LINK)
 				fl4.flowi4_scope = RT_SCOPE_LINK;
-			err = fib_lookup(net, &fl4, &res);
+			err = fib_lookup(net, &fl4, &res,
+					 FIB_LOOKUP_IGNORE_LINKSTATE);
 			if (err) {
 				rcu_read_unlock();
 				return err;
@@ -636,6 +639,8 @@
 		if (!dev)
 			goto out;
 		dev_hold(dev);
+		if (!netif_carrier_ok(dev))
+			nh->nh_flags |= RTNH_F_LINKDOWN;
 		err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
 	} else {
 		struct in_device *in_dev;
@@ -654,6 +659,8 @@
 		nh->nh_dev = in_dev->dev;
 		dev_hold(nh->nh_dev);
 		nh->nh_scope = RT_SCOPE_HOST;
+		if (!netif_carrier_ok(nh->nh_dev))
+			nh->nh_flags |= RTNH_F_LINKDOWN;
 		err = 0;
 	}
 out:
@@ -713,8 +720,6 @@
 			struct hlist_head *dest;
 			unsigned int new_hash;
 
-			hlist_del(&fi->fib_hash);
-
 			new_hash = fib_info_hashfn(fi);
 			dest = &new_info_hash[new_hash];
 			hlist_add_head(&fi->fib_hash, dest);
@@ -731,8 +736,6 @@
 			struct hlist_head *ldest;
 			unsigned int new_hash;
 
-			hlist_del(&fi->fib_lhash);
-
 			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
 			ldest = &new_laddrhash[new_hash];
 			hlist_add_head(&fi->fib_lhash, ldest);
@@ -924,11 +927,17 @@
 		if (!nh->nh_dev)
 			goto failure;
 	} else {
+		int linkdown = 0;
+
 		change_nexthops(fi) {
 			err = fib_check_nh(cfg, fi, nexthop_nh);
 			if (err != 0)
 				goto failure;
+			if (nexthop_nh->nh_flags & RTNH_F_LINKDOWN)
+				linkdown++;
 		} endfor_nexthops(fi)
+		if (linkdown == fi->fib_nhs)
+			fi->fib_flags |= RTNH_F_LINKDOWN;
 	}
 
 	if (fi->fib_prefsrc) {
@@ -1027,12 +1036,20 @@
 	    nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc))
 		goto nla_put_failure;
 	if (fi->fib_nhs == 1) {
+		struct in_device *in_dev;
+
 		if (fi->fib_nh->nh_gw &&
 		    nla_put_in_addr(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
 			goto nla_put_failure;
 		if (fi->fib_nh->nh_oif &&
 		    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
 			goto nla_put_failure;
+		if (fi->fib_nh->nh_flags & RTNH_F_LINKDOWN) {
+			in_dev = __in_dev_get_rcu(fi->fib_nh->nh_dev);
+			if (in_dev &&
+			    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
+				rtm->rtm_flags |= RTNH_F_DEAD;
+		}
 #ifdef CONFIG_IP_ROUTE_CLASSID
 		if (fi->fib_nh[0].nh_tclassid &&
 		    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
@@ -1049,11 +1066,19 @@
 			goto nla_put_failure;
 
 		for_nexthops(fi) {
+			struct in_device *in_dev;
+
 			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
 			if (!rtnh)
 				goto nla_put_failure;
 
 			rtnh->rtnh_flags = nh->nh_flags & 0xFF;
+			if (nh->nh_flags & RTNH_F_LINKDOWN) {
+				in_dev = __in_dev_get_rcu(nh->nh_dev);
+				if (in_dev &&
+				    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
+					rtnh->rtnh_flags |= RTNH_F_DEAD;
+			}
 			rtnh->rtnh_hops = nh->nh_weight - 1;
 			rtnh->rtnh_ifindex = nh->nh_oif;
 
@@ -1107,7 +1132,7 @@
 	return ret;
 }
 
-int fib_sync_down_dev(struct net_device *dev, int force)
+int fib_sync_down_dev(struct net_device *dev, unsigned long event)
 {
 	int ret = 0;
 	int scope = RT_SCOPE_NOWHERE;
@@ -1116,7 +1141,8 @@
 	struct hlist_head *head = &fib_info_devhash[hash];
 	struct fib_nh *nh;
 
-	if (force)
+	if (event == NETDEV_UNREGISTER ||
+	    event == NETDEV_DOWN)
 		scope = -1;
 
 	hlist_for_each_entry(nh, head, nh_hash) {
@@ -1133,7 +1159,15 @@
 				dead++;
 			else if (nexthop_nh->nh_dev == dev &&
 				 nexthop_nh->nh_scope != scope) {
-				nexthop_nh->nh_flags |= RTNH_F_DEAD;
+				switch (event) {
+				case NETDEV_DOWN:
+				case NETDEV_UNREGISTER:
+					nexthop_nh->nh_flags |= RTNH_F_DEAD;
+					/* fall through */
+				case NETDEV_CHANGE:
+					nexthop_nh->nh_flags |= RTNH_F_LINKDOWN;
+					break;
+				}
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 				spin_lock_bh(&fib_multipath_lock);
 				fi->fib_power -= nexthop_nh->nh_power;
@@ -1143,14 +1177,23 @@
 				dead++;
 			}
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-			if (force > 1 && nexthop_nh->nh_dev == dev) {
+			if (event == NETDEV_UNREGISTER &&
+			    nexthop_nh->nh_dev == dev) {
 				dead = fi->fib_nhs;
 				break;
 			}
 #endif
 		} endfor_nexthops(fi)
 		if (dead == fi->fib_nhs) {
-			fi->fib_flags |= RTNH_F_DEAD;
+			switch (event) {
+			case NETDEV_DOWN:
+			case NETDEV_UNREGISTER:
+				fi->fib_flags |= RTNH_F_DEAD;
+				/* fall through */
+			case NETDEV_CHANGE:
+				fi->fib_flags |= RTNH_F_LINKDOWN;
+				break;
+			}
 			ret++;
 		}
 	}
@@ -1214,13 +1257,11 @@
 	return;
 }
 
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
-
 /*
  * Dead device goes up. We wake up dead nexthops.
  * It takes sense only on multipath routes.
  */
-int fib_sync_up(struct net_device *dev)
+int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
 {
 	struct fib_info *prev_fi;
 	unsigned int hash;
@@ -1247,7 +1288,7 @@
 		prev_fi = fi;
 		alive = 0;
 		change_nexthops(fi) {
-			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
+			if (!(nexthop_nh->nh_flags & nh_flags)) {
 				alive++;
 				continue;
 			}
@@ -1258,14 +1299,18 @@
 			    !__in_dev_get_rtnl(dev))
 				continue;
 			alive++;
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
 			spin_lock_bh(&fib_multipath_lock);
 			nexthop_nh->nh_power = 0;
-			nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
+			nexthop_nh->nh_flags &= ~nh_flags;
 			spin_unlock_bh(&fib_multipath_lock);
+#else
+			nexthop_nh->nh_flags &= ~nh_flags;
+#endif
 		} endfor_nexthops(fi)
 
 		if (alive > 0) {
-			fi->fib_flags &= ~RTNH_F_DEAD;
+			fi->fib_flags &= ~nh_flags;
 			ret++;
 		}
 	}
@@ -1273,6 +1318,8 @@
 	return ret;
 }
 
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+
 /*
  * The algorithm is suboptimal, but it provides really
  * fair weighted route distribution.
@@ -1280,16 +1327,22 @@
 void fib_select_multipath(struct fib_result *res)
 {
 	struct fib_info *fi = res->fi;
+	struct in_device *in_dev;
 	int w;
 
 	spin_lock_bh(&fib_multipath_lock);
 	if (fi->fib_power <= 0) {
 		int power = 0;
 		change_nexthops(fi) {
-			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
-				power += nexthop_nh->nh_weight;
-				nexthop_nh->nh_power = nexthop_nh->nh_weight;
-			}
+			in_dev = __in_dev_get_rcu(nexthop_nh->nh_dev);
+			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
+				continue;
+			if (in_dev &&
+			    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
+			    nexthop_nh->nh_flags & RTNH_F_LINKDOWN)
+				continue;
+			power += nexthop_nh->nh_weight;
+			nexthop_nh->nh_power = nexthop_nh->nh_weight;
 		} endfor_nexthops(fi);
 		fi->fib_power = power;
 		if (power <= 0) {
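The reworked fib_select_multipath() above encodes a three-way per-nexthop
decision: dead nexthops never contribute weight, link-down nexthops are
skipped only when the interface opted in via the new
ignore_routes_with_linkdown sysctl, and everything else participates. A
condensed restatement as a predicate (the helper itself is illustrative,
not part of the patch):

	static bool nh_usable(const struct fib_nh *nh,
			      const struct in_device *in_dev)
	{
		if (nh->nh_flags & RTNH_F_DEAD)
			return false;
		if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
		    (nh->nh_flags & RTNH_F_LINKDOWN))
			return false;
		return true;
	}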
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 09b62e1..15d3261 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -72,6 +72,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/vmalloc.h>
 #include <net/net_namespace.h>
 #include <net/ip.h>
 #include <net/protocol.h>
@@ -324,13 +325,15 @@
 
 static struct key_vector *leaf_new(t_key key, struct fib_alias *fa)
 {
-	struct tnode *kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
-	struct key_vector *l = kv->kv;
+	struct key_vector *l;
+	struct tnode *kv;
 
+	kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
 	if (!kv)
 		return NULL;
 
 	/* initialize key vector */
+	l = kv->kv;
 	l->key = key;
 	l->pos = 0;
 	l->bits = 0;
@@ -345,24 +348,26 @@
 
 static struct key_vector *tnode_new(t_key key, int pos, int bits)
 {
-	struct tnode *tnode = tnode_alloc(bits);
 	unsigned int shift = pos + bits;
-	struct key_vector *tn = tnode->kv;
+	struct key_vector *tn;
+	struct tnode *tnode;
 
 	/* verify bits and pos their msb bits clear and values are valid */
 	BUG_ON(!bits || (shift > KEYLENGTH));
 
-	pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0),
-		 sizeof(struct key_vector *) << bits);
-
+	tnode = tnode_alloc(bits);
 	if (!tnode)
 		return NULL;
 
+	pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0),
+		 sizeof(struct key_vector *) << bits);
+
 	if (bits == KEYLENGTH)
 		tnode->full_children = 1;
 	else
 		tnode->empty_children = 1ul << bits;
 
+	tn = tnode->kv;
 	tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
 	tn->pos = pos;
 	tn->bits = bits;
@@ -1077,6 +1082,7 @@
 	struct trie *t = (struct trie *)tb->tb_data;
 	struct fib_alias *fa, *new_fa;
 	struct key_vector *l, *tp;
+	unsigned int nlflags = 0;
 	struct fib_info *fi;
 	u8 plen = cfg->fc_dst_len;
 	u8 slen = KEYLENGTH - plen;
@@ -1166,13 +1172,13 @@
 			new_fa->fa_slen = fa->fa_slen;
 			new_fa->tb_id = tb->tb_id;
 
-			err = netdev_switch_fib_ipv4_add(key, plen, fi,
-							 new_fa->fa_tos,
-							 cfg->fc_type,
-							 cfg->fc_nlflags,
-							 tb->tb_id);
+			err = switchdev_fib_ipv4_add(key, plen, fi,
+						     new_fa->fa_tos,
+						     cfg->fc_type,
+						     cfg->fc_nlflags,
+						     tb->tb_id);
 			if (err) {
-				netdev_switch_fib_ipv4_abort(fi);
+				switchdev_fib_ipv4_abort(fi);
 				kmem_cache_free(fn_alias_kmem, new_fa);
 				goto out;
 			}
@@ -1196,7 +1202,9 @@
 		if (fa_match)
 			goto out;
 
-		if (!(cfg->fc_nlflags & NLM_F_APPEND))
+		if (cfg->fc_nlflags & NLM_F_APPEND)
+			nlflags = NLM_F_APPEND;
+		else
 			fa = fa_first;
 	}
 	err = -ENOENT;
@@ -1216,12 +1224,10 @@
 	new_fa->tb_id = tb->tb_id;
 
 	/* (Optionally) offload fib entry to switch hardware. */
-	err = netdev_switch_fib_ipv4_add(key, plen, fi, tos,
-					 cfg->fc_type,
-					 cfg->fc_nlflags,
-					 tb->tb_id);
+	err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type,
+				     cfg->fc_nlflags, tb->tb_id);
 	if (err) {
-		netdev_switch_fib_ipv4_abort(fi);
+		switchdev_fib_ipv4_abort(fi);
 		goto out_free_new_fa;
 	}
 
@@ -1235,12 +1241,12 @@
 
 	rt_cache_flush(cfg->fc_nlinfo.nl_net);
 	rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id,
-		  &cfg->fc_nlinfo, 0);
+		  &cfg->fc_nlinfo, nlflags);
 succeeded:
 	return 0;
 
 out_sw_fib_del:
-	netdev_switch_fib_ipv4_del(key, plen, fi, tos, cfg->fc_type, tb->tb_id);
+	switchdev_fib_ipv4_del(key, plen, fi, tos, cfg->fc_type, tb->tb_id);
 out_free_new_fa:
 	kmem_cache_free(fn_alias_kmem, new_fa);
 out:
@@ -1406,9 +1412,15 @@
 			continue;
 		for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
 			const struct fib_nh *nh = &fi->fib_nh[nhsel];
+			struct in_device *in_dev = __in_dev_get_rcu(nh->nh_dev);
 
 			if (nh->nh_flags & RTNH_F_DEAD)
 				continue;
+			if (in_dev &&
+			    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
+			    nh->nh_flags & RTNH_F_LINKDOWN &&
+			    !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
+				continue;
 			if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
 				continue;
 
@@ -1518,8 +1530,8 @@
 	if (!fa_to_delete)
 		return -ESRCH;
 
-	netdev_switch_fib_ipv4_del(key, plen, fa_to_delete->fa_info, tos,
-				   cfg->fc_type, tb->tb_id);
+	switchdev_fib_ipv4_del(key, plen, fa_to_delete->fa_info, tos,
+			       cfg->fc_type, tb->tb_id);
 
 	rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id,
 		  &cfg->fc_nlinfo, 0);
@@ -1768,10 +1780,9 @@
 			if (!fi || !(fi->fib_flags & RTNH_F_OFFLOAD))
 				continue;
 
-			netdev_switch_fib_ipv4_del(n->key,
-						   KEYLENGTH - fa->fa_slen,
-						   fi, fa->fa_tos,
-						   fa->fa_type, tb->tb_id);
+			switchdev_fib_ipv4_del(n->key, KEYLENGTH - fa->fa_slen,
+					       fi, fa->fa_tos, fa->fa_type,
+					       tb->tb_id);
 		}
 
 		/* update leaf slen */
@@ -1836,10 +1847,9 @@
 				continue;
 			}
 
-			netdev_switch_fib_ipv4_del(n->key,
-						   KEYLENGTH - fa->fa_slen,
-						   fi, fa->fa_tos,
-						   fa->fa_type, tb->tb_id);
+			switchdev_fib_ipv4_del(n->key, KEYLENGTH - fa->fa_slen,
+					       fi, fa->fa_tos, fa->fa_type,
+					       tb->tb_id);
 			hlist_del_rcu(&fa->fa_list);
 			fib_release_info(fa->fa_info);
 			alias_free_mem_rcu(fa);
@@ -2057,11 +2067,12 @@
 static struct key_vector *fib_trie_get_first(struct fib_trie_iter *iter,
 					     struct trie *t)
 {
-	struct key_vector *n, *pn = t->kv;
+	struct key_vector *n, *pn;
 
 	if (!t)
 		return NULL;
 
+	pn = t->kv;
 	n = rcu_dereference(pn->tnode[0]);
 	if (!n)
 		return NULL;
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve_core.c
similarity index 97%
rename from net/ipv4/geneve.c
rename to net/ipv4/geneve_core.c
index 8986e63..311a4ba 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve_core.c
@@ -60,11 +60,6 @@
 
 static int geneve_net_id;
 
-static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
-{
-	return (struct genevehdr *)(udp_hdr(skb) + 1);
-}
-
 static struct geneve_sock *geneve_find_sock(struct net *net,
 					    sa_family_t family, __be16 port)
 {
@@ -435,7 +430,7 @@
 	if (rc)
 		return rc;
 
-	pr_info("Geneve driver\n");
+	pr_info("Geneve core logic\n");
 
 	return 0;
 }
@@ -449,5 +444,4 @@
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jesse Gross <jesse@nicira.com>");
-MODULE_DESCRIPTION("Driver for GENEVE encapsulated traffic");
-MODULE_ALIAS_RTNL_LINK("geneve");
+MODULE_DESCRIPTION("Driver library for GENEVE encapsulated traffic");
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index a3a697f..651cdf6 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1339,6 +1339,168 @@
 }
 EXPORT_SYMBOL(ip_mc_inc_group);
 
+static int ip_mc_check_iphdr(struct sk_buff *skb)
+{
+	const struct iphdr *iph;
+	unsigned int len;
+	unsigned int offset = skb_network_offset(skb) + sizeof(*iph);
+
+	if (!pskb_may_pull(skb, offset))
+		return -EINVAL;
+
+	iph = ip_hdr(skb);
+
+	if (iph->version != 4 || ip_hdrlen(skb) < sizeof(*iph))
+		return -EINVAL;
+
+	offset += ip_hdrlen(skb) - sizeof(*iph);
+
+	if (!pskb_may_pull(skb, offset))
+		return -EINVAL;
+
+	iph = ip_hdr(skb);
+
+	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
+		return -EINVAL;
+
+	len = skb_network_offset(skb) + ntohs(iph->tot_len);
+	if (skb->len < len || len < offset)
+		return -EINVAL;
+
+	skb_set_transport_header(skb, offset);
+
+	return 0;
+}
+
+static int ip_mc_check_igmp_reportv3(struct sk_buff *skb)
+{
+	unsigned int len = skb_transport_offset(skb);
+
+	len += sizeof(struct igmpv3_report);
+
+	return pskb_may_pull(skb, len) ? 0 : -EINVAL;
+}
+
+static int ip_mc_check_igmp_query(struct sk_buff *skb)
+{
+	unsigned int len = skb_transport_offset(skb);
+
+	len += sizeof(struct igmphdr);
+	if (skb->len < len)
+		return -EINVAL;
+
+	/* IGMPv{1,2}? */
+	if (skb->len != len) {
+		/* or IGMPv3? */
+		len += sizeof(struct igmpv3_query) - sizeof(struct igmphdr);
+		if (skb->len < len || !pskb_may_pull(skb, len))
+			return -EINVAL;
+	}
+
+	/* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the all-systems multicast
+	 * destination address (224.0.0.1) for general queries
+	 */
+	if (!igmp_hdr(skb)->group &&
+	    ip_hdr(skb)->daddr != htonl(INADDR_ALLHOSTS_GROUP))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int ip_mc_check_igmp_msg(struct sk_buff *skb)
+{
+	switch (igmp_hdr(skb)->type) {
+	case IGMP_HOST_LEAVE_MESSAGE:
+	case IGMP_HOST_MEMBERSHIP_REPORT:
+	case IGMPV2_HOST_MEMBERSHIP_REPORT:
+		return 0;
+	case IGMPV3_HOST_MEMBERSHIP_REPORT:
+		return ip_mc_check_igmp_reportv3(skb);
+	case IGMP_HOST_MEMBERSHIP_QUERY:
+		return ip_mc_check_igmp_query(skb);
+	default:
+		return -ENOMSG;
+	}
+}
+
+static inline __sum16 ip_mc_validate_checksum(struct sk_buff *skb)
+{
+	return skb_checksum_simple_validate(skb);
+}
+
+static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
+{
+	struct sk_buff *skb_chk;
+	unsigned int transport_len;
+	unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
+	int ret;
+
+	transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
+
+	skb_get(skb);
+	skb_chk = skb_checksum_trimmed(skb, transport_len,
+				       ip_mc_validate_checksum);
+	if (!skb_chk)
+		return -EINVAL;
+
+	if (!pskb_may_pull(skb_chk, len)) {
+		kfree_skb(skb_chk);
+		return -EINVAL;
+	}
+
+	ret = ip_mc_check_igmp_msg(skb_chk);
+	if (ret) {
+		kfree_skb(skb_chk);
+		return ret;
+	}
+
+	if (skb_trimmed)
+		*skb_trimmed = skb_chk;
+	else
+		kfree_skb(skb_chk);
+
+	return 0;
+}
+
+/**
+ * ip_mc_check_igmp - checks whether this is a sane IGMP packet
+ * @skb: the skb to validate
+ * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
+ *
+ * Checks whether an IPv4 packet is a valid IGMP packet. If so, sets the
+ * skb's network and transport headers accordingly and returns zero.
+ *
+ * -EINVAL: A broken packet was detected, i.e. it violates some internet
+ *  standard
+ * -ENOMSG: IP header validation succeeded but it is not an IGMP packet.
+ * -ENOMEM: A memory allocation failure happened.
+ *
+ * Optionally, an skb pointer may be provided via skb_trimmed (or set it
+ * to NULL): after an IGMP packet has been parsed successfully, it will
+ * point to an skb whose tail is aligned to the end of the IP packet. This
+ * is either the originally provided skb or a trimmed, cloned version if
+ * the frame carried data beyond the IP packet. A cloned skb lets the
+ * original skb and its full frame stay unchanged (which may be desirable
+ * for layer 2 frame jugglers).
+ *
+ * The caller needs to release a reference count from any returned skb_trimmed.
+ */
+int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
+{
+	int ret = ip_mc_check_iphdr(skb);
+
+	if (ret < 0)
+		return ret;
+
+	if (ip_hdr(skb)->protocol != IPPROTO_IGMP)
+		return -ENOMSG;
+
+	return __ip_mc_check_igmp(skb, skb_trimmed);
+}
+EXPORT_SYMBOL(ip_mc_check_igmp);
+
 /*
  *	Resend IGMP JOIN report; used by netdev notifier.
  */
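The ip_mc_check_igmp() helper exported above is aimed at callers such as
bridge multicast snooping. A hedged usage sketch that honors the
skb_trimmed contract from the kernel-doc (the surrounding caller context is
assumed):

	struct sk_buff *skb_trimmed = NULL;
	int err;

	err = ip_mc_check_igmp(skb, &skb_trimmed);
	if (err == -ENOMSG) {
		/* sane IP packet, just not IGMP: handle normally */
	} else if (err < 0) {
		/* broken packet or allocation failure: drop it */
	} else {
		/* parse IGMP from skb_trimmed, whose tail is aligned to
		 * the IP packet end even if the frame carried extra bytes
		 */
	}

	if (skb_trimmed)
		kfree_skb(skb_trimmed);	/* releases the reference taken above */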
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 8976ca4..60021d0 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -99,6 +99,7 @@
 	struct net *net = sock_net(sk);
 	int smallest_size = -1, smallest_rover;
 	kuid_t uid = sock_i_uid(sk);
+	int attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
 
 	local_bh_disable();
 	if (!snum) {
@@ -106,6 +107,14 @@
 
 again:
 		inet_get_local_port_range(net, &low, &high);
+		if (attempt_half) {
+			int half = low + ((high - low) >> 1);
+
+			if (attempt_half == 1)
+				high = half;
+			else
+				low = half;
+		}
 		remaining = (high - low) + 1;
 		smallest_rover = rover = prandom_u32() % remaining + low;
 
@@ -127,11 +136,6 @@
 					    (tb->num_owners < smallest_size || smallest_size == -1)) {
 						smallest_size = tb->num_owners;
 						smallest_rover = rover;
-						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
-						    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
-							snum = smallest_rover;
-							goto tb_found;
-						}
 					}
 					if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
 						snum = rover;
@@ -159,6 +163,11 @@
 				snum = smallest_rover;
 				goto have_snum;
 			}
+			if (attempt_half == 1) {
+				/* OK we now try the upper half of the range */
+				attempt_half = 2;
+				goto again;
+			}
 			goto fail;
 		}
 		/* OK, here is the one we will use.  HEAD is
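With the attempt_half logic above, listening sockets (sk_reuse ==
SK_CAN_REUSE) first search only the lower half of the local port range and
fall back to the upper half on exhaustion, complementing the connect()-side
parity change in inet_hashtables.c below. Worked numbers, assuming the new
default range set elsewhere in this merge:

	/* low = 32768, high = 60999 */
	int half = low + ((high - low) >> 1);	/* 32768 + 14115 = 46883 */

	/* attempt_half == 1: search [32768, 46883]
	 * attempt_half == 2: search [46883, 60999]
	 */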
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 4d32262..9bc2667 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -151,6 +151,10 @@
 			if (nla_put_u8(skb, INET_DIAG_TCLASS,
 				       inet6_sk(sk)->tclass) < 0)
 				goto errout;
+
+		if (ipv6_only_sock(sk) &&
+		    nla_put_u8(skb, INET_DIAG_SKV6ONLY, 1))
+			goto errout;
 	}
 #endif
 
@@ -200,9 +204,9 @@
 	}
 #undef EXPIRES_IN_MS
 
-	if (ext & (1 << (INET_DIAG_INFO - 1))) {
+	if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) {
 		attr = nla_reserve(skb, INET_DIAG_INFO,
-				   sizeof(struct tcp_info));
+				   handler->idiag_info_size);
 		if (!attr)
 			goto errout;
 
@@ -746,7 +750,7 @@
 
 	entry.family = sk->sk_family;
 
-	spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+	spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
 
 	lopt = icsk->icsk_accept_queue.listen_opt;
 	if (!lopt || !listen_sock_qlen(lopt))
@@ -794,7 +798,7 @@
 	}
 
 out:
-	spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+	spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
 
 	return err;
 }
@@ -1078,14 +1082,62 @@
 	return inet_diag_get_exact(skb, h, nlmsg_data(h));
 }
 
+static
+int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk)
+{
+	const struct inet_diag_handler *handler;
+	struct nlmsghdr *nlh;
+	struct nlattr *attr;
+	struct inet_diag_msg *r;
+	void *info = NULL;
+	int err = 0;
+
+	nlh = nlmsg_put(skb, 0, 0, SOCK_DIAG_BY_FAMILY, sizeof(*r), 0);
+	if (!nlh)
+		return -ENOMEM;
+
+	r = nlmsg_data(nlh);
+	memset(r, 0, sizeof(*r));
+	inet_diag_msg_common_fill(r, sk);
+	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_STREAM)
+		r->id.idiag_sport = inet_sk(sk)->inet_sport;
+	r->idiag_state = sk->sk_state;
+
+	err = nla_put_u8(skb, INET_DIAG_PROTOCOL, sk->sk_protocol);
+	if (err) {
+		nlmsg_cancel(skb, nlh);
+		return err;
+	}
+
+	handler = inet_diag_lock_handler(sk->sk_protocol);
+	if (IS_ERR(handler)) {
+		inet_diag_unlock_handler(handler);
+		nlmsg_cancel(skb, nlh);
+		return PTR_ERR(handler);
+	}
+
+	attr = handler->idiag_info_size
+		? nla_reserve(skb, INET_DIAG_INFO, handler->idiag_info_size)
+		: NULL;
+	if (attr)
+		info = nla_data(attr);
+
+	handler->idiag_get_info(sk, r, info);
+	inet_diag_unlock_handler(handler);
+
+	nlmsg_end(skb, nlh);
+	return 0;
+}
+
 static const struct sock_diag_handler inet_diag_handler = {
 	.family = AF_INET,
 	.dump = inet_diag_handler_dump,
+	.get_info = inet_diag_handler_get_info,
 };
 
 static const struct sock_diag_handler inet6_diag_handler = {
 	.family = AF_INET6,
 	.dump = inet_diag_handler_dump,
+	.get_info = inet_diag_handler_get_info,
 };
 
 int inet_diag_register(const struct inet_diag_handler *h)
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index c6fb80b..5f9b063 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -18,6 +18,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
+#include <linux/vmalloc.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_hashtables.h>
@@ -90,10 +91,6 @@
 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
 		    const unsigned short snum)
 {
-	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
-
-	atomic_inc(&hashinfo->bsockets);
-
 	inet_sk(sk)->inet_num = snum;
 	sk_add_bind_node(sk, &tb->owners);
 	tb->num_owners++;
@@ -111,8 +108,6 @@
 	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
 	struct inet_bind_bucket *tb;
 
-	atomic_dec(&hashinfo->bsockets);
-
 	spin_lock(&head->lock);
 	tb = inet_csk(sk)->icsk_bind_hash;
 	__sk_del_bind_node(sk);
@@ -399,9 +394,10 @@
 	return -EADDRNOTAVAIL;
 }
 
-static inline u32 inet_sk_port_offset(const struct sock *sk)
+static u32 inet_sk_port_offset(const struct sock *sk)
 {
 	const struct inet_sock *inet = inet_sk(sk);
+
 	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
 					  inet->inet_daddr,
 					  inet->inet_dport);
@@ -507,8 +503,14 @@
 		inet_get_local_port_range(net, &low, &high);
 		remaining = (high - low) + 1;
 
+		/* By starting with offset being an even number,
+		 * we tend to leave about 50% of ports for other uses,
+		 * like bind(0).
+		 */
+		offset &= ~1;
+
 		local_bh_disable();
-		for (i = 1; i <= remaining; i++) {
+		for (i = 0; i < remaining; i++) {
 			port = low + (i + offset) % remaining;
 			if (inet_is_local_reserved_port(net, port))
 				continue;
@@ -552,7 +554,7 @@
 		return -EADDRNOTAVAIL;
 
 ok:
-		hint += i;
+		hint += (i + 2) & ~1;
 
 		/* Head lock still held and bh's disabled */
 		inet_bind_hash(sk, tb, port);
@@ -599,7 +601,11 @@
 int inet_hash_connect(struct inet_timewait_death_row *death_row,
 		      struct sock *sk)
 {
-	return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
+	u32 port_offset = 0;
+
+	if (!inet_sk(sk)->inet_num)
+		port_offset = inet_sk_port_offset(sk);
+	return __inet_hash_connect(death_row, sk, port_offset,
 				   __inet_check_established);
 }
 EXPORT_SYMBOL_GPL(inet_hash_connect);
@@ -608,7 +614,6 @@
 {
 	int i;
 
-	atomic_set(&h->bsockets, 0);
 	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
 		spin_lock_init(&h->listening_hash[i].lock);
 		INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
@@ -616,3 +621,33 @@
 		}
 }
 EXPORT_SYMBOL_GPL(inet_hashinfo_init);
+
+int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
+{
+	unsigned int i, nblocks = 1;
+
+	if (sizeof(spinlock_t) != 0) {
+		/* allocate 2 cache lines or at least one spinlock per cpu */
+		nblocks = max_t(unsigned int,
+				2 * L1_CACHE_BYTES / sizeof(spinlock_t),
+				1);
+		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
+
+		/* no more locks than number of hash buckets */
+		nblocks = min(nblocks, hashinfo->ehash_mask + 1);
+
+		hashinfo->ehash_locks =	kmalloc_array(nblocks, sizeof(spinlock_t),
+						      GFP_KERNEL | __GFP_NOWARN);
+		if (!hashinfo->ehash_locks)
+			hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t));
+
+		if (!hashinfo->ehash_locks)
+			return -ENOMEM;
+
+		for (i = 0; i < nblocks; i++)
+			spin_lock_init(&hashinfo->ehash_locks[i]);
+	}
+	hashinfo->ehash_locks_mask = nblocks - 1;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);
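Two ideas land in the inet_hashtables.c hunks above. First,
__inet_hash_connect() now clears the low bit of the starting offset and
advances the shared hint by an even stride, so ports consumed by connect()
tend to start on even numbers, leaving odd ports for bind(0); a worked
example with hypothetical values:

	/* remaining = 28232, computed offset = 12345 -> offset &= ~1 -> 12344 */
	port = low + (0 + 12344) % remaining;	/* first candidate is even */

	/* after succeeding at probe i, the next search resumes at
	 * hint += (i + 2) & ~1, an even distance past this port
	 */

Second, inet_ehash_locks_alloc() sizes the lock array at about two cache
lines of spinlocks per possible CPU, rounded up to a power of two and
capped at the hash size: with 64-byte cache lines, 4-byte spinlocks and 8
possible CPUs, that is roundup_pow_of_two(32 * 8) = 256 locks.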
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 00ec8d5..2ffbd16 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -170,7 +170,7 @@
 }
 EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
 
-void tw_timer_handler(unsigned long data)
+static void tw_timer_handler(unsigned long data)
 {
 	struct inet_timewait_sock *tw = (struct inet_timewait_sock *)data;
 
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 3674484..2d3aa40 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -39,17 +39,21 @@
 #include <net/route.h>
 #include <net/xfrm.h>
 
-static bool ip_may_fragment(const struct sk_buff *skb)
-{
-	return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
-		skb->ignore_df;
-}
-
 static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 {
 	if (skb->len <= mtu)
 		return false;
 
+	if (unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0))
+		return false;
+
+	/* original fragment exceeds mtu and DF is set */
+	if (unlikely(IPCB(skb)->frag_max_size > mtu))
+		return true;
+
+	if (skb->ignore_df)
+		return false;
+
 	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
 		return false;
 
@@ -114,7 +118,7 @@
 
 	IPCB(skb)->flags |= IPSKB_FORWARDED;
 	mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
-	if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, mtu)) {
+	if (ip_exceeds_mtu(skb, mtu)) {
 		IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
 			  htonl(mtu));
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index cc1da6d..a50dc6d4 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -75,6 +75,7 @@
 	__be16		id;
 	u8		protocol;
 	u8		ecn; /* RFC3168 support */
+	u16		max_df_size; /* largest frag with DF set seen */
 	int             iif;
 	unsigned int    rid;
 	struct inet_peer *peer;
@@ -173,6 +174,15 @@
 	inet_frag_kill(&ipq->q, &ip4_frags);
 }
 
+static bool frag_expire_skip_icmp(u32 user)
+{
+	return user == IP_DEFRAG_AF_PACKET ||
+	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
+					 __IP_DEFRAG_CONNTRACK_IN_END) ||
+	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
+					 __IP_DEFRAG_CONNTRACK_BRIDGE_IN);
+}
+
 /*
  * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
  */
@@ -217,10 +227,8 @@
 		/* Only an end host needs to send an ICMP
 		 * "Fragment Reassembly Timeout" message, per RFC792.
 		 */
-		if (qp->user == IP_DEFRAG_AF_PACKET ||
-		    ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
-		     (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
-		     (skb_rtable(head)->rt_type != RTN_LOCAL)))
+		if (frag_expire_skip_icmp(qp->user) &&
+		    (skb_rtable(head)->rt_type != RTN_LOCAL))
 			goto out_rcu_unlock;
 
 		/* Send an ICMP "Fragment Reassembly Timeout" message. */
@@ -319,6 +327,7 @@
 {
 	struct sk_buff *prev, *next;
 	struct net_device *dev;
+	unsigned int fragsize;
 	int flags, offset;
 	int ihl, end;
 	int err = -ENOENT;
@@ -474,9 +483,14 @@
 	if (offset == 0)
 		qp->q.flags |= INET_FRAG_FIRST_IN;
 
+	fragsize = skb->len + ihl;
+
+	if (fragsize > qp->q.max_size)
+		qp->q.max_size = fragsize;
+
 	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
-	    skb->len + ihl > qp->q.max_size)
-		qp->q.max_size = skb->len + ihl;
+	    fragsize > qp->max_df_size)
+		qp->max_df_size = fragsize;
 
 	if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 	    qp->q.meat == qp->q.len) {
@@ -606,13 +620,27 @@
 	head->next = NULL;
 	head->dev = dev;
 	head->tstamp = qp->q.stamp;
-	IPCB(head)->frag_max_size = qp->q.max_size;
+	IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
 
 	iph = ip_hdr(head);
-	/* max_size != 0 implies at least one fragment had IP_DF set */
-	iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
 	iph->tot_len = htons(len);
 	iph->tos |= ecn;
+
+	/* When we set IP_DF on a refragmented skb we must also force a
+	 * call to ip_fragment to avoid forwarding a DF-skb of size s while
+	 * the original sender only sent fragments of size f (where f < s).
+	 *
+	 * We only set DF/IPSKB_FRAG_PMTU if such a DF fragment was the
+	 * largest frag seen, to avoid sending tiny DF-fragments when the
+	 * skb was built from one very small DF fragment and one large
+	 * non-DF frag.
+	 */
+	if (qp->max_df_size == qp->q.max_size) {
+		IPCB(head)->flags |= IPSKB_FRAG_PMTU;
+		iph->frag_off = htons(IP_DF);
+	} else {
+		iph->frag_off = 0;
+	}
+
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
 	qp->q.fragments = NULL;
 	qp->q.fragments_tail = NULL;
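
Two sizes are now tracked per reassembly queue: q.max_size, the largest fragment seen at all, and max_df_size, the largest fragment that carried DF. The reassembled skb reports the larger of the two in frag_max_size, and DF survives reassembly only when the largest fragment itself had DF set. A stand-alone sketch of that decision; the structure and field names are illustrative mirrors of the queue fields:

    #include <stdbool.h>
    #include <stdio.h>

    struct frag_queue {
        unsigned int max_size;    /* largest fragment seen */
        unsigned int max_df_size; /* largest fragment that carried DF */
    };

    /* DF survives reassembly only if the largest fragment had DF set */
    static bool refrag_sets_df(const struct frag_queue *q)
    {
        return q->max_df_size == q->max_size;
    }

    int main(void)
    {
        struct frag_queue a = { .max_size = 1500, .max_df_size = 1500 };
        struct frag_queue b = { .max_size = 1500, .max_df_size = 68 };

        printf("a: DF=%d\n", refrag_sets_df(&a)); /* 1: refragment keeps DF */
        printf("b: DF=%d\n", refrag_sets_df(&b)); /* 0: tiny DF frag ignored */
        return 0;
    }
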
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index c65b93a..6bf89a6 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -83,6 +83,10 @@
 int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
 EXPORT_SYMBOL(sysctl_ip_default_ttl);
 
+static int ip_fragment(struct sock *sk, struct sk_buff *skb,
+		       unsigned int mtu,
+		       int (*output)(struct sock *, struct sk_buff *));
+
 /* Generate a checksum for an outgoing IP datagram. */
 void ip_send_check(struct iphdr *iph)
 {
@@ -91,7 +95,7 @@
 }
 EXPORT_SYMBOL(ip_send_check);
 
-int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
+static int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
 {
 	struct iphdr *iph = ip_hdr(skb);
 
@@ -168,7 +172,7 @@
 }
 EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
 
-static inline int ip_finish_output2(struct sock *sk, struct sk_buff *skb)
+static int ip_finish_output2(struct sock *sk, struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct rtable *rt = (struct rtable *)dst;
@@ -216,7 +220,8 @@
 	return -EINVAL;
 }
 
-static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb)
+static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb,
+				unsigned int mtu)
 {
 	netdev_features_t features;
 	struct sk_buff *segs;
@@ -224,7 +229,7 @@
 
 	/* common case: locally created skb or seglen is <= mtu */
 	if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
-	      skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb))
+	      skb_gso_network_seglen(skb) <= mtu)
 		return ip_finish_output2(sk, skb);
 
 	/* Slowpath -  GSO segment length is exceeding the dst MTU.
@@ -248,7 +253,7 @@
 		int err;
 
 		segs->next = NULL;
-		err = ip_fragment(sk, segs, ip_finish_output2);
+		err = ip_fragment(sk, segs, mtu, ip_finish_output2);
 
 		if (err && ret == 0)
 			ret = err;
@@ -260,6 +265,8 @@
 
 static int ip_finish_output(struct sock *sk, struct sk_buff *skb)
 {
+	unsigned int mtu;
+
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
 	/* Policy lookup after SNAT yielded a new policy */
 	if (skb_dst(skb)->xfrm) {
@@ -267,11 +274,12 @@
 		return dst_output_sk(sk, skb);
 	}
 #endif
+	mtu = ip_skb_dst_mtu(skb);
 	if (skb_is_gso(skb))
-		return ip_finish_output_gso(sk, skb);
+		return ip_finish_output_gso(sk, skb, mtu);
 
-	if (skb->len > ip_skb_dst_mtu(skb))
-		return ip_fragment(sk, skb, ip_finish_output2);
+	if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
+		return ip_fragment(sk, skb, mtu, ip_finish_output2);
 
 	return ip_finish_output2(sk, skb);
 }
@@ -478,6 +486,31 @@
 	skb_copy_secmark(to, from);
 }
 
+static int ip_fragment(struct sock *sk, struct sk_buff *skb,
+		       unsigned int mtu,
+		       int (*output)(struct sock *, struct sk_buff *))
+{
+	struct iphdr *iph = ip_hdr(skb);
+
+	if ((iph->frag_off & htons(IP_DF)) == 0)
+		return ip_do_fragment(sk, skb, output);
+
+	if (unlikely(!skb->ignore_df ||
+		     (IPCB(skb)->frag_max_size &&
+		      IPCB(skb)->frag_max_size > mtu))) {
+		struct rtable *rt = skb_rtable(skb);
+		struct net_device *dev = rt->dst.dev;
+
+		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
+		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+			  htonl(mtu));
+		kfree_skb(skb);
+		return -EMSGSIZE;
+	}
+
+	return ip_do_fragment(sk, skb, output);
+}
+
 /*
  *	This IP datagram is too large to be sent in one piece.  Break it up into
  *	smaller pieces (each of size equal to IP header plus
@@ -485,8 +518,8 @@
  *	single device frame, and queue such a frame for sending.
  */
 
-int ip_fragment(struct sock *sk, struct sk_buff *skb,
-		int (*output)(struct sock *, struct sk_buff *))
+int ip_do_fragment(struct sock *sk, struct sk_buff *skb,
+		   int (*output)(struct sock *, struct sk_buff *))
 {
 	struct iphdr *iph;
 	int ptr;
@@ -507,15 +540,8 @@
 	iph = ip_hdr(skb);
 
 	mtu = ip_skb_dst_mtu(skb);
-	if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
-		     (IPCB(skb)->frag_max_size &&
-		      IPCB(skb)->frag_max_size > mtu))) {
-		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
-		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
-			  htonl(mtu));
-		kfree_skb(skb);
-		return -EMSGSIZE;
-	}
+	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
+		mtu = IPCB(skb)->frag_max_size;
 
 	/*
 	 *	Setup starting values.
@@ -523,10 +549,6 @@
 
 	hlen = iph->ihl * 4;
 	mtu = mtu - hlen;	/* Size of data space */
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-	if (skb->nf_bridge)
-		mtu -= nf_bridge_mtu_reduction(skb);
-#endif
 	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
 
 	/* When frag_list is given, use it. First, check its validity:
@@ -711,6 +733,9 @@
 		iph = ip_hdr(skb2);
 		iph->frag_off = htons((offset >> 3));
 
+		if (IPCB(skb)->flags & IPSKB_FRAG_PMTU)
+			iph->frag_off |= htons(IP_DF);
+
 		/* ANK: dirty, but effective trick. Upgrade options only if
 		 * the segment to be fragmented was THE FIRST (otherwise,
 		 * options are already fixed) and make it ONCE
@@ -751,7 +776,7 @@
 	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
 	return err;
 }
-EXPORT_SYMBOL(ip_fragment);
+EXPORT_SYMBOL(ip_do_fragment);
 
 int
 ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
@@ -1217,11 +1242,9 @@
 	}
 
 	while (size > 0) {
-		int i;
-
-		if (skb_is_gso(skb))
+		if (skb_is_gso(skb)) {
 			len = size;
-		else {
+		} else {
 
 			/* Check if the remaining data fits into current packet. */
 			len = mtu - skb->len;
@@ -1273,15 +1296,10 @@
 			continue;
 		}
 
-		i = skb_shinfo(skb)->nr_frags;
 		if (len > size)
 			len = size;
-		if (skb_can_coalesce(skb, i, page, offset)) {
-			skb_frag_size_add(&skb_shinfo(skb)->frags[i-1], len);
-		} else if (i < MAX_SKB_FRAGS) {
-			get_page(page);
-			skb_fill_page_desc(skb, i, page, offset, len);
-		} else {
+
+		if (skb_append_pagefrags(skb, page, offset, len)) {
 			err = -EMSGSIZE;
 			goto error;
 		}
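
The new static ip_fragment() wrapper folds the old pre-fragmentation error check into one gate: packets without DF always go to ip_do_fragment(); packets with DF are fragmented only when ignore_df is set and no recorded fragment exceeded the MTU, otherwise the skb is dropped with ICMP_FRAG_NEEDED. The same decision expressed as a pure user-space function; the function name and verdict enum are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    enum frag_verdict { DO_FRAGMENT, SEND_FRAG_NEEDED };

    static enum frag_verdict frag_verdict(bool df, bool ignore_df,
                                          unsigned int frag_max_size,
                                          unsigned int mtu)
    {
        if (!df)
            return DO_FRAGMENT;
        if (!ignore_df || (frag_max_size && frag_max_size > mtu))
            return SEND_FRAG_NEEDED;  /* -EMSGSIZE plus ICMP to sender */
        return DO_FRAGMENT;
    }

    int main(void)
    {
        /* DF set, locally generated (ignore_df), nothing oversized: fragment */
        printf("%d\n", frag_verdict(true, true, 0, 1500));
        /* DF set, reassembled from a 1700-byte fragment, MTU 1500: ICMP */
        printf("%d\n", frag_verdict(true, true, 1700, 1500));
        return 0;
    }
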
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 7cfb089..c3c359a 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -432,6 +432,15 @@
 		kfree_skb(skb);
 }
 
+/* For some errors we have valid addr_offset even with zero payload and
+ * zero port. Also, addr_offset should be supported if port is set.
+ */
+static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
+{
+	return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
+	       serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
+}
+
 /* IPv4 supports cmsg on all ICMP errors and some timestamps
  *
  * Timestamp code paths do not initialize the fields expected by cmsg:
@@ -498,7 +507,7 @@
 
 	serr = SKB_EXT_ERR(skb);
 
-	if (sin && serr->port) {
+	if (sin && ipv4_datagram_support_addr(serr)) {
 		sin->sin_family = AF_INET;
 		sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
 						   serr->addr_offset);
@@ -582,6 +591,7 @@
 	case IP_TRANSPARENT:
 	case IP_MINTTL:
 	case IP_NODEFRAG:
+	case IP_BIND_ADDRESS_NO_PORT:
 	case IP_UNICAST_IF:
 	case IP_MULTICAST_TTL:
 	case IP_MULTICAST_ALL:
@@ -732,6 +742,9 @@
 		}
 		inet->nodefrag = val ? 1 : 0;
 		break;
+	case IP_BIND_ADDRESS_NO_PORT:
+		inet->bind_address_no_port = val ? 1 : 0;
+		break;
 	case IP_MTU_DISCOVER:
 		if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
 			goto e_inval;
@@ -1324,6 +1337,9 @@
 	case IP_NODEFRAG:
 		val = inet->nodefrag;
 		break;
+	case IP_BIND_ADDRESS_NO_PORT:
+		val = inet->bind_address_no_port;
+		break;
 	case IP_MTU_DISCOVER:
 		val = inet->pmtudisc;
 		break;
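
IP_BIND_ADDRESS_NO_PORT lets an application pin a source address at bind() time without consuming an ephemeral port; the port is chosen later, at connect(), when the full 4-tuple is known. A minimal user-space usage sketch; the addresses are examples, and the fallback define carries the uapi value for libcs whose headers predate the option:

    #include <stdio.h>
    #include <unistd.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    #ifndef IP_BIND_ADDRESS_NO_PORT
    #define IP_BIND_ADDRESS_NO_PORT 24  /* include/uapi/linux/in.h */
    #endif

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int one = 1;
        struct sockaddr_in src = { .sin_family = AF_INET };
        struct sockaddr_in dst = { .sin_family = AF_INET,
                                   .sin_port = htons(80) };

        inet_pton(AF_INET, "192.0.2.1", &src.sin_addr);    /* local addr */
        inet_pton(AF_INET, "198.51.100.1", &dst.sin_addr); /* peer addr */

        /* Pin the source address now, but defer port selection ... */
        setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT,
                   &one, sizeof(one));
        bind(fd, (struct sockaddr *)&src, sizeof(src));    /* sin_port == 0 */

        /* ... until connect(), when the whole 4-tuple is known. */
        if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
            perror("connect");
        close(fd);
        return 0;
    }
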
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index ce63ab2..6a51a71 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -98,7 +98,7 @@
 			return -ENOMEM;
 
 		eh = (struct ethhdr *)skb->data;
-		if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
+		if (likely(eth_proto_is_802_3(eh->h_proto)))
 			skb->protocol = eh->h_proto;
 		else
 			skb->protocol = htons(ETH_P_802_2);
@@ -165,6 +165,8 @@
 {
 	int i;
 
+	netdev_stats_to_stats64(tot, &dev->stats);
+
 	for_each_possible_cpu(i) {
 		const struct pcpu_sw_netstats *tstats =
 						   per_cpu_ptr(dev->tstats, i);
@@ -185,22 +187,6 @@
 		tot->tx_bytes   += tx_bytes;
 	}
 
-	tot->multicast = dev->stats.multicast;
-
-	tot->rx_crc_errors = dev->stats.rx_crc_errors;
-	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
-	tot->rx_length_errors = dev->stats.rx_length_errors;
-	tot->rx_frame_errors = dev->stats.rx_frame_errors;
-	tot->rx_errors = dev->stats.rx_errors;
-
-	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
-	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
-	tot->tx_dropped = dev->stats.tx_dropped;
-	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
-	tot->tx_errors = dev->stats.tx_errors;
-
-	tot->collisions  = dev->stats.collisions;
-
 	return tot;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index ff96396..254238d 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -251,7 +251,8 @@
 			return -EINVAL;
 	}
 
-	p.i_key = p.o_key = p.i_flags = p.o_flags = 0;
+	p.i_key = p.o_key = 0;
+	p.i_flags = p.o_flags = 0;
 	if (p.iph.ttl)
 		p.iph.frag_off |= htons(IP_DF);
 
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index fb20f36..2199a5d 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -195,7 +195,8 @@
 
 config IP_NF_MATCH_RPFILTER
 	tristate '"rpfilter" reverse path filter match support'
-	depends on NETFILTER_ADVANCED && (IP_NF_MANGLE || IP_NF_RAW)
+	depends on NETFILTER_ADVANCED
+	depends on IP_NF_MANGLE || IP_NF_RAW
 	---help---
 	  This option allows you to match packets whose replies would
 	  go out via the interface the packet came in on.
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index a612007..95c9b6e 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -256,7 +256,7 @@
 	const struct arphdr *arp;
 	struct arpt_entry *e, *back;
 	const char *indev, *outdev;
-	void *table_base;
+	const void *table_base;
 	const struct xt_table_info *private;
 	struct xt_action_param acpar;
 	unsigned int addend;
@@ -275,7 +275,7 @@
 	 * pointer.
 	 */
 	smp_read_barrier_depends();
-	table_base = private->entries[smp_processor_id()];
+	table_base = private->entries;
 
 	e = get_entry(table_base, private->hook_entry[hook]);
 	back = get_entry(table_base, private->underflow[hook]);
@@ -289,13 +289,15 @@
 	arp = arp_hdr(skb);
 	do {
 		const struct xt_entry_target *t;
+		struct xt_counters *counter;
 
 		if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
 			e = arpt_next_entry(e);
 			continue;
 		}
 
-		ADD_COUNTER(e->counters, arp_hdr_len(skb->dev), 1);
+		counter = xt_get_this_cpu_counter(&e->counters);
+		ADD_COUNTER(*counter, arp_hdr_len(skb->dev), 1);
 
 		t = arpt_get_target_c(e);
 
@@ -521,6 +523,10 @@
 	if (ret)
 		return ret;
 
+	e->counters.pcnt = xt_percpu_counter_alloc();
+	if (IS_ERR_VALUE(e->counters.pcnt))
+		return -ENOMEM;
+
 	t = arpt_get_target(e);
 	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
 					t->u.user.revision);
@@ -538,6 +544,8 @@
 err:
 	module_put(t->u.kernel.target->me);
 out:
+	xt_percpu_counter_free(e->counters.pcnt);
+
 	return ret;
 }
 
@@ -614,6 +622,7 @@
 	if (par.target->destroy != NULL)
 		par.target->destroy(&par);
 	module_put(par.target->me);
+	xt_percpu_counter_free(e->counters.pcnt);
 }
 
 /* Checks and translates the user-supplied table segment (held in
@@ -702,12 +711,6 @@
 		return ret;
 	}
 
-	/* And one copy for every other CPU */
-	for_each_possible_cpu(i) {
-		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
-			memcpy(newinfo->entries[i], entry0, newinfo->size);
-	}
-
 	return ret;
 }
 
@@ -722,14 +725,16 @@
 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
 
 		i = 0;
-		xt_entry_foreach(iter, t->entries[cpu], t->size) {
+		xt_entry_foreach(iter, t->entries, t->size) {
+			struct xt_counters *tmp;
 			u64 bcnt, pcnt;
 			unsigned int start;
 
+			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
 			do {
 				start = read_seqcount_begin(s);
-				bcnt = iter->counters.bcnt;
-				pcnt = iter->counters.pcnt;
+				bcnt = tmp->bcnt;
+				pcnt = tmp->pcnt;
 			} while (read_seqcount_retry(s, start));
 
 			ADD_COUNTER(counters[i], bcnt, pcnt);
@@ -774,7 +779,7 @@
 	if (IS_ERR(counters))
 		return PTR_ERR(counters);
 
-	loc_cpu_entry = private->entries[raw_smp_processor_id()];
+	loc_cpu_entry = private->entries;
 	/* ... then copy entire thing ... */
 	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
 		ret = -EFAULT;
@@ -863,16 +868,16 @@
 			     struct xt_table_info *newinfo)
 {
 	struct arpt_entry *iter;
-	void *loc_cpu_entry;
+	const void *loc_cpu_entry;
 	int ret;
 
 	if (!newinfo || !info)
 		return -EINVAL;
 
-	/* we dont care about newinfo->entries[] */
+	/* we don't care about newinfo->entries */
 	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
 	newinfo->initial_entries = 0;
-	loc_cpu_entry = info->entries[raw_smp_processor_id()];
+	loc_cpu_entry = info->entries;
 	xt_compat_init_offsets(NFPROTO_ARP, info->number);
 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
@@ -1037,7 +1042,7 @@
 	get_counters(oldinfo, counters);
 
 	/* Decrease module usage counts and free resource */
-	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
+	loc_cpu_old_entry = oldinfo->entries;
 	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
 		cleanup_entry(iter);
 
@@ -1084,8 +1089,7 @@
 	if (!newinfo)
 		return -ENOMEM;
 
-	/* choose the copy that is on our node/cpu */
-	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	loc_cpu_entry = newinfo->entries;
 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
 			   tmp.size) != 0) {
 		ret = -EFAULT;
@@ -1115,7 +1119,7 @@
 static int do_add_counters(struct net *net, const void __user *user,
 			   unsigned int len, int compat)
 {
-	unsigned int i, curcpu;
+	unsigned int i;
 	struct xt_counters_info tmp;
 	struct xt_counters *paddc;
 	unsigned int num_counters;
@@ -1125,7 +1129,6 @@
 	struct xt_table *t;
 	const struct xt_table_info *private;
 	int ret = 0;
-	void *loc_cpu_entry;
 	struct arpt_entry *iter;
 	unsigned int addend;
 #ifdef CONFIG_COMPAT
@@ -1181,12 +1184,13 @@
 	}
 
 	i = 0;
-	/* Choose the copy that is on our node */
-	curcpu = smp_processor_id();
-	loc_cpu_entry = private->entries[curcpu];
+
 	addend = xt_write_recseq_begin();
-	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
-		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
+	xt_entry_foreach(iter, private->entries, private->size) {
+		struct xt_counters *tmp;
+
+		tmp = xt_get_this_cpu_counter(&iter->counters);
+		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
 		++i;
 	}
 	xt_write_recseq_end(addend);
@@ -1396,7 +1400,7 @@
 		newinfo->hook_entry[i] = info->hook_entry[i];
 		newinfo->underflow[i] = info->underflow[i];
 	}
-	entry1 = newinfo->entries[raw_smp_processor_id()];
+	entry1 = newinfo->entries;
 	pos = entry1;
 	size = total_size;
 	xt_entry_foreach(iter0, entry0, total_size) {
@@ -1416,9 +1420,17 @@
 
 	i = 0;
 	xt_entry_foreach(iter1, entry1, newinfo->size) {
-		ret = check_target(iter1, name);
-		if (ret != 0)
+		iter1->counters.pcnt = xt_percpu_counter_alloc();
+		if (IS_ERR_VALUE(iter1->counters.pcnt)) {
+			ret = -ENOMEM;
 			break;
+		}
+
+		ret = check_target(iter1, name);
+		if (ret != 0) {
+			xt_percpu_counter_free(iter1->counters.pcnt);
+			break;
+		}
 		++i;
 		if (strcmp(arpt_get_target(iter1)->u.user.name,
 		    XT_ERROR_TARGET) == 0)
@@ -1448,11 +1460,6 @@
 		return ret;
 	}
 
-	/* And one copy for every other CPU */
-	for_each_possible_cpu(i)
-		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
-			memcpy(newinfo->entries[i], entry1, newinfo->size);
-
 	*pinfo = newinfo;
 	*pentry0 = entry1;
 	xt_free_table_info(info);
@@ -1511,8 +1518,7 @@
 	if (!newinfo)
 		return -ENOMEM;
 
-	/* choose the copy that is on our node/cpu */
-	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	loc_cpu_entry = newinfo->entries;
 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) {
 		ret = -EFAULT;
 		goto free_newinfo;
@@ -1609,7 +1615,6 @@
 	void __user *pos;
 	unsigned int size;
 	int ret = 0;
-	void *loc_cpu_entry;
 	unsigned int i = 0;
 	struct arpt_entry *iter;
 
@@ -1617,11 +1622,9 @@
 	if (IS_ERR(counters))
 		return PTR_ERR(counters);
 
-	/* choose the copy on our node/cpu */
-	loc_cpu_entry = private->entries[raw_smp_processor_id()];
 	pos = userptr;
 	size = total_size;
-	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
+	xt_entry_foreach(iter, private->entries, total_size) {
 		ret = compat_copy_entry_to_user(iter, &pos,
 						&size, counters, i++);
 		if (ret != 0)
@@ -1790,8 +1793,7 @@
 		goto out;
 	}
 
-	/* choose the copy on our node/cpu */
-	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	loc_cpu_entry = newinfo->entries;
 	memcpy(loc_cpu_entry, repl->entries, repl->size);
 
 	ret = translate_table(newinfo, loc_cpu_entry, repl);
@@ -1822,7 +1824,7 @@
 	private = xt_unregister_table(table);
 
 	/* Decrease module usage counts and free resources */
-	loc_cpu_entry = private->entries[raw_smp_processor_id()];
+	loc_cpu_entry = private->entries;
 	xt_entry_foreach(iter, loc_cpu_entry, private->size)
 		cleanup_entry(iter);
 	if (private->number > private->initial_entries)
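
Rule counters move from per-CPU copies of the entire table to a single table whose entries carry per-CPU counters, allocated by xt_percpu_counter_alloc() and fetched on the hot path with xt_get_this_cpu_counter(). A hedged kernel-style sketch of the underlying pattern using the generic percpu API; it is illustrative only and simplified, since the real helpers also keep a uniprocessor fallback in the embedded counter and encode allocation failure in the pcnt field:

    /* Illustrative only: simplified per-cpu rule counters. */
    struct pcpu_counter { u64 bcnt, pcnt; };

    static struct pcpu_counter __percpu *rule_counter_alloc(void)
    {
        return alloc_percpu(struct pcpu_counter);
    }

    /* hot path: touch only this cpu's pair, no cache-line bouncing */
    static void rule_counter_hit(struct pcpu_counter __percpu *cnt,
                                 unsigned int bytes)
    {
        struct pcpu_counter *c = this_cpu_ptr(cnt);

        c->bcnt += bytes;
        c->pcnt++;
    }

    /* slow path: fold all cpus, as get_counters() does above */
    static void rule_counter_fold(struct pcpu_counter __percpu *cnt,
                                  u64 *bytes, u64 *pkts)
    {
        int cpu;

        *bytes = *pkts = 0;
        for_each_possible_cpu(cpu) {
            struct pcpu_counter *c = per_cpu_ptr(cnt, cpu);

            *bytes += c->bcnt;
            *pkts += c->pcnt;
        }
    }
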
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 2d0e265..6c72fbb 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -254,15 +254,13 @@
 			 const struct xt_table_info *private,
 			 const struct ipt_entry *e)
 {
-	const void *table_base;
 	const struct ipt_entry *root;
 	const char *hookname, *chainname, *comment;
 	const struct ipt_entry *iter;
 	unsigned int rulenum = 0;
 	struct net *net = dev_net(in ? in : out);
 
-	table_base = private->entries[smp_processor_id()];
-	root = get_entry(table_base, private->hook_entry[hook]);
+	root = get_entry(private->entries, private->hook_entry[hook]);
 
 	hookname = chainname = hooknames[hook];
 	comment = comments[NF_IP_TRACE_COMMENT_RULE];
@@ -331,7 +329,7 @@
 	 * pointer.
 	 */
 	smp_read_barrier_depends();
-	table_base = private->entries[cpu];
+	table_base = private->entries;
 	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
 	stackptr   = per_cpu_ptr(private->stackptr, cpu);
 	origptr    = *stackptr;
@@ -345,6 +343,7 @@
 	do {
 		const struct xt_entry_target *t;
 		const struct xt_entry_match *ematch;
+		struct xt_counters *counter;
 
 		IP_NF_ASSERT(e);
 		if (!ip_packet_match(ip, indev, outdev,
@@ -361,7 +360,8 @@
 				goto no_match;
 		}
 
-		ADD_COUNTER(e->counters, skb->len, 1);
+		counter = xt_get_this_cpu_counter(&e->counters);
+		ADD_COUNTER(*counter, skb->len, 1);
 
 		t = ipt_get_target(e);
 		IP_NF_ASSERT(t->u.kernel.target);
@@ -665,6 +665,10 @@
 	if (ret)
 		return ret;
 
+	e->counters.pcnt = xt_percpu_counter_alloc();
+	if (IS_ERR_VALUE(e->counters.pcnt))
+		return -ENOMEM;
+
 	j = 0;
 	mtpar.net	= net;
 	mtpar.table     = name;
@@ -691,6 +695,7 @@
 	ret = check_target(e, net, name);
 	if (ret)
 		goto err;
+
 	return 0;
  err:
 	module_put(t->u.kernel.target->me);
@@ -700,6 +705,9 @@
 			break;
 		cleanup_match(ematch, net);
 	}
+
+	xt_percpu_counter_free(e->counters.pcnt);
+
 	return ret;
 }
 
@@ -784,6 +792,7 @@
 	if (par.target->destroy != NULL)
 		par.target->destroy(&par);
 	module_put(par.target->me);
+	xt_percpu_counter_free(e->counters.pcnt);
 }
 
 /* Checks and translates the user-supplied table segment (held in
@@ -866,12 +875,6 @@
 		return ret;
 	}
 
-	/* And one copy for every other CPU */
-	for_each_possible_cpu(i) {
-		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
-			memcpy(newinfo->entries[i], entry0, newinfo->size);
-	}
-
 	return ret;
 }
 
@@ -887,14 +890,16 @@
 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
 
 		i = 0;
-		xt_entry_foreach(iter, t->entries[cpu], t->size) {
+		xt_entry_foreach(iter, t->entries, t->size) {
+			struct xt_counters *tmp;
 			u64 bcnt, pcnt;
 			unsigned int start;
 
+			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
 			do {
 				start = read_seqcount_begin(s);
-				bcnt = iter->counters.bcnt;
-				pcnt = iter->counters.pcnt;
+				bcnt = tmp->bcnt;
+				pcnt = tmp->pcnt;
 			} while (read_seqcount_retry(s, start));
 
 			ADD_COUNTER(counters[i], bcnt, pcnt);
@@ -939,11 +944,7 @@
 	if (IS_ERR(counters))
 		return PTR_ERR(counters);
 
-	/* choose the copy that is on our node/cpu, ...
-	 * This choice is lazy (because current thread is
-	 * allowed to migrate to another cpu)
-	 */
-	loc_cpu_entry = private->entries[raw_smp_processor_id()];
+	loc_cpu_entry = private->entries;
 	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
 		ret = -EFAULT;
 		goto free_counters;
@@ -1051,16 +1052,16 @@
 			     struct xt_table_info *newinfo)
 {
 	struct ipt_entry *iter;
-	void *loc_cpu_entry;
+	const void *loc_cpu_entry;
 	int ret;
 
 	if (!newinfo || !info)
 		return -EINVAL;
 
-	/* we dont care about newinfo->entries[] */
+	/* we don't care about newinfo->entries */
 	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
 	newinfo->initial_entries = 0;
-	loc_cpu_entry = info->entries[raw_smp_processor_id()];
+	loc_cpu_entry = info->entries;
 	xt_compat_init_offsets(AF_INET, info->number);
 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
@@ -1181,7 +1182,6 @@
 	struct xt_table *t;
 	struct xt_table_info *oldinfo;
 	struct xt_counters *counters;
-	void *loc_cpu_old_entry;
 	struct ipt_entry *iter;
 
 	ret = 0;
@@ -1224,8 +1224,7 @@
 	get_counters(oldinfo, counters);
 
 	/* Decrease module usage counts and free resource */
-	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
-	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
+	xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
 		cleanup_entry(iter, net);
 
 	xt_free_table_info(oldinfo);
@@ -1271,8 +1270,7 @@
 	if (!newinfo)
 		return -ENOMEM;
 
-	/* choose the copy that is on our node/cpu */
-	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	loc_cpu_entry = newinfo->entries;
 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
 			   tmp.size) != 0) {
 		ret = -EFAULT;
@@ -1303,7 +1301,7 @@
 do_add_counters(struct net *net, const void __user *user,
                 unsigned int len, int compat)
 {
-	unsigned int i, curcpu;
+	unsigned int i;
 	struct xt_counters_info tmp;
 	struct xt_counters *paddc;
 	unsigned int num_counters;
@@ -1313,7 +1311,6 @@
 	struct xt_table *t;
 	const struct xt_table_info *private;
 	int ret = 0;
-	void *loc_cpu_entry;
 	struct ipt_entry *iter;
 	unsigned int addend;
 #ifdef CONFIG_COMPAT
@@ -1369,12 +1366,12 @@
 	}
 
 	i = 0;
-	/* Choose the copy that is on our node */
-	curcpu = smp_processor_id();
-	loc_cpu_entry = private->entries[curcpu];
 	addend = xt_write_recseq_begin();
-	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
-		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
+	xt_entry_foreach(iter, private->entries, private->size) {
+		struct xt_counters *tmp;
+
+		tmp = xt_get_this_cpu_counter(&iter->counters);
+		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
 		++i;
 	}
 	xt_write_recseq_end(addend);
@@ -1444,7 +1441,6 @@
 compat_find_calc_match(struct xt_entry_match *m,
 		       const char *name,
 		       const struct ipt_ip *ip,
-		       unsigned int hookmask,
 		       int *size)
 {
 	struct xt_match *match;
@@ -1513,8 +1509,7 @@
 	entry_offset = (void *)e - (void *)base;
 	j = 0;
 	xt_ematch_foreach(ematch, e) {
-		ret = compat_find_calc_match(ematch, name,
-					     &e->ip, e->comefrom, &off);
+		ret = compat_find_calc_match(ematch, name, &e->ip, &off);
 		if (ret != 0)
 			goto release_matches;
 		++j;
@@ -1610,6 +1605,10 @@
 	unsigned int j;
 	int ret = 0;
 
+	e->counters.pcnt = xt_percpu_counter_alloc();
+	if (IS_ERR_VALUE(e->counters.pcnt))
+		return -ENOMEM;
+
 	j = 0;
 	mtpar.net	= net;
 	mtpar.table     = name;
@@ -1634,6 +1633,9 @@
 			break;
 		cleanup_match(ematch, net);
 	}
+
+	xt_percpu_counter_free(e->counters.pcnt);
+
 	return ret;
 }
 
@@ -1718,7 +1720,7 @@
 		newinfo->hook_entry[i] = info->hook_entry[i];
 		newinfo->underflow[i] = info->underflow[i];
 	}
-	entry1 = newinfo->entries[raw_smp_processor_id()];
+	entry1 = newinfo->entries;
 	pos = entry1;
 	size = total_size;
 	xt_entry_foreach(iter0, entry0, total_size) {
@@ -1770,11 +1772,6 @@
 		return ret;
 	}
 
-	/* And one copy for every other CPU */
-	for_each_possible_cpu(i)
-		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
-			memcpy(newinfo->entries[i], entry1, newinfo->size);
-
 	*pinfo = newinfo;
 	*pentry0 = entry1;
 	xt_free_table_info(info);
@@ -1821,8 +1818,7 @@
 	if (!newinfo)
 		return -ENOMEM;
 
-	/* choose the copy that is on our node/cpu */
-	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	loc_cpu_entry = newinfo->entries;
 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
 			   tmp.size) != 0) {
 		ret = -EFAULT;
@@ -1893,7 +1889,6 @@
 	void __user *pos;
 	unsigned int size;
 	int ret = 0;
-	const void *loc_cpu_entry;
 	unsigned int i = 0;
 	struct ipt_entry *iter;
 
@@ -1901,14 +1896,9 @@
 	if (IS_ERR(counters))
 		return PTR_ERR(counters);
 
-	/* choose the copy that is on our node/cpu, ...
-	 * This choice is lazy (because current thread is
-	 * allowed to migrate to another cpu)
-	 */
-	loc_cpu_entry = private->entries[raw_smp_processor_id()];
 	pos = userptr;
 	size = total_size;
-	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
+	xt_entry_foreach(iter, private->entries, total_size) {
 		ret = compat_copy_entry_to_user(iter, &pos,
 						&size, counters, i++);
 		if (ret != 0)
@@ -2083,8 +2073,7 @@
 		goto out;
 	}
 
-	/* choose the copy on our node/cpu, but dont care about preemption */
-	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	loc_cpu_entry = newinfo->entries;
 	memcpy(loc_cpu_entry, repl->entries, repl->size);
 
 	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
@@ -2115,7 +2104,7 @@
 	private = xt_unregister_table(table);
 
 	/* Decrease module usage counts and free resources */
-	loc_cpu_entry = private->entries[raw_smp_processor_id()];
+	loc_cpu_entry = private->entries;
 	xt_entry_foreach(iter, loc_cpu_entry, private->size)
 		cleanup_entry(iter, net);
 	if (private->number > private->initial_entries)
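
get_counters() reads each 64-bit counter pair under xt_recseq using the usual seqcount reader loop: snapshot the sequence number, copy the pair, and retry if a writer bumped the sequence in between. A runnable user-space analogue with C11 atomics; a single writer is assumed, and the plain reads of the payload are a deliberate simplification of the kernel's seqcount discipline:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint seq;
    static uint64_t bcnt, pcnt;  /* payload protected by seq */

    static void read_counters(uint64_t *b, uint64_t *p)
    {
        unsigned int start;

        do {
            while ((start = atomic_load(&seq)) & 1)
                ;  /* odd sequence: writer in progress */
            *b = bcnt;
            *p = pcnt;
        } while (atomic_load(&seq) != start);  /* retry on change */
    }

    int main(void)
    {
        uint64_t b, p;

        bcnt = 123456;
        pcnt = 789;
        read_counters(&b, &p);
        printf("bytes=%llu pkts=%llu\n",
               (unsigned long long)b, (unsigned long long)p);
        return 0;
    }
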
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 771ab3d..45cb16a 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -367,6 +367,11 @@
 	struct clusterip_config *config;
 	int ret;
 
+	if (par->nft_compat) {
+		pr_err("cannot use CLUSTERIP target from nftables compat\n");
+		return -EOPNOTSUPP;
+	}
+
 	if (cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP &&
 	    cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT &&
 	    cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT_DPT) {
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index e9e6779..fe8cc18 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -18,7 +18,7 @@
 #include <net/netfilter/nf_conntrack_synproxy.h>
 
 static struct iphdr *
-synproxy_build_ip(struct sk_buff *skb, u32 saddr, u32 daddr)
+synproxy_build_ip(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 {
 	struct iphdr *iph;
 
@@ -220,7 +220,7 @@
 	nth->ack_seq	= th->ack_seq;
 	tcp_flag_word(nth) = TCP_FLAG_ACK;
 	nth->doff	= tcp_hdr_size / 4;
-	nth->window	= ntohs(htons(th->window) >> opts->wscale);
+	nth->window	= htons(ntohs(th->window) >> opts->wscale);
 	nth->check	= 0;
 	nth->urg_ptr	= 0;
 
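
The one-line SYNPROXY change is a type fix rather than a behavior fix: htons() and ntohs() perform the same byte swap at runtime, but the corrected order (ntohs() on the wire value, shift in host order, htons() on the result) is what the __be16 annotations require, and is what sparse flags. The runnable demo below shows why the shift itself must happen in host byte order; the window value is illustrative:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
        uint16_t wire = htons(8000);  /* window field as on the wire */
        unsigned int wscale = 2;

        uint16_t wrong = wire >> wscale;                /* shifts swapped bytes */
        uint16_t right = htons(ntohs(wire) >> wscale);  /* shift in host order */

        printf("wrong=%u right=%u (expect %u)\n",
               ntohs(wrong), ntohs(right), 8000U >> wscale);
        return 0;
    }

On a little-endian host the "wrong" form shifts the byte-swapped value and prints garbage; bracketing the shift with ntohs()/htons() gives the expected 2000.
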
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index 4bfaedf..8618fd1 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -40,7 +40,7 @@
 	struct net *net = dev_net(dev);
 	int ret __maybe_unused;
 
-	if (fib_lookup(net, fl4, &res))
+	if (fib_lookup(net, fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE))
 		return false;
 
 	if (res.type != RTN_UNICAST) {
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index e1f3b91..da5d483 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -298,6 +298,8 @@
 	SNMP_MIB_ITEM("TCPACKSkippedFinWait2", LINUX_MIB_TCPACKSKIPPEDFINWAIT2),
 	SNMP_MIB_ITEM("TCPACKSkippedTimeWait", LINUX_MIB_TCPACKSKIPPEDTIMEWAIT),
 	SNMP_MIB_ITEM("TCPACKSkippedChallenge", LINUX_MIB_TCPACKSKIPPEDCHALLENGE),
+	SNMP_MIB_ITEM("TCPWinProbe", LINUX_MIB_TCPWINPROBE),
+	SNMP_MIB_ITEM("TCPKeepAlive", LINUX_MIB_TCPKEEPALIVE),
 	SNMP_MIB_SENTINEL
 };
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f45f2a1..d0362a2 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -457,12 +457,9 @@
 }
 
 #define IP_IDENTS_SZ 2048u
-struct ip_ident_bucket {
-	atomic_t	id;
-	u32		stamp32;
-};
 
-static struct ip_ident_bucket *ip_idents __read_mostly;
+static atomic_t *ip_idents __read_mostly;
+static u32 *ip_tstamps __read_mostly;
 
 /* In order to protect privacy, we add a perturbation to identifiers
  * if one generator is seldom used. This makes hard for an attacker
@@ -470,15 +467,16 @@
  */
 u32 ip_idents_reserve(u32 hash, int segs)
 {
-	struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
-	u32 old = ACCESS_ONCE(bucket->stamp32);
+	u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
+	atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
+	u32 old = ACCESS_ONCE(*p_tstamp);
 	u32 now = (u32)jiffies;
 	u32 delta = 0;
 
-	if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
+	if (old != now && cmpxchg(p_tstamp, old, now) == old)
 		delta = prandom_u32_max(now - old);
 
-	return atomic_add_return(segs + delta, &bucket->id) - segs;
+	return atomic_add_return(segs + delta, p_id) - segs;
 }
 EXPORT_SYMBOL(ip_idents_reserve);
 
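
ip_idents_reserve() now works on two bare arrays: an atomic ID counter and a last-use timestamp per bucket. When a bucket has been idle, a random delta derived from the idle time is added so a rarely used identifier stream does not advance predictably. A runnable single-threaded sketch; time() stands in for jiffies, rand() for prandom_u32_max(), and the kernel's cmpxchg on the timestamp is reduced to a plain store:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define IDENTS_SZ 2048u

    static atomic_uint ids[IDENTS_SZ];
    static uint32_t tstamps[IDENTS_SZ];

    static uint32_t idents_reserve(uint32_t hash, int segs)
    {
        uint32_t slot = hash % IDENTS_SZ;
        uint32_t now = (uint32_t)time(NULL);
        uint32_t old = tstamps[slot];
        uint32_t delta = 0;
        uint32_t start;

        if (old != now) {
            /* bucket was idle: perturb by a random amount */
            delta = (uint32_t)rand() % (now - old + 1);
            tstamps[slot] = now;  /* kernel: cmpxchg(p_tstamp, old, now) */
        }
        start = atomic_fetch_add(&ids[slot], (uint32_t)segs + delta);
        return start + delta;     /* first id of the reserved range */
    }

    int main(void)
    {
        printf("id=%u\n", idents_reserve(0xdeadbeef, 1));
        printf("id=%u\n", idents_reserve(0xdeadbeef, 1));
        return 0;
    }
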
@@ -749,7 +747,7 @@
 		if (!(n->nud_state & NUD_VALID)) {
 			neigh_event_send(n, NULL);
 		} else {
-			if (fib_lookup(net, fl4, &res) == 0) {
+			if (fib_lookup(net, fl4, &res, 0) == 0) {
 				struct fib_nh *nh = &FIB_RES_NH(res);
 
 				update_or_create_fnhe(nh, fl4->daddr, new_gw,
@@ -977,7 +975,7 @@
 		return;
 
 	rcu_read_lock();
-	if (fib_lookup(dev_net(dst->dev), fl4, &res) == 0) {
+	if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
 		struct fib_nh *nh = &FIB_RES_NH(res);
 
 		update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
@@ -1188,7 +1186,7 @@
 		fl4.flowi4_mark = skb->mark;
 
 		rcu_read_lock();
-		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
+		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
 			src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
 		else
 			src = inet_select_addr(rt->dst.dev,
@@ -1718,7 +1716,7 @@
 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
 	fl4.daddr = daddr;
 	fl4.saddr = saddr;
-	err = fib_lookup(net, &fl4, &res);
+	err = fib_lookup(net, &fl4, &res, 0);
 	if (err != 0) {
 		if (!IN_DEV_FORWARD(in_dev))
 			err = -EHOSTUNREACH;
@@ -2097,7 +2095,8 @@
 			goto out;
 		}
 		if (ipv4_is_local_multicast(fl4->daddr) ||
-		    ipv4_is_lbcast(fl4->daddr)) {
+		    ipv4_is_lbcast(fl4->daddr) ||
+		    fl4->flowi4_proto == IPPROTO_IGMP) {
 			if (!fl4->saddr)
 				fl4->saddr = inet_select_addr(dev_out, 0,
 							      RT_SCOPE_LINK);
@@ -2124,7 +2123,7 @@
 		goto make_route;
 	}
 
-	if (fib_lookup(net, fl4, &res)) {
+	if (fib_lookup(net, fl4, &res, 0)) {
 		res.fi = NULL;
 		res.table = NULL;
 		if (fl4->flowi4_oif) {
@@ -2742,6 +2741,10 @@
 
 	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
 
+	ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
+	if (!ip_tstamps)
+		panic("IP: failed to allocate ip_tstamps\n");
+
 	for_each_possible_cpu(cpu) {
 		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
 
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index df849e5..d70b1f6 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -219,9 +219,9 @@
 }
 EXPORT_SYMBOL_GPL(__cookie_v4_check);
 
-static struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
-				    struct request_sock *req,
-				    struct dst_entry *dst)
+struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
+				 struct request_sock *req,
+				 struct dst_entry *dst)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct sock *child;
@@ -235,7 +235,7 @@
 	}
 	return child;
 }
-
+EXPORT_SYMBOL(tcp_get_cookie_sock);
 
 /*
  * when syncookies are in effect and tcp timestamps are enabled we stored
@@ -391,7 +391,7 @@
 	ireq->rcv_wscale  = rcv_wscale;
 	ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst);
 
-	ret = get_cookie_sock(sk, skb, req, &rt->dst);
+	ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst);
 	/* ip_queue_xmit() depends on our flow being setup
 	 * Normal sockets get it right from inet_csk_route_child_sock()
 	 */
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index c3852a7..433231c 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -41,11 +41,19 @@
 static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
 static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+static int min_sndbuf = SOCK_MIN_SNDBUF;
+static int min_rcvbuf = SOCK_MIN_RCVBUF;
 
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
 {
+	bool same_parity = !((range[0] ^ range[1]) & 1);
+
 	write_seqlock(&net->ipv4.ip_local_ports.lock);
+	if (same_parity && !net->ipv4.ip_local_ports.warned) {
+		net->ipv4.ip_local_ports.warned = true;
+		pr_err_ratelimited("ip_local_port_range: prefer different parity for start/end values.\n");
+	}
 	net->ipv4.ip_local_ports.range[0] = range[0];
 	net->ipv4.ip_local_ports.range[1] = range[1];
 	write_sequnlock(&net->ipv4.ip_local_ports.lock);
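
set_local_port_range() now warns once per netns when both endpoints of ip_local_port_range share parity. The parity test is compact enough to misread, so here it is stand-alone; the sample ranges are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    /* low bit of lo ^ hi is clear exactly when both ends are even or both odd */
    static bool same_parity(int lo, int hi)
    {
        return !((lo ^ hi) & 1);
    }

    int main(void)
    {
        printf("%d\n", same_parity(32768, 60999));  /* 0: even/odd, ok */
        printf("%d\n", same_parity(32768, 61000));  /* 1: both even, warns */
        return 0;
    }
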
@@ -522,7 +530,7 @@
 		.maxlen		= sizeof(sysctl_tcp_wmem),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &one,
+		.extra1		= &min_sndbuf,
 	},
 	{
 		.procname	= "tcp_notsent_lowat",
@@ -537,7 +545,7 @@
 		.maxlen		= sizeof(sysctl_tcp_rmem),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &one,
+		.extra1		= &min_rcvbuf,
 	},
 	{
 		.procname	= "tcp_app_win",
@@ -702,7 +710,7 @@
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero,
+		.extra1		= &one,
 		.extra2		= &gso_max_segs,
 	},
 	{
@@ -750,7 +758,7 @@
 		.maxlen		= sizeof(sysctl_udp_rmem_min),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &one
+		.extra1		= &min_rcvbuf,
 	},
 	{
 		.procname	= "udp_wmem_min",
@@ -758,7 +766,7 @@
 		.maxlen		= sizeof(sysctl_udp_wmem_min),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &one
+		.extra1		= &min_sndbuf,
 	},
 	{ }
 };
@@ -821,6 +829,13 @@
 		.proc_handler	= proc_dointvec
 	},
 	{
+		.procname	= "tcp_ecn_fallback",
+		.data		= &init_net.ipv4.sysctl_tcp_ecn_fallback,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
+	{
 		.procname	= "ip_local_port_range",
 		.maxlen		= sizeof(init_net.ipv4.ip_local_ports.range),
 		.data		= &init_net.ipv4.ip_local_ports.range,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f1377f2..7f40567 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -695,8 +695,9 @@
 	struct tcp_splice_state *tss = rd_desc->arg.data;
 	int ret;
 
-	ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
-			      tss->flags);
+	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
+			      min(rd_desc->count, len), tss->flags,
+			      skb_socket_splice);
 	if (ret > 0)
 		rd_desc->count -= ret;
 	return ret;
@@ -809,16 +810,28 @@
 }
 EXPORT_SYMBOL(tcp_splice_read);
 
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+				    bool force_schedule)
 {
 	struct sk_buff *skb;
 
 	/* The TCP header must be at least 32-bit aligned.  */
 	size = ALIGN(size, 4);
 
+	if (unlikely(tcp_under_memory_pressure(sk)))
+		sk_mem_reclaim_partial(sk);
+
 	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
-	if (skb) {
-		if (sk_wmem_schedule(sk, skb->truesize)) {
+	if (likely(skb)) {
+		bool mem_scheduled;
+
+		if (force_schedule) {
+			mem_scheduled = true;
+			sk_forced_mem_schedule(sk, skb->truesize);
+		} else {
+			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
+		}
+		if (likely(mem_scheduled)) {
 			skb_reserve(skb, sk->sk_prot->max_header);
 			/*
 			 * Make sure that we have exactly size bytes
@@ -908,7 +921,8 @@
 			if (!sk_stream_memory_free(sk))
 				goto wait_for_sndbuf;
 
-			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
+						  skb_queue_empty(&sk->sk_write_queue));
 			if (!skb)
 				goto wait_for_memory;
 
@@ -987,6 +1001,9 @@
 	if (copied)
 		goto out;
 out_err:
+	/* make sure we wake any epoll edge trigger waiter */
+	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
+		sk->sk_write_space(sk);
 	return sk_stream_error(sk, flags, err);
 }
 
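
Both send paths now call sk_write_space() when they fail with -EAGAIN on an empty write queue, so an edge-triggered EPOLLOUT waiter is guaranteed a fresh edge instead of blocking forever. A sketch of the user-space pattern this serves; epfd is assumed to already have fd registered with EPOLLOUT | EPOLLET, and the helper name is illustrative:

    #include <errno.h>
    #include <sys/epoll.h>
    #include <unistd.h>

    /* write until EAGAIN, then sleep until EPOLLOUT fires again */
    static ssize_t write_all_et(int epfd, int fd, const char *buf, size_t len)
    {
        size_t off = 0;

        while (off < len) {
            ssize_t n = write(fd, buf + off, len - off);

            if (n > 0) {
                off += n;
                continue;
            }
            if (n < 0 && errno == EAGAIN) {
                struct epoll_event ev;

                if (epoll_wait(epfd, &ev, 1, -1) < 0)
                    return -1;
                continue;
            }
            return -1;
        }
        return (ssize_t)off;
    }
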
@@ -1144,7 +1161,8 @@
 
 			skb = sk_stream_alloc_skb(sk,
 						  select_size(sk, sg),
-						  sk->sk_allocation);
+						  sk->sk_allocation,
+						  skb_queue_empty(&sk->sk_write_queue));
 			if (!skb)
 				goto wait_for_memory;
 
@@ -1275,6 +1293,9 @@
 		goto out;
 out_err:
 	err = sk_stream_error(sk, flags, err);
+	/* make sure we wake any epoll edge trigger waiter */
+	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
+		sk->sk_write_space(sk);
 	release_sock(sk);
 	return err;
 }
@@ -2483,6 +2504,13 @@
 			icsk->icsk_syn_retries = val;
 		break;
 
+	case TCP_SAVE_SYN:
+		if (val < 0 || val > 1)
+			err = -EINVAL;
+		else
+			tp->save_syn = val;
+		break;
+
 	case TCP_LINGER2:
 		if (val < 0)
 			tp->linger2 = -1;
@@ -2545,10 +2573,13 @@
 
 	case TCP_FASTOPEN:
 		if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
-		    TCPF_LISTEN)))
+		    TCPF_LISTEN))) {
+			tcp_fastopen_init_key_once(true);
+
 			err = fastopen_init_queue(sk, val);
-		else
+		} else {
 			err = -EINVAL;
+		}
 		break;
 	case TCP_TIMESTAMP:
 		if (!tp->repair)
@@ -2596,13 +2627,15 @@
 /* Return information about state of tcp endpoint in API format. */
 void tcp_get_info(struct sock *sk, struct tcp_info *info)
 {
-	const struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 now = tcp_time_stamp;
 	unsigned int start;
 	u32 rate;
 
 	memset(info, 0, sizeof(*info));
+	if (sk->sk_type != SOCK_STREAM)
+		return;
 
 	info->tcpi_state = sk->sk_state;
 	info->tcpi_ca_state = icsk->icsk_ca_state;
@@ -2672,6 +2705,8 @@
 		info->tcpi_bytes_acked = tp->bytes_acked;
 		info->tcpi_bytes_received = tp->bytes_received;
 	} while (u64_stats_fetch_retry_irq(&tp->syncp, start));
+	info->tcpi_segs_out = tp->segs_out;
+	info->tcpi_segs_in = tp->segs_in;
 }
 EXPORT_SYMBOL_GPL(tcp_get_info);
 
@@ -2821,6 +2856,42 @@
 	case TCP_NOTSENT_LOWAT:
 		val = tp->notsent_lowat;
 		break;
+	case TCP_SAVE_SYN:
+		val = tp->save_syn;
+		break;
+	case TCP_SAVED_SYN: {
+		if (get_user(len, optlen))
+			return -EFAULT;
+
+		lock_sock(sk);
+		if (tp->saved_syn) {
+			if (len < tp->saved_syn[0]) {
+				if (put_user(tp->saved_syn[0], optlen)) {
+					release_sock(sk);
+					return -EFAULT;
+				}
+				release_sock(sk);
+				return -EINVAL;
+			}
+			len = tp->saved_syn[0];
+			if (put_user(len, optlen)) {
+				release_sock(sk);
+				return -EFAULT;
+			}
+			if (copy_to_user(optval, tp->saved_syn + 1, len)) {
+				release_sock(sk);
+				return -EFAULT;
+			}
+			tcp_saved_syn_free(tp);
+			release_sock(sk);
+		} else {
+			release_sock(sk);
+			len = 0;
+			if (put_user(len, optlen))
+				return -EFAULT;
+		}
+		return 0;
+	}
 	default:
 		return -ENOPROTOOPT;
 	}
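
TCP_SAVE_SYN / TCP_SAVED_SYN from user space: enable saving on the listener, then read the stored SYN headers once from the accepted socket; as the getsockopt code above shows, the kernel frees the buffer after a successful read and a second call returns length 0. Socket setup is assumed, and the fallback defines carry the uapi values for older headers:

    #include <stdio.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    #ifndef TCP_SAVE_SYN
    #define TCP_SAVE_SYN  27  /* include/uapi/linux/tcp.h */
    #endif
    #ifndef TCP_SAVED_SYN
    #define TCP_SAVED_SYN 28
    #endif

    /* Call on the listener before accept(); headers are kept per child. */
    static int enable_save_syn(int listen_fd)
    {
        int one = 1;

        return setsockopt(listen_fd, IPPROTO_TCP, TCP_SAVE_SYN,
                          &one, sizeof(one));
    }

    /* Call once on an accepted socket; a second call returns length 0. */
    static int dump_saved_syn(int conn_fd)
    {
        unsigned char syn[512];  /* network + TCP headers of the SYN */
        socklen_t len = sizeof(syn);
        socklen_t i;

        if (getsockopt(conn_fd, IPPROTO_TCP, TCP_SAVED_SYN, syn, &len) < 0)
            return -1;
        for (i = 0; i < len; i++)
            printf("%02x", syn[i]);
        printf("\n");
        return 0;
    }
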
@@ -3025,11 +3096,12 @@
 
 static void __init tcp_init_mem(void)
 {
-	unsigned long limit = nr_free_buffer_pages() / 8;
+	unsigned long limit = nr_free_buffer_pages() / 16;
+
 	limit = max(limit, 128UL);
-	sysctl_tcp_mem[0] = limit / 4 * 3;
-	sysctl_tcp_mem[1] = limit;
-	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
+	sysctl_tcp_mem[0] = limit / 4 * 3;		/* 4.68 % */
+	sysctl_tcp_mem[1] = limit;			/* 6.25 % */
+	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;	/* 9.37 % */
 }
 
 void __init tcp_init(void)
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
new file mode 100644
index 0000000..8c6fd3d
--- /dev/null
+++ b/net/ipv4/tcp_cdg.c
@@ -0,0 +1,433 @@
+/*
+ * CAIA Delay-Gradient (CDG) congestion control
+ *
+ * This implementation is based on the paper:
+ *   D.A. Hayes and G. Armitage. "Revisiting TCP congestion control using
+ *   delay gradients." In IFIP Networking, pages 328-341. Springer, 2011.
+ *
+ * Scavenger traffic (Less-than-Best-Effort) should disable coexistence
+ * heuristics using parameters use_shadow=0 and use_ineff=0.
+ *
+ * Parameters window, backoff_beta, and backoff_factor are crucial for
+ * throughput and delay. Future work is needed to determine better defaults,
+ * and to provide guidelines for use in different environments/contexts.
+ *
+ * Except for window, knobs are configured via /sys/module/tcp_cdg/parameters/.
+ * Parameter window is only configurable when loading tcp_cdg as a module.
+ *
+ * Notable differences from paper/FreeBSD:
+ *   o Using Hybrid Slow start and Proportional Rate Reduction.
+ *   o Add toggle for shadow window mechanism. Suggested by David Hayes.
+ *   o Add toggle for non-congestion loss tolerance.
+ *   o Scaling parameter G is changed to a backoff factor;
+ *     conversion is given by: backoff_factor = 1000/(G * window).
+ *   o Limit shadow window to 2 * cwnd, or to cwnd when application limited.
+ *   o More accurate e^-x.
+ */
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/module.h>
+#include <net/tcp.h>
+
+#define HYSTART_ACK_TRAIN	1
+#define HYSTART_DELAY		2
+
+static int window __read_mostly = 8;
+static unsigned int backoff_beta __read_mostly = 0.7071 * 1024; /* sqrt 0.5 */
+static unsigned int backoff_factor __read_mostly = 42;
+static unsigned int hystart_detect __read_mostly = 3;
+static unsigned int use_ineff __read_mostly = 5;
+static bool use_shadow __read_mostly = true;
+static bool use_tolerance __read_mostly;
+
+module_param(window, int, 0444);
+MODULE_PARM_DESC(window, "gradient window size (power of two <= 256)");
+module_param(backoff_beta, uint, 0644);
+MODULE_PARM_DESC(backoff_beta, "backoff beta (0-1024)");
+module_param(backoff_factor, uint, 0644);
+MODULE_PARM_DESC(backoff_factor, "backoff probability scale factor");
+module_param(hystart_detect, uint, 0644);
+MODULE_PARM_DESC(hystart_detect, "use Hybrid Slow start "
+		 "(0: disabled, 1: ACK train, 2: delay threshold, 3: both)");
+module_param(use_ineff, uint, 0644);
+MODULE_PARM_DESC(use_ineff, "use ineffectual backoff detection (threshold)");
+module_param(use_shadow, bool, 0644);
+MODULE_PARM_DESC(use_shadow, "use shadow window heuristic");
+module_param(use_tolerance, bool, 0644);
+MODULE_PARM_DESC(use_tolerance, "use loss tolerance heuristic");
+
+struct minmax {
+	union {
+		struct {
+			s32 min;
+			s32 max;
+		};
+		u64 v64;
+	};
+};
+
+enum cdg_state {
+	CDG_UNKNOWN = 0,
+	CDG_NONFULL = 1,
+	CDG_FULL    = 2,
+	CDG_BACKOFF = 3,
+};
+
+struct cdg {
+	struct minmax rtt;
+	struct minmax rtt_prev;
+	struct minmax *gradients;
+	struct minmax gsum;
+	bool gfilled;
+	u8  tail;
+	u8  state;
+	u8  delack;
+	u32 rtt_seq;
+	u32 undo_cwnd;
+	u32 shadow_wnd;
+	u16 backoff_cnt;
+	u16 sample_cnt;
+	s32 delay_min;
+	u32 last_ack;
+	u32 round_start;
+};
+
+/**
+ * nexp_u32 - negative base-e exponential
+ * @ux: x in units of micro
+ *
+ * Returns exp(ux * -1e-6) * U32_MAX.
+ */
+static u32 __pure nexp_u32(u32 ux)
+{
+	static const u16 v[] = {
+		/* exp(-x)*65536-1 for x = 0, 0.000256, 0.000512, ... */
+		65535,
+		65518, 65501, 65468, 65401, 65267, 65001, 64470, 63422,
+		61378, 57484, 50423, 38795, 22965, 8047,  987,   14,
+	};
+	u32 msb = ux >> 8;
+	u32 res;
+	int i;
+
+	/* Cut off when ux >= 2^24 (actual result is <= 222/U32_MAX). */
+	if (msb > U16_MAX)
+		return 0;
+
+	/* Scale first eight bits linearly: */
+	res = U32_MAX - (ux & 0xff) * (U32_MAX / 1000000);
+
+	/* Obtain e^(x + y + ...) by computing e^x * e^y * ...: */
+	for (i = 1; msb; i++, msb >>= 1) {
+		u32 y = v[i & -(msb & 1)] + U32_C(1);
+
+		res = ((u64)res * y) >> 16;
+	}
+
+	return res;
+}
+
+/* Based on the HyStart algorithm (by Ha et al.) that is implemented in
+ * tcp_cubic. Differences/experimental changes:
+ *   o Using Hayes' delayed ACK filter.
+ *   o Using a usec clock for the ACK train.
+ *   o Reset ACK train when application limited.
+ *   o Invoked at any cwnd (i.e. also when cwnd < 16).
+ *   o Invoked only when cwnd < ssthresh (i.e. not when cwnd == ssthresh).
+ */
+static void tcp_cdg_hystart_update(struct sock *sk)
+{
+	struct cdg *ca = inet_csk_ca(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	ca->delay_min = min_not_zero(ca->delay_min, ca->rtt.min);
+	if (ca->delay_min == 0)
+		return;
+
+	if (hystart_detect & HYSTART_ACK_TRAIN) {
+		u32 now_us = div_u64(local_clock(), NSEC_PER_USEC);
+
+		if (ca->last_ack == 0 || !tcp_is_cwnd_limited(sk)) {
+			ca->last_ack = now_us;
+			ca->round_start = now_us;
+		} else if (before(now_us, ca->last_ack + 3000)) {
+			u32 base_owd = max(ca->delay_min / 2U, 125U);
+
+			ca->last_ack = now_us;
+			if (after(now_us, ca->round_start + base_owd)) {
+				NET_INC_STATS_BH(sock_net(sk),
+						 LINUX_MIB_TCPHYSTARTTRAINDETECT);
+				NET_ADD_STATS_BH(sock_net(sk),
+						 LINUX_MIB_TCPHYSTARTTRAINCWND,
+						 tp->snd_cwnd);
+				tp->snd_ssthresh = tp->snd_cwnd;
+				return;
+			}
+		}
+	}
+
+	if (hystart_detect & HYSTART_DELAY) {
+		if (ca->sample_cnt < 8) {
+			ca->sample_cnt++;
+		} else {
+			s32 thresh = max(ca->delay_min + ca->delay_min / 8U,
+					 125U);
+
+			if (ca->rtt.min > thresh) {
+				NET_INC_STATS_BH(sock_net(sk),
+						 LINUX_MIB_TCPHYSTARTDELAYDETECT);
+				NET_ADD_STATS_BH(sock_net(sk),
+						 LINUX_MIB_TCPHYSTARTDELAYCWND,
+						 tp->snd_cwnd);
+				tp->snd_ssthresh = tp->snd_cwnd;
+			}
+		}
+	}
+}
+
+static s32 tcp_cdg_grad(struct cdg *ca)
+{
+	s32 gmin = ca->rtt.min - ca->rtt_prev.min;
+	s32 gmax = ca->rtt.max - ca->rtt_prev.max;
+	s32 grad;
+
+	if (ca->gradients) {
+		ca->gsum.min += gmin - ca->gradients[ca->tail].min;
+		ca->gsum.max += gmax - ca->gradients[ca->tail].max;
+		ca->gradients[ca->tail].min = gmin;
+		ca->gradients[ca->tail].max = gmax;
+		ca->tail = (ca->tail + 1) & (window - 1);
+		gmin = ca->gsum.min;
+		gmax = ca->gsum.max;
+	}
+
+	/* We keep sums to ignore gradients during cwnd reductions;
+	 * the paper's smoothed gradients otherwise simplify to:
+	 * (rtt_latest - rtt_oldest) / window.
+	 *
+	 * We also drop division by window here.
+	 */
+	grad = gmin > 0 ? gmin : gmax;
+
+	/* Extrapolate missing values in gradient window: */
+	if (!ca->gfilled) {
+		if (!ca->gradients && window > 1)
+			grad *= window; /* Memory allocation failed. */
+		else if (ca->tail == 0)
+			ca->gfilled = true;
+		else
+			grad = (grad * window) / (int)ca->tail;
+	}
+
+	/* Backoff was effectual: */
+	if (gmin <= -32 || gmax <= -32)
+		ca->backoff_cnt = 0;
+
+	if (use_tolerance) {
+		/* Reduce small variations to zero: */
+		gmin = DIV_ROUND_CLOSEST(gmin, 64);
+		gmax = DIV_ROUND_CLOSEST(gmax, 64);
+
+		if (gmin > 0 && gmax <= 0)
+			ca->state = CDG_FULL;
+		else if ((gmin > 0 && gmax > 0) || gmax < 0)
+			ca->state = CDG_NONFULL;
+	}
+	return grad;
+}
+
+static bool tcp_cdg_backoff(struct sock *sk, u32 grad)
+{
+	struct cdg *ca = inet_csk_ca(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (prandom_u32() <= nexp_u32(grad * backoff_factor))
+		return false;
+
+	if (use_ineff) {
+		ca->backoff_cnt++;
+		if (ca->backoff_cnt > use_ineff)
+			return false;
+	}
+
+	ca->shadow_wnd = max(ca->shadow_wnd, tp->snd_cwnd);
+	ca->state = CDG_BACKOFF;
+	tcp_enter_cwr(sk);
+	return true;
+}
+
+/* Not called in CWR or Recovery state. */
+static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked)
+{
+	struct cdg *ca = inet_csk_ca(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+	u32 prior_snd_cwnd;
+	u32 incr;
+
+	if (tp->snd_cwnd < tp->snd_ssthresh && hystart_detect)
+		tcp_cdg_hystart_update(sk);
+
+	if (after(ack, ca->rtt_seq) && ca->rtt.v64) {
+		s32 grad = 0;
+
+		if (ca->rtt_prev.v64)
+			grad = tcp_cdg_grad(ca);
+		ca->rtt_seq = tp->snd_nxt;
+		ca->rtt_prev = ca->rtt;
+		ca->rtt.v64 = 0;
+		ca->last_ack = 0;
+		ca->sample_cnt = 0;
+
+		if (grad > 0 && tcp_cdg_backoff(sk, grad))
+			return;
+	}
+
+	if (!tcp_is_cwnd_limited(sk)) {
+		ca->shadow_wnd = min(ca->shadow_wnd, tp->snd_cwnd);
+		return;
+	}
+
+	prior_snd_cwnd = tp->snd_cwnd;
+	tcp_reno_cong_avoid(sk, ack, acked);
+
+	incr = tp->snd_cwnd - prior_snd_cwnd;
+	ca->shadow_wnd = max(ca->shadow_wnd, ca->shadow_wnd + incr);
+}
+
+static void tcp_cdg_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
+{
+	struct cdg *ca = inet_csk_ca(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (rtt_us <= 0)
+		return;
+
+	/* A heuristic for filtering delayed ACKs, adapted from:
+	 * D.A. Hayes. "Timing enhancements to the FreeBSD kernel to support
+	 * delay and rate based TCP mechanisms." TR 100219A. CAIA, 2010.
+	 */
+	if (tp->sacked_out == 0) {
+		if (num_acked == 1 && ca->delack) {
+			/* A delayed ACK is only used for the minimum if it is
+			 * provably lower than an existing non-zero minimum.
+			 */
+			ca->rtt.min = min(ca->rtt.min, rtt_us);
+			ca->delack--;
+			return;
+		} else if (num_acked > 1 && ca->delack < 5) {
+			ca->delack++;
+		}
+	}
+
+	ca->rtt.min = min_not_zero(ca->rtt.min, rtt_us);
+	ca->rtt.max = max(ca->rtt.max, rtt_us);
+}
+
+static u32 tcp_cdg_ssthresh(struct sock *sk)
+{
+	struct cdg *ca = inet_csk_ca(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	ca->undo_cwnd = tp->snd_cwnd;
+
+	if (ca->state == CDG_BACKOFF)
+		return max(2U, (tp->snd_cwnd * min(1024U, backoff_beta)) >> 10);
+
+	if (ca->state == CDG_NONFULL && use_tolerance)
+		return tp->snd_cwnd;
+
+	ca->shadow_wnd = min(ca->shadow_wnd >> 1, tp->snd_cwnd);
+	if (use_shadow)
+		return max3(2U, ca->shadow_wnd, tp->snd_cwnd >> 1);
+	return max(2U, tp->snd_cwnd >> 1);
+}
+
+static u32 tcp_cdg_undo_cwnd(struct sock *sk)
+{
+	struct cdg *ca = inet_csk_ca(sk);
+
+	return max(tcp_sk(sk)->snd_cwnd, ca->undo_cwnd);
+}
+
+static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev)
+{
+	struct cdg *ca = inet_csk_ca(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct minmax *gradients;
+
+	switch (ev) {
+	case CA_EVENT_CWND_RESTART:
+		gradients = ca->gradients;
+		if (gradients)
+			memset(gradients, 0, window * sizeof(gradients[0]));
+		memset(ca, 0, sizeof(*ca));
+
+		ca->gradients = gradients;
+		ca->rtt_seq = tp->snd_nxt;
+		ca->shadow_wnd = tp->snd_cwnd;
+		break;
+	case CA_EVENT_COMPLETE_CWR:
+		ca->state = CDG_UNKNOWN;
+		ca->rtt_seq = tp->snd_nxt;
+		ca->rtt_prev = ca->rtt;
+		ca->rtt.v64 = 0;
+		break;
+	default:
+		break;
+	}
+}
+
+static void tcp_cdg_init(struct sock *sk)
+{
+	struct cdg *ca = inet_csk_ca(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	/* We silently fall back to window = 1 if allocation fails. */
+	if (window > 1)
+		ca->gradients = kcalloc(window, sizeof(ca->gradients[0]),
+					GFP_NOWAIT | __GFP_NOWARN);
+	ca->rtt_seq = tp->snd_nxt;
+	ca->shadow_wnd = tp->snd_cwnd;
+}
+
+static void tcp_cdg_release(struct sock *sk)
+{
+	struct cdg *ca = inet_csk_ca(sk);
+
+	kfree(ca->gradients);
+}
+
+struct tcp_congestion_ops tcp_cdg __read_mostly = {
+	.cong_avoid = tcp_cdg_cong_avoid,
+	.cwnd_event = tcp_cdg_cwnd_event,
+	.pkts_acked = tcp_cdg_acked,
+	.undo_cwnd = tcp_cdg_undo_cwnd,
+	.ssthresh = tcp_cdg_ssthresh,
+	.release = tcp_cdg_release,
+	.init = tcp_cdg_init,
+	.owner = THIS_MODULE,
+	.name = "cdg",
+};
+
+static int __init tcp_cdg_register(void)
+{
+	if (backoff_beta > 1024 || window < 1 || window > 256)
+		return -ERANGE;
+	if (!is_power_of_2(window))
+		return -EINVAL;
+
+	BUILD_BUG_ON(sizeof(struct cdg) > ICSK_CA_PRIV_SIZE);
+	tcp_register_congestion_control(&tcp_cdg);
+	return 0;
+}
+
+static void __exit tcp_cdg_unregister(void)
+{
+	tcp_unregister_congestion_control(&tcp_cdg);
+}
+
+module_init(tcp_cdg_register);
+module_exit(tcp_cdg_unregister);
+MODULE_AUTHOR("Kenneth Klette Jonassen");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TCP CDG");
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 4c41c12..7092a61 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -204,20 +204,26 @@
 
 	/* Expired RTT */
 	if (!before(tp->snd_una, ca->next_seq)) {
-		/* For avoiding denominator == 1. */
-		if (ca->acked_bytes_total == 0)
-			ca->acked_bytes_total = 1;
+		u64 bytes_ecn = ca->acked_bytes_ecn;
+		u32 alpha = ca->dctcp_alpha;
 
 		/* alpha = (1 - g) * alpha + g * F */
-		ca->dctcp_alpha = ca->dctcp_alpha -
-				  (ca->dctcp_alpha >> dctcp_shift_g) +
-				  (ca->acked_bytes_ecn << (10U - dctcp_shift_g)) /
-				  ca->acked_bytes_total;
 
-		if (ca->dctcp_alpha > DCTCP_MAX_ALPHA)
-			/* Clamp dctcp_alpha to max. */
-			ca->dctcp_alpha = DCTCP_MAX_ALPHA;
+		alpha -= alpha >> dctcp_shift_g;
+		if (bytes_ecn) {
+			/* If dctcp_shift_g == 1, a 32bit value would overflow
+			 * after 8 Mbytes.
+			 */
+			bytes_ecn <<= (10 - dctcp_shift_g);
+			do_div(bytes_ecn, max(1U, ca->acked_bytes_total));
 
+			alpha = min(alpha + (u32)bytes_ecn, DCTCP_MAX_ALPHA);
+		}
+		/* dctcp_alpha can be read from dctcp_get_info() without
+		 * synchro, so we ask compiler to not use dctcp_alpha
+		 * as a temporary variable in prior operations.
+		 */
+		WRITE_ONCE(ca->dctcp_alpha, alpha);
 		dctcp_reset(tp, ca);
 	}
 }
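
The rewritten EWMA keeps alpha in units of 1/1024 and, once per observation window, computes alpha = alpha - alpha/2^g + (bytes_ecn << (10 - g)) / acked_total, clamped to DCTCP_MAX_ALPHA; doing the shift on a 64-bit bytes_ecn is what removes the overflow the old code risked (as the comment above notes, a 32-bit value would wrap past 8 MB of marked bytes with dctcp_shift_g == 1). A runnable rerun of the arithmetic with sample numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define DCTCP_MAX_ALPHA 1024U
    static const unsigned int dctcp_shift_g = 4;  /* g = 1/16, the default */

    static uint32_t dctcp_update_alpha(uint32_t alpha, uint64_t bytes_ecn,
                                       uint32_t acked_total)
    {
        alpha -= alpha >> dctcp_shift_g;
        if (bytes_ecn) {
            /* 64-bit shift: a 32-bit value would overflow past 8MB of
             * marked bytes when dctcp_shift_g == 1.
             */
            bytes_ecn <<= 10 - dctcp_shift_g;
            bytes_ecn /= acked_total ? acked_total : 1;
            alpha += (uint32_t)bytes_ecn;
            if (alpha > DCTCP_MAX_ALPHA)
                alpha = DCTCP_MAX_ALPHA;
        }
        return alpha;
    }

    int main(void)
    {
        /* 30% of 100000 acked bytes were ECN-marked this window */
        printf("alpha: %u -> %u\n", 512U,
               dctcp_update_alpha(512, 30000, 100000));
        return 0;
    }
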
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 79b34a0..479f349 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -19,13 +19,14 @@
 static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 			      void *_info)
 {
-	const struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_info *info = _info;
 
 	if (sk->sk_state == TCP_LISTEN) {
 		r->idiag_rqueue = sk->sk_ack_backlog;
 		r->idiag_wqueue = sk->sk_max_ack_backlog;
-	} else {
+	} else if (sk->sk_type == SOCK_STREAM) {
+		const struct tcp_sock *tp = tcp_sk(sk);
+
 		r->idiag_rqueue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
 		r->idiag_wqueue = tp->write_seq - tp->snd_una;
 	}
@@ -50,6 +51,7 @@
 	.dump_one	 = tcp_diag_dump_one,
 	.idiag_get_info	 = tcp_diag_get_info,
 	.idiag_type	 = IPPROTO_TCP,
+	.idiag_info_size = sizeof(struct tcp_info),
 };
 
 static int __init tcp_diag_init(void)
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 46b087a..f9c0fb8 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -78,8 +78,6 @@
 	struct tcp_fastopen_context *ctx;
 	bool ok = false;
 
-	tcp_fastopen_init_key_once(true);
-
 	rcu_read_lock();
 	ctx = rcu_dereference(tcp_fastopen_ctx);
 	if (ctx) {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c9ab964..684f095 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -359,7 +359,7 @@
 	/* Check #1 */
 	if (tp->rcv_ssthresh < tp->window_clamp &&
 	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
-	    !sk_under_memory_pressure(sk)) {
+	    !tcp_under_memory_pressure(sk)) {
 		int incr;
 
 		/* Check #2. Increase window, if skb with such overhead
@@ -446,7 +446,7 @@
 
 	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
-	    !sk_under_memory_pressure(sk) &&
+	    !tcp_under_memory_pressure(sk) &&
 	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
 		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
 				    sysctl_tcp_rmem[2]);
@@ -1130,7 +1130,12 @@
 struct tcp_sacktag_state {
 	int	reord;
 	int	fack_count;
-	long	rtt_us; /* RTT measured by SACKing never-retransmitted data */
+	/* Timestamps for earliest and latest never-retransmitted segment
+	 * that was SACKed. RTO needs the earliest RTT to stay conservative,
+	 * but congestion control should still get an accurate delay signal.
+	 */
+	struct skb_mstamp first_sackt;
+	struct skb_mstamp last_sackt;
 	int	flag;
 };
 
@@ -1233,14 +1238,9 @@
 							   state->reord);
 				if (!after(end_seq, tp->high_seq))
 					state->flag |= FLAG_ORIG_SACK_ACKED;
-				/* Pick the earliest sequence sacked for RTT */
-				if (state->rtt_us < 0) {
-					struct skb_mstamp now;
-
-					skb_mstamp_get(&now);
-					state->rtt_us = skb_mstamp_us_delta(&now,
-								xmit_time);
-				}
+				if (state->first_sackt.v64 == 0)
+					state->first_sackt = *xmit_time;
+				state->last_sackt = *xmit_time;
 			}
 
 			if (sacked & TCPCB_LOST) {
@@ -1316,16 +1316,12 @@
 	 * code can come after this skb later on it's better to keep
 	 * setting gso_size to something.
 	 */
-	if (!skb_shinfo(prev)->gso_size) {
-		skb_shinfo(prev)->gso_size = mss;
-		skb_shinfo(prev)->gso_type = sk->sk_gso_type;
-	}
+	if (!TCP_SKB_CB(prev)->tcp_gso_size)
+		TCP_SKB_CB(prev)->tcp_gso_size = mss;
 
 	/* CHECKME: To clear or not to clear? Mimics normal skb currently */
-	if (tcp_skb_pcount(skb) <= 1) {
-		skb_shinfo(skb)->gso_size = 0;
-		skb_shinfo(skb)->gso_type = 0;
-	}
+	if (tcp_skb_pcount(skb) <= 1)
+		TCP_SKB_CB(skb)->tcp_gso_size = 0;
 
 	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
 	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1634,7 +1630,7 @@
 
 static int
 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
-			u32 prior_snd_una, long *sack_rtt_us)
+			u32 prior_snd_una, struct tcp_sacktag_state *state)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	const unsigned char *ptr = (skb_transport_header(ack_skb) +
@@ -1642,7 +1638,6 @@
 	struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
 	struct tcp_sack_block sp[TCP_NUM_SACKS];
 	struct tcp_sack_block *cache;
-	struct tcp_sacktag_state state;
 	struct sk_buff *skb;
 	int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
 	int used_sacks;
@@ -1650,9 +1645,8 @@
 	int i, j;
 	int first_sack_index;
 
-	state.flag = 0;
-	state.reord = tp->packets_out;
-	state.rtt_us = -1L;
+	state->flag = 0;
+	state->reord = tp->packets_out;
 
 	if (!tp->sacked_out) {
 		if (WARN_ON(tp->fackets_out))
@@ -1663,7 +1657,7 @@
 	found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
 					 num_sacks, prior_snd_una);
 	if (found_dup_sack)
-		state.flag |= FLAG_DSACKING_ACK;
+		state->flag |= FLAG_DSACKING_ACK;
 
 	/* Eliminate too old ACKs, but take into
 	 * account more or less fresh ones, they can
@@ -1728,7 +1722,7 @@
 	}
 
 	skb = tcp_write_queue_head(sk);
-	state.fack_count = 0;
+	state->fack_count = 0;
 	i = 0;
 
 	if (!tp->sacked_out) {
@@ -1762,10 +1756,10 @@
 
 			/* Head todo? */
 			if (before(start_seq, cache->start_seq)) {
-				skb = tcp_sacktag_skip(skb, sk, &state,
+				skb = tcp_sacktag_skip(skb, sk, state,
 						       start_seq);
 				skb = tcp_sacktag_walk(skb, sk, next_dup,
-						       &state,
+						       state,
 						       start_seq,
 						       cache->start_seq,
 						       dup_sack);
@@ -1776,7 +1770,7 @@
 				goto advance_sp;
 
 			skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
-						       &state,
+						       state,
 						       cache->end_seq);
 
 			/* ...tail remains todo... */
@@ -1785,12 +1779,12 @@
 				skb = tcp_highest_sack(sk);
 				if (!skb)
 					break;
-				state.fack_count = tp->fackets_out;
+				state->fack_count = tp->fackets_out;
 				cache++;
 				goto walk;
 			}
 
-			skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq);
+			skb = tcp_sacktag_skip(skb, sk, state, cache->end_seq);
 			/* Check overlap against next cached too (past this one already) */
 			cache++;
 			continue;
@@ -1800,12 +1794,12 @@
 			skb = tcp_highest_sack(sk);
 			if (!skb)
 				break;
-			state.fack_count = tp->fackets_out;
+			state->fack_count = tp->fackets_out;
 		}
-		skb = tcp_sacktag_skip(skb, sk, &state, start_seq);
+		skb = tcp_sacktag_skip(skb, sk, state, start_seq);
 
 walk:
-		skb = tcp_sacktag_walk(skb, sk, next_dup, &state,
+		skb = tcp_sacktag_walk(skb, sk, next_dup, state,
 				       start_seq, end_seq, dup_sack);
 
 advance_sp:
@@ -1820,9 +1814,9 @@
 	for (j = 0; j < used_sacks; j++)
 		tp->recv_sack_cache[i++] = sp[j];
 
-	if ((state.reord < tp->fackets_out) &&
+	if ((state->reord < tp->fackets_out) &&
 	    ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
-		tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);
+		tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);
 
 	tcp_mark_lost_retrans(sk);
 	tcp_verify_left_out(tp);
@@ -1834,8 +1828,7 @@
 	WARN_ON((int)tp->retrans_out < 0);
 	WARN_ON((int)tcp_packets_in_flight(tp) < 0);
 #endif
-	*sack_rtt_us = state.rtt_us;
-	return state.flag;
+	return state->flag;
 }
 
 /* Limits sacked_out so that sum with lost_out isn't ever larger than
@@ -2255,7 +2248,7 @@
 			    (oldcnt >= packets))
 				break;
 
-			mss = skb_shinfo(skb)->gso_size;
+			mss = tcp_skb_mss(skb);
 			err = tcp_fragment(sk, skb, (packets - oldcnt) * mss,
 					   mss, GFP_ATOMIC);
 			if (err < 0)
@@ -2555,6 +2548,7 @@
 		tcp_set_ca_state(sk, TCP_CA_CWR);
 	}
 }
+EXPORT_SYMBOL(tcp_enter_cwr);
 
 static void tcp_try_keep_open(struct sock *sk)
 {
@@ -3055,7 +3049,8 @@
  * arrived at the other end.
  */
 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
-			       u32 prior_snd_una, long sack_rtt_us)
+			       u32 prior_snd_una,
+			       struct tcp_sacktag_state *sack)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct skb_mstamp first_ackt, last_ackt, now;
@@ -3063,8 +3058,9 @@
 	u32 prior_sacked = tp->sacked_out;
 	u32 reord = tp->packets_out;
 	bool fully_acked = true;
-	long ca_seq_rtt_us = -1L;
+	long sack_rtt_us = -1L;
 	long seq_rtt_us = -1L;
+	long ca_rtt_us = -1L;
 	struct sk_buff *skb;
 	u32 pkts_acked = 0;
 	bool rtt_update;
@@ -3153,15 +3149,16 @@
 	skb_mstamp_get(&now);
 	if (likely(first_ackt.v64)) {
 		seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt);
-		ca_seq_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
+		ca_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
+	}
+	if (sack->first_sackt.v64) {
+		sack_rtt_us = skb_mstamp_us_delta(&now, &sack->first_sackt);
+		ca_rtt_us = skb_mstamp_us_delta(&now, &sack->last_sackt);
 	}
 
 	rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us);
 
 	if (flag & FLAG_ACKED) {
-		const struct tcp_congestion_ops *ca_ops
-			= inet_csk(sk)->icsk_ca_ops;
-
 		tcp_rearm_rto(sk);
 		if (unlikely(icsk->icsk_mtup.probe_size &&
 			     !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
@@ -3184,11 +3181,6 @@
 
 		tp->fackets_out -= min(pkts_acked, tp->fackets_out);
 
-		if (ca_ops->pkts_acked) {
-			long rtt_us = min_t(ulong, ca_seq_rtt_us, sack_rtt_us);
-			ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
-		}
-
 	} else if (skb && rtt_update && sack_rtt_us >= 0 &&
 		   sack_rtt_us > skb_mstamp_us_delta(&now, &skb->skb_mstamp)) {
 		/* Do not re-arm RTO if the sack RTT is measured from data sent
@@ -3198,6 +3190,9 @@
 		tcp_rearm_rto(sk);
 	}
 
+	if (icsk->icsk_ca_ops->pkts_acked)
+		icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked, ca_rtt_us);
+
 #if FASTRETRANS_DEBUG > 0
 	WARN_ON((int)tp->sacked_out < 0);
 	WARN_ON((int)tp->lost_out < 0);
@@ -3238,7 +3233,7 @@
 		 * This function is not for random use!
 		 */
 	} else {
-		unsigned long when = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
+		unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
 
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
 					  when, TCP_RTO_MAX);
@@ -3466,6 +3461,7 @@
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcp_sacktag_state sack_state;
 	u32 prior_snd_una = tp->snd_una;
 	u32 ack_seq = TCP_SKB_CB(skb)->seq;
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
@@ -3474,7 +3470,8 @@
 	int prior_packets = tp->packets_out;
 	const int prior_unsacked = tp->packets_out - tp->sacked_out;
 	int acked = 0; /* Number of packets newly acked */
-	long sack_rtt_us = -1L;
+
+	sack_state.first_sackt.v64 = 0;
 
 	/* We very likely will need to access write queue head. */
 	prefetchw(sk->sk_write_queue.next);
@@ -3538,7 +3535,7 @@
 
 		if (TCP_SKB_CB(skb)->sacked)
 			flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
-							&sack_rtt_us);
+							&sack_state);
 
 		if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) {
 			flag |= FLAG_ECE;
@@ -3563,7 +3560,7 @@
 	/* See if we can take anything off of the retransmit queue. */
 	acked = tp->packets_out;
 	flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una,
-				    sack_rtt_us);
+				    &sack_state);
 	acked -= tp->packets_out;
 
 	/* Advance cwnd if state allows */
@@ -3615,7 +3612,7 @@
 	 */
 	if (TCP_SKB_CB(skb)->sacked) {
 		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
-						&sack_rtt_us);
+						&sack_state);
 		tcp_fastretrans_alert(sk, acked, prior_unsacked,
 				      is_dupack, flag);
 	}
@@ -4514,10 +4511,12 @@
 
 		if (eaten <= 0) {
 queue_and_out:
-			if (eaten < 0 &&
-			    tcp_try_rmem_schedule(sk, skb, skb->truesize))
-				goto drop;
-
+			if (eaten < 0) {
+				if (skb_queue_len(&sk->sk_receive_queue) == 0)
+					sk_forced_mem_schedule(sk, skb->truesize);
+				else if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
+					goto drop;
+			}
 			eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
 		}
 		tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
@@ -4788,7 +4787,7 @@
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		tcp_clamp_window(sk);
-	else if (sk_under_memory_pressure(sk))
+	else if (tcp_under_memory_pressure(sk))
 		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
 	tcp_collapse_ofo_queue(sk);
@@ -4832,7 +4831,7 @@
 		return false;
 
 	/* If we are under global TCP memory pressure, do not expand.  */
-	if (sk_under_memory_pressure(sk))
+	if (tcp_under_memory_pressure(sk))
 		return false;
 
 	/* If we are under soft global TCP memory pressure, do not expand.  */
@@ -6067,6 +6066,23 @@
 	return want_cookie;
 }
 
+static void tcp_reqsk_record_syn(const struct sock *sk,
+				 struct request_sock *req,
+				 const struct sk_buff *skb)
+{
+	if (tcp_sk(sk)->save_syn) {
+		u32 len = skb_network_header_len(skb) + tcp_hdrlen(skb);
+		u32 *copy;
+
+		copy = kmalloc(len + sizeof(u32), GFP_ATOMIC);
+		if (copy) {
+			copy[0] = len;
+			memcpy(&copy[1], skb_network_header(skb), len);
+			req->saved_syn = copy;
+		}
+	}
+}
+
 int tcp_conn_request(struct request_sock_ops *rsk_ops,
 		     const struct tcp_request_sock_ops *af_ops,
 		     struct sock *sk, struct sk_buff *skb)
@@ -6199,6 +6215,7 @@
 		tcp_rsk(req)->tfo_listener = false;
 		af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
 	}
+	tcp_reqsk_record_syn(sk, req, skb);
 
 	return 0;
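
The tcp_clean_rtx_queue() changes above split one RTT measurement into
two: seq_rtt_us/sack_rtt_us from the earliest (s)acked segment feed the
RTO estimator so it stays conservative, while ca_rtt_us from the latest
segment gives pkts_acked() the freshest delay signal. A hedged sketch of
that sampling policy in isolation (not kernel code):

#include <stdint.h>

struct rtt_samples {
	int64_t rto_rtt_us;	/* earliest segment: conservative, larger */
	int64_t ca_rtt_us;	/* latest segment: accurate delay signal */
};

static struct rtt_samples sample_rtts(uint64_t now_us,
				      uint64_t first_xmit_us,
				      uint64_t last_xmit_us)
{
	return (struct rtt_samples) {
		.rto_rtt_us = (int64_t)(now_us - first_xmit_us),
		.ca_rtt_us  = (int64_t)(now_us - last_xmit_us),
	};
}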
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index fc1c658..d7d4c2b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1400,7 +1400,7 @@
 		return 0;
 	}
 
-	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
+	if (tcp_checksum_complete(skb))
 		goto csum_err;
 
 	if (sk->sk_state == TCP_LISTEN) {
@@ -1626,6 +1626,7 @@
 	skb->dev = NULL;
 
 	bh_lock_sock_nested(sk);
+	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
 	ret = 0;
 	if (!sock_owned_by_user(sk)) {
 		if (!tcp_prequeue(sk, skb))
@@ -1646,7 +1647,7 @@
 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 		goto discard_it;
 
-	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
+	if (tcp_checksum_complete(skb)) {
 csum_error:
 		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
 bad_packet:
@@ -1670,10 +1671,6 @@
 		goto discard_it;
 	}
 
-	if (skb->len < (th->doff << 2)) {
-		inet_twsk_put(inet_twsk(sk));
-		goto bad_packet;
-	}
 	if (tcp_checksum_complete(skb)) {
 		inet_twsk_put(inet_twsk(sk));
 		goto csum_error;
@@ -1802,6 +1799,7 @@
 
 	/* If socket is aborted during connect operation */
 	tcp_free_fastopen_req(tp);
+	tcp_saved_syn_free(tp);
 
 	sk_sockets_allocated_dec(sk);
 	sock_release_memcg(sk);
@@ -2410,12 +2408,15 @@
 			goto fail;
 		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
 	}
+
 	net->ipv4.sysctl_tcp_ecn = 2;
+	net->ipv4.sysctl_tcp_ecn_fallback = 1;
+
 	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
 	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
 	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
-	return 0;
 
+	return 0;
 fail:
 	tcp_sk_exit(net);
 
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 17e7339..4bc00cb 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -451,6 +451,7 @@
 
 		newtp->rcv_wup = newtp->copied_seq =
 		newtp->rcv_nxt = treq->rcv_isn + 1;
+		newtp->segs_in = 0;
 
 		newtp->snd_sml = newtp->snd_una =
 		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
@@ -539,6 +540,9 @@
 		newtp->fastopen_rsk = NULL;
 		newtp->syn_data_acked = 0;
 
+		newtp->saved_syn = req->saved_syn;
+		req->saved_syn = NULL;
+
 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
 	}
 	return newsk;
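tcp_reqsk_record_syn() stores the SYN's network and TCP headers as a
u32 length followed by the raw header bytes, and the hunk above hands
that buffer from the request sock to the child socket. A hedged sketch
of a consumer of that layout (helper name is illustrative):

#include <stdint.h>
#include <string.h>

/* saved_syn[0] holds the header length; bytes start at &saved_syn[1]. */
static uint32_t copy_saved_syn(const uint32_t *saved_syn,
			       void *buf, uint32_t buflen)
{
	uint32_t len = saved_syn[0];

	if (len > buflen)
		len = buflen;
	memcpy(buf, &saved_syn[1], len);
	return len;
}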
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 3f7c2fc..9864a2d 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -77,7 +77,7 @@
 	oldlen = (u16)~skb->len;
 	__skb_pull(skb, thlen);
 
-	mss = tcp_skb_mss(skb);
+	mss = skb_shinfo(skb)->gso_size;
 	if (unlikely(skb->len <= mss))
 		goto out;
 
@@ -242,7 +242,7 @@
 		flush |= *(u32 *)((u8 *)th + i) ^
 			 *(u32 *)((u8 *)th2 + i);
 
-	mss = tcp_skb_mss(p);
+	mss = skb_shinfo(p)->gso_size;
 
 	flush |= (len - 1) >= mss;
 	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a369e8a..b1c218d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -50,8 +50,8 @@
  */
 int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
 
-/* Default TSQ limit of two TSO segments */
-int sysctl_tcp_limit_output_bytes __read_mostly = 131072;
+/* Default TSQ limit of four TSO segments */
+int sysctl_tcp_limit_output_bytes __read_mostly = 262144;
 
 /* This limits the percentage of the congestion window which we
  * will allow a single TSO frame to consume.  Building TSO frames
@@ -350,6 +350,15 @@
 	}
 }
 
+static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
+{
+	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
+		/* tp->ecn_flags are cleared at a later point in time when
+		 * the SYN-ACK is ultimately received.
+		 */
+		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
+}
+
 static void
 tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th,
 		    struct sock *sk)
@@ -393,8 +402,6 @@
  */
 static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 {
-	struct skb_shared_info *shinfo = skb_shinfo(skb);
-
 	skb->ip_summed = CHECKSUM_PARTIAL;
 	skb->csum = 0;
 
@@ -402,8 +409,6 @@
 	TCP_SKB_CB(skb)->sacked = 0;
 
 	tcp_skb_pcount_set(skb, 1);
-	shinfo->gso_size = 0;
-	shinfo->gso_type = 0;
 
 	TCP_SKB_CB(skb)->seq = seq;
 	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
@@ -994,6 +999,7 @@
 	}
 
 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
+	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
 	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
 		tcp_ecn_send(sk, skb, tcp_header_size);
 
@@ -1018,8 +1024,10 @@
 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
 			      tcp_skb_pcount(skb));
 
-	/* OK, its time to fill skb_shinfo(skb)->gso_segs */
+	tp->segs_out += tcp_skb_pcount(skb);
+	/* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
 	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
+	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
 
 	/* Our usage of tstamp should remain private */
 	skb->tstamp.tv64 = 0;
@@ -1056,25 +1064,17 @@
 }
 
 /* Initialize TSO segments for a packet. */
-static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
-				 unsigned int mss_now)
+static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
 {
-	struct skb_shared_info *shinfo = skb_shinfo(skb);
-
-	/* Make sure we own this skb before messing gso_size/gso_segs */
-	WARN_ON_ONCE(skb_cloned(skb));
-
 	if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
 		/* Avoid the costly divide in the normal
 		 * non-TSO case.
 		 */
 		tcp_skb_pcount_set(skb, 1);
-		shinfo->gso_size = 0;
-		shinfo->gso_type = 0;
+		TCP_SKB_CB(skb)->tcp_gso_size = 0;
 	} else {
 		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
-		shinfo->gso_size = mss_now;
-		shinfo->gso_type = sk->sk_gso_type;
+		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
 	}
 }
 
@@ -1163,7 +1163,7 @@
 		return -ENOMEM;
 
 	/* Get a new skb... force flag on. */
-	buff = sk_stream_alloc_skb(sk, nsize, gfp);
+	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
 	if (!buff)
 		return -ENOMEM; /* We'll just try again later. */
 
@@ -1206,8 +1206,8 @@
 	old_factor = tcp_skb_pcount(skb);
 
 	/* Fix up tso_factor for both original and new SKB.  */
-	tcp_set_skb_tso_segs(sk, skb, mss_now);
-	tcp_set_skb_tso_segs(sk, buff, mss_now);
+	tcp_set_skb_tso_segs(skb, mss_now);
+	tcp_set_skb_tso_segs(buff, mss_now);
 
 	/* If this packet has been sent out already, we must
 	 * adjust the various packet counters.
@@ -1287,7 +1287,7 @@
 
 	/* Any change of skb->len requires recalculation of tso factor. */
 	if (tcp_skb_pcount(skb) > 1)
-		tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
+		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
 
 	return 0;
 }
@@ -1619,13 +1619,12 @@
  * This must be invoked the first time we consider transmitting
  * SKB onto the wire.
  */
-static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
-			     unsigned int mss_now)
+static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
 {
 	int tso_segs = tcp_skb_pcount(skb);
 
 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
-		tcp_set_skb_tso_segs(sk, skb, mss_now);
+		tcp_set_skb_tso_segs(skb, mss_now);
 		tso_segs = tcp_skb_pcount(skb);
 	}
 	return tso_segs;
@@ -1680,7 +1679,7 @@
 	const struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int cwnd_quota;
 
-	tcp_init_tso_segs(sk, skb, cur_mss);
+	tcp_init_tso_segs(skb, cur_mss);
 
 	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
 		return 0;
@@ -1722,7 +1721,7 @@
 	if (skb->len != skb->data_len)
 		return tcp_fragment(sk, skb, len, mss_now, gfp);
 
-	buff = sk_stream_alloc_skb(sk, 0, gfp);
+	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
 	if (unlikely(!buff))
 		return -ENOMEM;
 
@@ -1749,8 +1748,8 @@
 	tcp_fragment_tstamp(skb, buff);
 
 	/* Fix up tso_factor for both original and new SKB.  */
-	tcp_set_skb_tso_segs(sk, skb, mss_now);
-	tcp_set_skb_tso_segs(sk, buff, mss_now);
+	tcp_set_skb_tso_segs(skb, mss_now);
+	tcp_set_skb_tso_segs(buff, mss_now);
 
 	/* Link BUFF into the send queue. */
 	__skb_header_release(buff);
@@ -1941,7 +1940,7 @@
 	}
 
 	/* We're allowed to probe.  Build it now. */
-	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC);
+	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
 	if (!nskb)
 		return -1;
 	sk->sk_wmem_queued += nskb->truesize;
@@ -1984,7 +1983,7 @@
 								 skb->len, 0);
 			} else {
 				__pskb_trim_head(skb, copy);
-				tcp_set_skb_tso_segs(sk, skb, mss_now);
+				tcp_set_skb_tso_segs(skb, mss_now);
 			}
 			TCP_SKB_CB(skb)->seq += copy;
 		}
@@ -1994,7 +1993,7 @@
 		if (len >= probe_size)
 			break;
 	}
-	tcp_init_tso_segs(sk, nskb, nskb->len);
+	tcp_init_tso_segs(nskb, nskb->len);
 
 	/* We're ready to send.  If this fails, the probe will
 	 * be resegmented into mss-sized pieces by tcp_write_xmit().
@@ -2056,7 +2055,7 @@
 	while ((skb = tcp_send_head(sk))) {
 		unsigned int limit;
 
-		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
+		tso_segs = tcp_init_tso_segs(skb, mss_now);
 		BUG_ON(!tso_segs);
 
 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
@@ -2078,7 +2077,7 @@
 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
 			break;
 
-		if (tso_segs == 1 || !max_segs) {
+		if (tso_segs == 1) {
 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
 						     (tcp_skb_is_last(sk, skb) ?
 						      nonagle : TCP_NAGLE_PUSH))))
@@ -2091,7 +2090,7 @@
 		}
 
 		limit = mss_now;
-		if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp))
+		if (tso_segs > 1 && !tcp_urg_mode(tp))
 			limit = tcp_mss_split_point(sk, skb, mss_now,
 						    min_t(unsigned int,
 							  cwnd_quota,
@@ -2392,7 +2391,7 @@
 	if (free_space < (full_space >> 1)) {
 		icsk->icsk_ack.quick = 0;
 
-		if (sk_under_memory_pressure(sk))
+		if (tcp_under_memory_pressure(sk))
 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
 					       4U * tp->advmss);
 
@@ -2610,11 +2609,15 @@
 		if (unlikely(oldpcount > 1)) {
 			if (skb_unclone(skb, GFP_ATOMIC))
 				return -ENOMEM;
-			tcp_init_tso_segs(sk, skb, cur_mss);
+			tcp_init_tso_segs(skb, cur_mss);
 			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
 		}
 	}
 
+	/* RFC3168, section 6.1.1.1. ECN fallback */
+	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
+		tcp_ecn_clear_syn(sk, skb);
+
 	tcp_retrans_try_collapse(sk, skb, cur_mss);
 
 	/* Make a copy, if the first transmission SKB clone we made
@@ -2816,8 +2819,10 @@
  * connection tear down and (memory) recovery.
  * Otherwise tcp_send_fin() could be tempted to either delay FIN
  * or even be forced to close flow without any FIN.
+ * In general, we want to allow one skb per socket to avoid hangs
+ * with edge-triggered epoll().
  */
-static void sk_forced_wmem_schedule(struct sock *sk, int size)
+void sk_forced_mem_schedule(struct sock *sk, int size)
 {
 	int amt, status;
 
@@ -2841,7 +2846,7 @@
 	 * Note: in the latter case, FIN packet will be sent after a timeout,
 	 * as TCP stack thinks it has already been transmitted.
 	 */
-	if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
+	if (tskb && (tcp_send_head(sk) || tcp_under_memory_pressure(sk))) {
 coalesce:
 		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
 		TCP_SKB_CB(tskb)->end_seq++;
@@ -2864,7 +2869,7 @@
 			return;
 		}
 		skb_reserve(skb, MAX_TCP_HEADER);
-		sk_forced_wmem_schedule(sk, skb->truesize);
+		sk_forced_mem_schedule(sk, skb->truesize);
 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
 		tcp_init_nondata_skb(skb, tp->write_seq,
 				     TCPHDR_ACK | TCPHDR_FIN);
@@ -3175,7 +3180,7 @@
 	/* limit to order-0 allocations */
 	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
 
-	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation);
+	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
 	if (!syn_data)
 		goto fallback;
 	syn_data->ip_summed = CHECKSUM_PARTIAL;
@@ -3241,7 +3246,7 @@
 		return 0;
 	}
 
-	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
 	if (unlikely(!buff))
 		return -ENOBUFS;
 
@@ -3382,7 +3387,7 @@
  * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
  * out-of-date with SND.UNA-1 to probe window.
  */
-static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
+static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
@@ -3400,6 +3405,7 @@
 	 */
 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
 	skb_mstamp_get(&skb->skb_mstamp);
+	NET_INC_STATS_BH(sock_net(sk), mib);
 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
 }
 
@@ -3407,12 +3413,12 @@
 {
 	if (sk->sk_state == TCP_ESTABLISHED) {
 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
-		tcp_xmit_probe_skb(sk, 0);
+		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
 	}
 }
 
 /* Initiate keepalive or window probe from timer. */
-int tcp_write_wakeup(struct sock *sk)
+int tcp_write_wakeup(struct sock *sk, int mib)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
@@ -3440,7 +3446,7 @@
 			if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC))
 				return -1;
 		} else if (!tcp_skb_pcount(skb))
-			tcp_set_skb_tso_segs(sk, skb, mss);
+			tcp_set_skb_tso_segs(skb, mss);
 
 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
@@ -3449,8 +3455,8 @@
 		return err;
 	} else {
 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
-			tcp_xmit_probe_skb(sk, 1);
-		return tcp_xmit_probe_skb(sk, 0);
+			tcp_xmit_probe_skb(sk, 1, mib);
+		return tcp_xmit_probe_skb(sk, 0, mib);
 	}
 }
 
@@ -3464,7 +3470,7 @@
 	unsigned long probe_max;
 	int err;
 
-	err = tcp_write_wakeup(sk);
+	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
 
 	if (tp->packets_out || !tcp_send_head(sk)) {
 		/* Cancel probe timer, if it is not required. */
@@ -3490,7 +3496,7 @@
 		probe_max = TCP_RESOURCE_PROBE_INTERVAL;
 	}
 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-				  inet_csk_rto_backoff(icsk, probe_max),
+				  tcp_probe0_when(sk, probe_max),
 				  TCP_RTO_MAX);
 }
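tcp_ecn_clear_syn() above implements the RFC 3168 (section 6.1.1.1)
fallback added in this series: when an ECN-setup SYN has to be
retransmitted, the ECE/CWR bits are stripped so the retry looks like a
plain SYN to ECN-hostile middleboxes. A standalone sketch of the flag
handling (flag values match the TCP header bit definitions):

#include <stdint.h>

#define TCPHDR_SYN	0x02
#define TCPHDR_ECE	0x40
#define TCPHDR_CWR	0x80
#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)

/* Retry the handshake without ECN if fallback is enabled. */
static uint8_t ecn_fallback_syn(uint8_t flags, int fallback)
{
	if (fallback && (flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
		flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
	return flags;
}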
 
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8c65dc1..5b752f5 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -247,7 +247,7 @@
 	}
 
 out:
-	if (sk_under_memory_pressure(sk))
+	if (tcp_under_memory_pressure(sk))
 		sk_mem_reclaim(sk);
 }
 
@@ -616,7 +616,7 @@
 			tcp_write_err(sk);
 			goto out;
 		}
-		if (tcp_write_wakeup(sk) <= 0) {
+		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
 			icsk->icsk_probes_out++;
 			elapsed = keepalive_intvl_when(tp);
 		} else {
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index b763c39..6116604 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -170,6 +170,7 @@
 	.dump_one	 = udp_diag_dump_one,
 	.idiag_get_info  = udp_diag_get_info,
 	.idiag_type	 = IPPROTO_UDP,
+	.idiag_info_size = 0,
 };
 
 static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
@@ -190,6 +191,7 @@
 	.dump_one	 = udplite_diag_dump_one,
 	.idiag_get_info  = udp_diag_get_info,
 	.idiag_type	 = IPPROTO_UDPLITE,
+	.idiag_info_size = 0,
 };
 
 static int __init udp_diag_init(void)
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index 6bb98cc1..933ea90 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -15,12 +15,10 @@
 	struct socket *sock = NULL;
 	struct sockaddr_in udp_addr;
 
-	err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
+	err = sock_create_kern(net, AF_INET, SOCK_DGRAM, 0, &sock);
 	if (err < 0)
 		goto error;
 
-	sk_change_net(sock->sk, net);
-
 	udp_addr.sin_family = AF_INET;
 	udp_addr.sin_addr = cfg->local_ip;
 	udp_addr.sin_port = cfg->local_udp_port;
@@ -47,7 +45,7 @@
 error:
 	if (sock) {
 		kernel_sock_shutdown(sock, SHUT_RDWR);
-		sk_release_kernel(sock->sk);
+		sock_release(sock);
 	}
 	*sockp = NULL;
 	return err;
@@ -101,7 +99,7 @@
 {
 	rcu_assign_sk_user_data(sock->sk, NULL);
 	kernel_sock_shutdown(sock, SHUT_RDWR);
-	sk_release_kernel(sock->sk);
+	sock_release(sock);
 }
 EXPORT_SYMBOL_GPL(udp_tunnel_sock_release);
 
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 2e8c061..0f3f199 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -48,4 +48,5 @@
 
 ifneq ($(CONFIG_IPV6),)
 obj-$(CONFIG_NET_UDP_TUNNEL) += ip6_udp_tunnel.o
+obj-y += mcast_snoop.o
 endif
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 37b70e8..21c2c81 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2121,6 +2121,8 @@
 	fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0);
 	if (!fn)
 		goto out;
+
+	noflags |= RTF_CACHE;
 	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
 		if (rt->dst.dev->ifindex != dev->ifindex)
 			continue;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index eef63b3..7de52b6 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -167,7 +167,7 @@
 	WARN_ON(!answer_prot->slab);
 
 	err = -ENOBUFS;
-	sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot);
+	sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot, kern);
 	if (!sk)
 		goto out;
 
@@ -362,7 +362,8 @@
 		np->saddr = addr->sin6_addr;
 
 	/* Make sure we are allowed to bind here. */
-	if (sk->sk_prot->get_port(sk, snum)) {
+	if ((snum || !inet->bind_address_no_port) &&
+	    sk->sk_prot->get_port(sk, snum)) {
 		inet_reset_saddr(sk);
 		err = -EADDRINUSE;
 		goto out;
@@ -768,6 +769,7 @@
 	net->ipv6.sysctl.auto_flowlabels = 0;
 	net->ipv6.sysctl.idgen_retries = 3;
 	net->ipv6.sysctl.idgen_delay = 1 * HZ;
+	net->ipv6.sysctl.flowlabel_state_ranges = 1;
 	atomic_set(&net->ipv6.fib6_sernum, 1);
 
 	err = ipv6_init_mibs(net);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 762a58c..62d908e 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -325,6 +325,16 @@
 	kfree_skb(skb);
 }
 
+/* For some errors we have valid addr_offset even with zero payload and
+ * zero port. Also, addr_offset should be supported if port is set.
+ */
+static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
+{
+	return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6 ||
+	       serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
+	       serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
+}
+
 /* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL.
  *
  * At one point, excluding local errors was a quick test to identify icmp/icmp6
@@ -389,7 +399,7 @@
 
 	serr = SKB_EXT_ERR(skb);
 
-	if (sin && serr->port) {
+	if (sin && ipv6_datagram_support_addr(serr)) {
 		const unsigned char *nh = skb_network_header(skb);
 		sin->sin6_family = AF_INET6;
 		sin->sin6_flowinfo = 0;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 2c2b5d5..713d743 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -207,7 +207,7 @@
 			struct inet_peer *peer;
 
 			peer = inet_getpeer_v6(net->ipv6.peers,
-					       &rt->rt6i_dst.addr, 1);
+					       &fl6->daddr, 1);
 			res = inet_peer_xrlim_allow(peer, tmo);
 			if (peer)
 				inet_putpeer(peer);
@@ -337,7 +337,7 @@
 	 * We won't send icmp if the destination is known
 	 * anycast.
 	 */
-	if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
+	if (ipv6_anycast_destination(dst, &fl6->daddr)) {
 		net_dbg_ratelimited("icmp6_send: acast source\n");
 		dst_release(dst);
 		return ERR_PTR(-EINVAL);
@@ -564,7 +564,7 @@
 
 	if (!ipv6_unicast_destination(skb) &&
 	    !(net->ipv6.sysctl.anycast_src_echo_reply &&
-	      ipv6_anycast_destination(skb)))
+	      ipv6_anycast_destination(skb_dst(skb), saddr)))
 		saddr = NULL;
 
 	memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 871641b..b4fd96d 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -257,7 +257,7 @@
 	return -EADDRNOTAVAIL;
 }
 
-static inline u32 inet6_sk_port_offset(const struct sock *sk)
+static u32 inet6_sk_port_offset(const struct sock *sk)
 {
 	const struct inet_sock *inet = inet_sk(sk);
 
@@ -269,7 +269,11 @@
 int inet6_hash_connect(struct inet_timewait_death_row *death_row,
 		       struct sock *sk)
 {
-	return __inet_hash_connect(death_row, sk, inet6_sk_port_offset(sk),
+	u32 port_offset = 0;
+
+	if (!inet_sk(sk)->inet_num)
+		port_offset = inet6_sk_port_offset(sk);
+	return __inet_hash_connect(death_row, sk, port_offset,
 				   __inet6_check_established);
 }
 EXPORT_SYMBOL_GPL(inet6_hash_connect);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index bde57b1..55d1986 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -154,10 +154,32 @@
 	kmem_cache_free(fib6_node_kmem, fn);
 }
 
+static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
+{
+	int cpu;
+
+	if (!non_pcpu_rt->rt6i_pcpu)
+		return;
+
+	for_each_possible_cpu(cpu) {
+		struct rt6_info **ppcpu_rt;
+		struct rt6_info *pcpu_rt;
+
+		ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu);
+		pcpu_rt = *ppcpu_rt;
+		if (pcpu_rt) {
+			dst_free(&pcpu_rt->dst);
+			*ppcpu_rt = NULL;
+		}
+	}
+}
+
 static void rt6_release(struct rt6_info *rt)
 {
-	if (atomic_dec_and_test(&rt->rt6i_ref))
+	if (atomic_dec_and_test(&rt->rt6i_ref)) {
+		rt6_free_pcpu(rt);
 		dst_free(&rt->dst);
+	}
 }
 
 static void fib6_link_table(struct net *net, struct fib6_table *tb)
@@ -738,6 +760,7 @@
 					rt6_clean_expires(iter);
 				else
 					rt6_set_expires(iter, rt->dst.expires);
+				iter->rt6i_pmtu = rt->rt6i_pmtu;
 				return -EEXIST;
 			}
 			/* If we have the same destination and the same metric,
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index d491125..1f9ebe3 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -595,6 +595,10 @@
 		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
 			return -EINVAL;
 
+		if (net->ipv6.sysctl.flowlabel_state_ranges &&
+		    (freq.flr_label & IPV6_FLOWLABEL_STATELESS_FLAG))
+			return -ERANGE;
+
 		fl = fl_create(net, sk, &freq, optval, optlen, &err);
 		if (!fl)
 			return err;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index bc09cb9..d5f7716 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -105,7 +105,7 @@
 	}
 
 	rcu_read_lock_bh();
-	nexthop = rt6_nexthop((struct rt6_info *)dst);
+	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
 	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
 	if (unlikely(!neigh))
 		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
@@ -459,7 +459,7 @@
 		else
 			target = &hdr->daddr;
 
-		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
+		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);
 
 		/* Limit redirects both by destination (here)
 		   and by source (inside ndisc_send_redirect)
@@ -551,7 +551,7 @@
 	struct frag_hdr *fh;
 	unsigned int mtu, hlen, left, len;
 	int hroom, troom;
-	__be32 frag_id = 0;
+	__be32 frag_id;
 	int ptr, offset = 0, err = 0;
 	u8 *prevhdr, nexthdr = 0;
 	struct net *net = dev_net(skb_dst(skb)->dev);
@@ -564,18 +564,17 @@
 	/* We must not fragment if the socket is set to force MTU discovery
 	 * or if the skb is not generated by a local socket.
 	 */
-	if (unlikely(!skb->ignore_df && skb->len > mtu) ||
-		     (IP6CB(skb)->frag_max_size &&
-		      IP6CB(skb)->frag_max_size > mtu)) {
-		if (skb->sk && dst_allfrag(skb_dst(skb)))
-			sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
+	if (unlikely(!skb->ignore_df && skb->len > mtu))
+		goto fail_toobig;
 
-		skb->dev = skb_dst(skb)->dev;
-		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
-		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-			      IPSTATS_MIB_FRAGFAILS);
-		kfree_skb(skb);
-		return -EMSGSIZE;
+	if (IP6CB(skb)->frag_max_size) {
+		if (IP6CB(skb)->frag_max_size > mtu)
+			goto fail_toobig;
+
+		/* don't send fragments larger than what we received */
+		mtu = IP6CB(skb)->frag_max_size;
+		if (mtu < IPV6_MIN_MTU)
+			mtu = IPV6_MIN_MTU;
 	}
 
 	if (np && np->frag_size < mtu) {
@@ -584,6 +583,9 @@
 	}
 	mtu -= hlen + sizeof(struct frag_hdr);
 
+	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
+				    &ipv6_hdr(skb)->saddr);
+
 	if (skb_has_frag_list(skb)) {
 		int first_len = skb_pagelen(skb);
 		struct sk_buff *frag2;
@@ -632,11 +634,10 @@
 		skb_reset_network_header(skb);
 		memcpy(skb_network_header(skb), tmp_hdr, hlen);
 
-		ipv6_select_ident(net, fh, rt);
 		fh->nexthdr = nexthdr;
 		fh->reserved = 0;
 		fh->frag_off = htons(IP6_MF);
-		frag_id = fh->identification;
+		fh->identification = frag_id;
 
 		first_len = skb_pagelen(skb);
 		skb->data_len = first_len - skb_headlen(skb);
@@ -778,11 +779,7 @@
 		 */
 		fh->nexthdr = nexthdr;
 		fh->reserved = 0;
-		if (!frag_id) {
-			ipv6_select_ident(net, fh, rt);
-			frag_id = fh->identification;
-		} else
-			fh->identification = frag_id;
+		fh->identification = frag_id;
 
 		/*
 		 *	Copy a block of the IP datagram.
@@ -815,6 +812,14 @@
 	consume_skb(skb);
 	return err;
 
+fail_toobig:
+	if (skb->sk && dst_allfrag(skb_dst(skb)))
+		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
+
+	skb->dev = skb_dst(skb)->dev;
+	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+	err = -EMSGSIZE;
+
 fail:
 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 		      IPSTATS_MIB_FRAGFAILS);
@@ -936,7 +941,8 @@
 	 */
 	rt = (struct rt6_info *) *dst;
 	rcu_read_lock_bh();
-	n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
+	n = __ipv6_neigh_lookup_noref(rt->dst.dev,
+				      rt6_nexthop(rt, &fl6->daddr));
 	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
 	rcu_read_unlock_bh();
 
@@ -1060,11 +1066,10 @@
 			int odd, struct sk_buff *skb),
 			void *from, int length, int hh_len, int fragheaderlen,
 			int transhdrlen, int mtu, unsigned int flags,
-			struct rt6_info *rt)
+			const struct flowi6 *fl6)
 
 {
 	struct sk_buff *skb;
-	struct frag_hdr fhdr;
 	int err;
 
 	/* There is support for UDP large send offload by network
@@ -1106,8 +1111,9 @@
 	skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
 				     sizeof(struct frag_hdr)) & ~7;
 	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
-	ipv6_select_ident(sock_net(sk), &fhdr, rt);
-	skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
+	skb_shinfo(skb)->ip6_frag_id = ipv6_select_ident(sock_net(sk),
+							 &fl6->daddr,
+							 &fl6->saddr);
 
 append:
 	return skb_append_datato_frags(sk, skb, getfrag, from,
@@ -1332,7 +1338,7 @@
 	    (sk->sk_type == SOCK_DGRAM)) {
 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
 					  hh_len, fragheaderlen,
-					  transhdrlen, mtu, flags, rt);
+					  transhdrlen, mtu, flags, fl6);
 		if (err)
 			goto error;
 		return 0;
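The reworked ip6_fragment() above now distinguishes two failure modes:
an over-sized, non-fragmentable skb gets ICMPV6_PKT_TOOBIG via the new
fail_toobig label, while forwarded traffic is refragmented no larger
than the fragments originally received, clamped at the IPv6 minimum
MTU. A sketch of just that MTU decision (0 meaning "send PKT_TOOBIG"):

#define IPV6_MIN_MTU 1280

static unsigned int frag_mtu(unsigned int path_mtu,
			     unsigned int frag_max_size)
{
	unsigned int mtu = path_mtu;

	if (frag_max_size) {
		if (frag_max_size > mtu)
			return 0;	/* fail_toobig */
		/* don't send fragments larger than what we received */
		mtu = frag_max_size;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
	}
	return mtu;
}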
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 5cafd92..2e67b66 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -151,7 +151,7 @@
 void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
 {
 	struct rt6_info *rt = (struct rt6_info *) dst;
-	t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+	t->dst_cookie = rt6_get_cookie(rt);
 	dst_release(t->dst_cache);
 	t->dst_cache = dst;
 }
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index bba8903..e1a1136 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -19,12 +19,10 @@
 	int err;
 	struct socket *sock = NULL;
 
-	err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
+	err = sock_create_kern(net, AF_INET6, SOCK_DGRAM, 0, &sock);
 	if (err < 0)
 		goto error;
 
-	sk_change_net(sock->sk, net);
-
 	udp6_addr.sin6_family = AF_INET6;
 	memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6,
 	       sizeof(udp6_addr.sin6_addr));
@@ -55,7 +53,7 @@
 error:
 	if (sock) {
 		kernel_sock_shutdown(sock, SHUT_RDWR);
-		sk_release_kernel(sock->sk);
+		sock_release(sock);
 	}
 	*sockp = NULL;
 	return err;
diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
new file mode 100644
index 0000000..df8afe5
--- /dev/null
+++ b/net/ipv6/mcast_snoop.c
@@ -0,0 +1,213 @@
+/* Copyright (C) 2010: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
+ * Copyright (C) 2015: Linus Lüssing <linus.luessing@c0d3.blue>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Based on the MLD support added to br_multicast.c by YOSHIFUJI Hideaki.
+ */
+
+#include <linux/skbuff.h>
+#include <net/ipv6.h>
+#include <net/mld.h>
+#include <net/addrconf.h>
+#include <net/ip6_checksum.h>
+
+static int ipv6_mc_check_ip6hdr(struct sk_buff *skb)
+{
+	const struct ipv6hdr *ip6h;
+	unsigned int len;
+	unsigned int offset = skb_network_offset(skb) + sizeof(*ip6h);
+
+	if (!pskb_may_pull(skb, offset))
+		return -EINVAL;
+
+	ip6h = ipv6_hdr(skb);
+
+	if (ip6h->version != 6)
+		return -EINVAL;
+
+	len = offset + ntohs(ip6h->payload_len);
+	if (skb->len < len || len <= offset)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int ipv6_mc_check_exthdrs(struct sk_buff *skb)
+{
+	const struct ipv6hdr *ip6h;
+	int offset;
+	u8 nexthdr;
+	__be16 frag_off;
+
+	ip6h = ipv6_hdr(skb);
+
+	if (ip6h->nexthdr != IPPROTO_HOPOPTS)
+		return -ENOMSG;
+
+	nexthdr = ip6h->nexthdr;
+	offset = skb_network_offset(skb) + sizeof(*ip6h);
+	offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
+
+	if (offset < 0)
+		return -EINVAL;
+
+	if (nexthdr != IPPROTO_ICMPV6)
+		return -ENOMSG;
+
+	skb_set_transport_header(skb, offset);
+
+	return 0;
+}
+
+static int ipv6_mc_check_mld_reportv2(struct sk_buff *skb)
+{
+	unsigned int len = skb_transport_offset(skb);
+
+	len += sizeof(struct mld2_report);
+
+	return pskb_may_pull(skb, len) ? 0 : -EINVAL;
+}
+
+static int ipv6_mc_check_mld_query(struct sk_buff *skb)
+{
+	struct mld_msg *mld;
+	unsigned int len = skb_transport_offset(skb);
+
+	/* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
+	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
+		return -EINVAL;
+
+	len += sizeof(struct mld_msg);
+	if (skb->len < len)
+		return -EINVAL;
+
+	/* MLDv1? */
+	if (skb->len != len) {
+		/* or MLDv2? */
+		len += sizeof(struct mld2_query) - sizeof(struct mld_msg);
+		if (skb->len < len || !pskb_may_pull(skb, len))
+			return -EINVAL;
+	}
+
+	mld = (struct mld_msg *)skb_transport_header(skb);
+
+	/* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer
+	 * all-nodes destination address (ff02::1) for general queries
+	 */
+	if (ipv6_addr_any(&mld->mld_mca) &&
+	    !ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
+{
+	struct mld_msg *mld = (struct mld_msg *)skb_transport_header(skb);
+
+	switch (mld->mld_type) {
+	case ICMPV6_MGM_REDUCTION:
+	case ICMPV6_MGM_REPORT:
+		return 0;
+	case ICMPV6_MLD2_REPORT:
+		return ipv6_mc_check_mld_reportv2(skb);
+	case ICMPV6_MGM_QUERY:
+		return ipv6_mc_check_mld_query(skb);
+	default:
+		return -ENOMSG;
+	}
+}
+
+static inline __sum16 ipv6_mc_validate_checksum(struct sk_buff *skb)
+{
+	return skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo);
+}
+
+static int __ipv6_mc_check_mld(struct sk_buff *skb,
+			       struct sk_buff **skb_trimmed)
+
+{
+	struct sk_buff *skb_chk = NULL;
+	unsigned int transport_len;
+	unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
+	int ret;
+
+	transport_len = ntohs(ipv6_hdr(skb)->payload_len);
+	transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr);
+
+	skb_get(skb);
+	skb_chk = skb_checksum_trimmed(skb, transport_len,
+				       ipv6_mc_validate_checksum);
+	if (!skb_chk)
+		return -EINVAL;
+
+	if (!pskb_may_pull(skb_chk, len)) {
+		kfree_skb(skb_chk);
+		return -EINVAL;
+	}
+
+	ret = ipv6_mc_check_mld_msg(skb_chk);
+	if (ret) {
+		kfree_skb(skb_chk);
+		return ret;
+	}
+
+	if (skb_trimmed)
+		*skb_trimmed = skb_chk;
+	else
+		kfree_skb(skb_chk);
+
+	return 0;
+}
+
+/**
+ * ipv6_mc_check_mld - checks whether this is a sane MLD packet
+ * @skb: the skb to validate
+ * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional)
+ *
+ * Checks whether an IPv6 packet is a valid MLD packet. If so, it sets
+ * the skb's network and transport headers accordingly and returns zero.
+ *
+ * -EINVAL: A broken packet was detected, i.e. it violates some internet
+ *  standard
+ * -ENOMSG: IP header validation succeeded but it is not an MLD packet.
+ * -ENOMEM: A memory allocation failure happened.
+ *
+ * Optionally, an skb pointer might be provided via skb_trimmed (or pass
+ * NULL): after parsing an MLD packet successfully, it will point to
+ * an skb which has its tail aligned to the IP packet end. This might
+ * either be the originally provided skb or a trimmed, cloned version if
+ * the skb frame had data beyond the IP packet. A cloned skb allows us
+ * to leave the original skb and its full frame unchanged (which might be
+ * desirable for layer 2 frame jugglers).
+ *
+ * The caller needs to release a reference count from any returned skb_trimmed.
+ */
+int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
+{
+	int ret;
+
+	ret = ipv6_mc_check_ip6hdr(skb);
+	if (ret < 0)
+		return ret;
+
+	ret = ipv6_mc_check_exthdrs(skb);
+	if (ret < 0)
+		return ret;
+
+	return __ipv6_mc_check_mld(skb, skb_trimmed);
+}
+EXPORT_SYMBOL(ipv6_mc_check_mld);
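A hedged caller sketch following the kernel-doc contract above (the
exported function is real; the caller is illustrative):

static void handle_possible_mld(struct sk_buff *skb)
{
	struct sk_buff *skb_trimmed = NULL;
	int err;

	err = ipv6_mc_check_mld(skb, &skb_trimmed);
	if (err == -ENOMSG)
		return;		/* valid IPv6, just not MLD */
	if (err < 0)
		return;		/* broken packet or allocation failure */

	/* ... parse MLD via skb_trimmed, whose tail is the IP packet end ... */

	if (skb_trimmed)
		kfree_skb(skb_trimmed);	/* release the extra reference */
}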
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 96f153c..0a05b35 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1506,7 +1506,7 @@
 			  "Redirect: destination is not a neighbour\n");
 		goto release;
 	}
-	peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
+	peer = inet_getpeer_v6(net->ipv6.peers, &ipv6_hdr(skb)->saddr, 1);
 	ret = inet_peer_xrlim_allow(peer, 1*HZ);
 	if (peer)
 		inet_putpeer(peer);
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index d958718..b4de08a 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -191,6 +191,8 @@
 
 static const struct nf_ipv6_ops ipv6ops = {
 	.chk_addr	= ipv6_chk_addr,
+	.route_input    = ip6_route_input,
+	.fragment	= ip6_fragment
 };
 
 static const struct nf_afinfo nf_ip6_afinfo = {
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index ca69983..b552cf0 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -186,7 +186,8 @@
 
 config IP6_NF_MATCH_RPFILTER
 	tristate '"rpfilter" reverse path filter match support'
-	depends on NETFILTER_ADVANCED && (IP6_NF_MANGLE || IP6_NF_RAW)
+	depends on NETFILTER_ADVANCED
+	depends on IP6_NF_MANGLE || IP6_NF_RAW
 	---help---
 	  This option allows you to match packets whose replies would
 	  go out via the interface the packet came in.
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 62f5b0d..3c35ced 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -283,15 +283,13 @@
 			 const struct xt_table_info *private,
 			 const struct ip6t_entry *e)
 {
-	const void *table_base;
 	const struct ip6t_entry *root;
 	const char *hookname, *chainname, *comment;
 	const struct ip6t_entry *iter;
 	unsigned int rulenum = 0;
 	struct net *net = dev_net(in ? in : out);
 
-	table_base = private->entries[smp_processor_id()];
-	root = get_entry(table_base, private->hook_entry[hook]);
+	root = get_entry(private->entries, private->hook_entry[hook]);
 
 	hookname = chainname = hooknames[hook];
 	comment = comments[NF_IP6_TRACE_COMMENT_RULE];
@@ -357,7 +355,7 @@
 	 */
 	smp_read_barrier_depends();
 	cpu        = smp_processor_id();
-	table_base = private->entries[cpu];
+	table_base = private->entries;
 	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
 	stackptr   = per_cpu_ptr(private->stackptr, cpu);
 	origptr    = *stackptr;
@@ -367,6 +365,7 @@
 	do {
 		const struct xt_entry_target *t;
 		const struct xt_entry_match *ematch;
+		struct xt_counters *counter;
 
 		IP_NF_ASSERT(e);
 		acpar.thoff = 0;
@@ -384,7 +383,8 @@
 				goto no_match;
 		}
 
-		ADD_COUNTER(e->counters, skb->len, 1);
+		counter = xt_get_this_cpu_counter(&e->counters);
+		ADD_COUNTER(*counter, skb->len, 1);
 
 		t = ip6t_get_target_c(e);
 		IP_NF_ASSERT(t->u.kernel.target);
@@ -679,6 +679,10 @@
 	if (ret)
 		return ret;
 
+	e->counters.pcnt = xt_percpu_counter_alloc();
+	if (IS_ERR_VALUE(e->counters.pcnt))
+		return -ENOMEM;
+
 	j = 0;
 	mtpar.net	= net;
 	mtpar.table     = name;
@@ -714,6 +718,9 @@
 			break;
 		cleanup_match(ematch, net);
 	}
+
+	xt_percpu_counter_free(e->counters.pcnt);
+
 	return ret;
 }
 
@@ -797,6 +804,8 @@
 	if (par.target->destroy != NULL)
 		par.target->destroy(&par);
 	module_put(par.target->me);
+
+	xt_percpu_counter_free(e->counters.pcnt);
 }
 
 /* Checks and translates the user-supplied table segment (held in
@@ -879,12 +888,6 @@
 		return ret;
 	}
 
-	/* And one copy for every other CPU */
-	for_each_possible_cpu(i) {
-		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
-			memcpy(newinfo->entries[i], entry0, newinfo->size);
-	}
-
 	return ret;
 }
 
@@ -900,14 +903,16 @@
 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
 
 		i = 0;
-		xt_entry_foreach(iter, t->entries[cpu], t->size) {
+		xt_entry_foreach(iter, t->entries, t->size) {
+			struct xt_counters *tmp;
 			u64 bcnt, pcnt;
 			unsigned int start;
 
+			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
 			do {
 				start = read_seqcount_begin(s);
-				bcnt = iter->counters.bcnt;
-				pcnt = iter->counters.pcnt;
+				bcnt = tmp->bcnt;
+				pcnt = tmp->pcnt;
 			} while (read_seqcount_retry(s, start));
 
 			ADD_COUNTER(counters[i], bcnt, pcnt);
@@ -952,11 +957,7 @@
 	if (IS_ERR(counters))
 		return PTR_ERR(counters);
 
-	/* choose the copy that is on our node/cpu, ...
-	 * This choice is lazy (because current thread is
-	 * allowed to migrate to another cpu)
-	 */
-	loc_cpu_entry = private->entries[raw_smp_processor_id()];
+	loc_cpu_entry = private->entries;
 	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
 		ret = -EFAULT;
 		goto free_counters;
@@ -1064,16 +1065,16 @@
 			     struct xt_table_info *newinfo)
 {
 	struct ip6t_entry *iter;
-	void *loc_cpu_entry;
+	const void *loc_cpu_entry;
 	int ret;
 
 	if (!newinfo || !info)
 		return -EINVAL;
 
-	/* we dont care about newinfo->entries[] */
+	/* we don't care about newinfo->entries */
 	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
 	newinfo->initial_entries = 0;
-	loc_cpu_entry = info->entries[raw_smp_processor_id()];
+	loc_cpu_entry = info->entries;
 	xt_compat_init_offsets(AF_INET6, info->number);
 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
@@ -1194,7 +1195,6 @@
 	struct xt_table *t;
 	struct xt_table_info *oldinfo;
 	struct xt_counters *counters;
-	const void *loc_cpu_old_entry;
 	struct ip6t_entry *iter;
 
 	ret = 0;
@@ -1237,8 +1237,7 @@
 	get_counters(oldinfo, counters);
 
 	/* Decrease module usage counts and free resource */
-	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
-	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
+	xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
 		cleanup_entry(iter, net);
 
 	xt_free_table_info(oldinfo);
@@ -1284,8 +1283,7 @@
 	if (!newinfo)
 		return -ENOMEM;
 
-	/* choose the copy that is on our node/cpu */
-	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	loc_cpu_entry = newinfo->entries;
 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
 			   tmp.size) != 0) {
 		ret = -EFAULT;
@@ -1316,7 +1314,7 @@
 do_add_counters(struct net *net, const void __user *user, unsigned int len,
 		int compat)
 {
-	unsigned int i, curcpu;
+	unsigned int i;
 	struct xt_counters_info tmp;
 	struct xt_counters *paddc;
 	unsigned int num_counters;
@@ -1326,7 +1324,6 @@
 	struct xt_table *t;
 	const struct xt_table_info *private;
 	int ret = 0;
-	const void *loc_cpu_entry;
 	struct ip6t_entry *iter;
 	unsigned int addend;
 #ifdef CONFIG_COMPAT
@@ -1374,7 +1371,6 @@
 		goto free;
 	}
 
-
 	local_bh_disable();
 	private = t->private;
 	if (private->number != num_counters) {
@@ -1383,16 +1379,15 @@
 	}
 
 	i = 0;
-	/* Choose the copy that is on our node */
-	curcpu = smp_processor_id();
 	addend = xt_write_recseq_begin();
-	loc_cpu_entry = private->entries[curcpu];
-	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
-		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
+	xt_entry_foreach(iter, private->entries, private->size) {
+		struct xt_counters *tmp;
+
+		tmp = xt_get_this_cpu_counter(&iter->counters);
+		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
 		++i;
 	}
 	xt_write_recseq_end(addend);
-
  unlock_up_free:
 	local_bh_enable();
 	xt_table_unlock(t);
@@ -1459,7 +1454,6 @@
 compat_find_calc_match(struct xt_entry_match *m,
 		       const char *name,
 		       const struct ip6t_ip6 *ipv6,
-		       unsigned int hookmask,
 		       int *size)
 {
 	struct xt_match *match;
@@ -1528,8 +1522,7 @@
 	entry_offset = (void *)e - (void *)base;
 	j = 0;
 	xt_ematch_foreach(ematch, e) {
-		ret = compat_find_calc_match(ematch, name,
-					     &e->ipv6, e->comefrom, &off);
+		ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
 		if (ret != 0)
 			goto release_matches;
 		++j;
@@ -1623,6 +1616,9 @@
 	struct xt_mtchk_param mtpar;
 	struct xt_entry_match *ematch;
 
+	e->counters.pcnt = xt_percpu_counter_alloc();
+	if (IS_ERR_VALUE(e->counters.pcnt))
+		return -ENOMEM;
 	j = 0;
 	mtpar.net	= net;
 	mtpar.table     = name;
@@ -1647,6 +1643,9 @@
 			break;
 		cleanup_match(ematch, net);
 	}
+
+	xt_percpu_counter_free(e->counters.pcnt);
+
 	return ret;
 }
 
@@ -1731,7 +1730,7 @@
 		newinfo->hook_entry[i] = info->hook_entry[i];
 		newinfo->underflow[i] = info->underflow[i];
 	}
-	entry1 = newinfo->entries[raw_smp_processor_id()];
+	entry1 = newinfo->entries;
 	pos = entry1;
 	size = total_size;
 	xt_entry_foreach(iter0, entry0, total_size) {
@@ -1783,11 +1782,6 @@
 		return ret;
 	}
 
-	/* And one copy for every other CPU */
-	for_each_possible_cpu(i)
-		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
-			memcpy(newinfo->entries[i], entry1, newinfo->size);
-
 	*pinfo = newinfo;
 	*pentry0 = entry1;
 	xt_free_table_info(info);
@@ -1834,8 +1828,7 @@
 	if (!newinfo)
 		return -ENOMEM;
 
-	/* choose the copy that is on our node/cpu */
-	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	loc_cpu_entry = newinfo->entries;
 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
 			   tmp.size) != 0) {
 		ret = -EFAULT;
@@ -1906,7 +1899,6 @@
 	void __user *pos;
 	unsigned int size;
 	int ret = 0;
-	const void *loc_cpu_entry;
 	unsigned int i = 0;
 	struct ip6t_entry *iter;
 
@@ -1914,14 +1906,9 @@
 	if (IS_ERR(counters))
 		return PTR_ERR(counters);
 
-	/* choose the copy that is on our node/cpu, ...
-	 * This choice is lazy (because current thread is
-	 * allowed to migrate to another cpu)
-	 */
-	loc_cpu_entry = private->entries[raw_smp_processor_id()];
 	pos = userptr;
 	size = total_size;
-	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
+	xt_entry_foreach(iter, private->entries, total_size) {
 		ret = compat_copy_entry_to_user(iter, &pos,
 						&size, counters, i++);
 		if (ret != 0)
@@ -2096,8 +2083,7 @@
 		goto out;
 	}
 
-	/* choose the copy on our node/cpu, but dont care about preemption */
-	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	loc_cpu_entry = newinfo->entries;
 	memcpy(loc_cpu_entry, repl->entries, repl->size);
 
 	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
@@ -2127,7 +2113,7 @@
 	private = xt_unregister_table(table);
 
 	/* Decrease module usage counts and free resources */
-	loc_cpu_entry = private->entries[raw_smp_processor_id()];
+	loc_cpu_entry = private->entries;
 	xt_entry_foreach(iter, loc_cpu_entry, private->size)
 		cleanup_entry(iter, net);
 	if (private->number > private->initial_entries)
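The ip6_tables conversion above drops the per-CPU copies of the entire
ruleset in favour of one shared ruleset with per-CPU packet/byte
counters (xt_percpu_counter_alloc() plus xt_get_this_cpu_counter() on
the hot path). The read side then sums across CPUs, as get_counters()
does; a minimal standalone sketch of that summation pattern:

#include <stdint.h>

struct counters {
	uint64_t bcnt;	/* bytes */
	uint64_t pcnt;	/* packets */
};

/* Fold one rule's per-CPU counters into a single total. */
static struct counters sum_counters(const struct counters *percpu,
				    int ncpus)
{
	struct counters total = { 0, 0 };

	for (int cpu = 0; cpu < ncpus; cpu++) {
		total.bcnt += percpu[cpu].bcnt;
		total.pcnt += percpu[cpu].pcnt;
	}
	return total;
}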
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 85892af..928a0fb 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -8,9 +8,11 @@
 #include <net/ip6_fib.h>
 #include <net/addrconf.h>
 #include <net/secure_seq.h>
+#include <linux/netfilter.h>
 
 static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
-			       struct in6_addr *dst, struct in6_addr *src)
+			       const struct in6_addr *dst,
+			       const struct in6_addr *src)
 {
 	u32 hash, id;
 
@@ -60,17 +62,17 @@
 }
 EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
 
-void ipv6_select_ident(struct net *net, struct frag_hdr *fhdr,
-		       struct rt6_info *rt)
+__be32 ipv6_select_ident(struct net *net,
+			 const struct in6_addr *daddr,
+			 const struct in6_addr *saddr)
 {
 	static u32 ip6_idents_hashrnd __read_mostly;
 	u32 id;
 
 	net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
 
-	id = __ipv6_select_ident(net, ip6_idents_hashrnd, &rt->rt6i_dst.addr,
-				 &rt->rt6i_src.addr);
-	fhdr->identification = htonl(id);
+	id = __ipv6_select_ident(net, ip6_idents_hashrnd, daddr, saddr);
+	return htonl(id);
 }
 EXPORT_SYMBOL(ipv6_select_ident);
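With the new signature, fragment IDs depend only on the flow addresses
rather than on route internals, so every caller reduces to the same
one-liner. An illustrative call site (helper name is hypothetical):

/* Stamp a fragment header; net/daddr/saddr come from the caller. */
static void stamp_frag_id(struct net *net, struct frag_hdr *fh,
			  const struct in6_addr *daddr,
			  const struct in6_addr *saddr)
{
	fh->identification = ipv6_select_ident(net, daddr, saddr);
}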
 
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 8072bd4..ca4700c 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -865,6 +865,9 @@
 		fl6.flowi6_oif = np->ucast_oif;
 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
+	if (inet->hdrincl)
+		fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
+
 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 	if (IS_ERR(dst)) {
 		err = PTR_ERR(dst);
@@ -1324,13 +1327,7 @@
 
 int __init rawv6_init(void)
 {
-	int ret;
-
-	ret = inet6_register_protosw(&rawv6_protosw);
-	if (ret)
-		goto out;
-out:
-	return ret;
+	return inet6_register_protosw(&rawv6_protosw);
 }
 
 void rawv6_exit(void)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c73ae50..1a1122a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -72,8 +72,7 @@
 	RT6_NUD_SUCCEED = 1
 };
 
-static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
-				    const struct in6_addr *dest);
+static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
 static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
 static unsigned int	 ip6_mtu(const struct dst_entry *dst);
@@ -92,6 +91,7 @@
 					   struct sk_buff *skb, u32 mtu);
 static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
 					struct sk_buff *skb);
+static void		rt6_dst_from_metrics_check(struct rt6_info *rt);
 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
@@ -104,65 +104,82 @@
 					   const struct in6_addr *gwaddr, int ifindex);
 #endif
 
-static void rt6_bind_peer(struct rt6_info *rt, int create)
+struct uncached_list {
+	spinlock_t		lock;
+	struct list_head	head;
+};
+
+static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
+
+static void rt6_uncached_list_add(struct rt6_info *rt)
 {
-	struct inet_peer_base *base;
-	struct inet_peer *peer;
+	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
 
-	base = inetpeer_base_ptr(rt->_rt6i_peer);
-	if (!base)
-		return;
+	rt->dst.flags |= DST_NOCACHE;
+	rt->rt6i_uncached_list = ul;
 
-	peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
-	if (peer) {
-		if (!rt6_set_peer(rt, peer))
-			inet_putpeer(peer);
+	spin_lock_bh(&ul->lock);
+	list_add_tail(&rt->rt6i_uncached, &ul->head);
+	spin_unlock_bh(&ul->lock);
+}
+
+static void rt6_uncached_list_del(struct rt6_info *rt)
+{
+	if (!list_empty(&rt->rt6i_uncached)) {
+		struct uncached_list *ul = rt->rt6i_uncached_list;
+
+		spin_lock_bh(&ul->lock);
+		list_del(&rt->rt6i_uncached);
+		spin_unlock_bh(&ul->lock);
 	}
 }
 
-static struct inet_peer *__rt6_get_peer(struct rt6_info *rt, int create)
+static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
 {
-	if (rt6_has_peer(rt))
-		return rt6_peer_ptr(rt);
+	struct net_device *loopback_dev = net->loopback_dev;
+	int cpu;
 
-	rt6_bind_peer(rt, create);
-	return (rt6_has_peer(rt) ? rt6_peer_ptr(rt) : NULL);
+	for_each_possible_cpu(cpu) {
+		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
+		struct rt6_info *rt;
+
+		spin_lock_bh(&ul->lock);
+		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
+			struct inet6_dev *rt_idev = rt->rt6i_idev;
+			struct net_device *rt_dev = rt->dst.dev;
+
+			if (rt_idev && (rt_idev->dev == dev || !dev) &&
+			    rt_idev->dev != loopback_dev) {
+				rt->rt6i_idev = in6_dev_get(loopback_dev);
+				in6_dev_put(rt_idev);
+			}
+
+			if (rt_dev && (rt_dev == dev || !dev) &&
+			    rt_dev != loopback_dev) {
+				rt->dst.dev = loopback_dev;
+				dev_hold(rt->dst.dev);
+				dev_put(rt_dev);
+			}
+		}
+		spin_unlock_bh(&ul->lock);
+	}
 }
 
-static struct inet_peer *rt6_get_peer_create(struct rt6_info *rt)
+static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
 {
-	return __rt6_get_peer(rt, 1);
+	return dst_metrics_write_ptr(rt->dst.from);
 }
 
 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
 {
-	struct rt6_info *rt = (struct rt6_info *) dst;
-	struct inet_peer *peer;
-	u32 *p = NULL;
+	struct rt6_info *rt = (struct rt6_info *)dst;
 
-	if (!(rt->dst.flags & DST_HOST))
+	if (rt->rt6i_flags & RTF_PCPU)
+		return rt6_pcpu_cow_metrics(rt);
+	else if (rt->rt6i_flags & RTF_CACHE)
+		return NULL;
+	else
 		return dst_cow_metrics_generic(dst, old);
-
-	peer = rt6_get_peer_create(rt);
-	if (peer) {
-		u32 *old_p = __DST_METRICS_PTR(old);
-		unsigned long prev, new;
-
-		p = peer->metrics;
-		if (inet_metrics_new(peer) ||
-		    (old & DST_METRICS_FORCE_OVERWRITE))
-			memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
-
-		new = (unsigned long) p;
-		prev = cmpxchg(&dst->_metrics, old, new);
-
-		if (prev != old) {
-			p = __DST_METRICS_PTR(prev);
-			if (prev & DST_METRICS_READ_ONLY)
-				p = NULL;
-		}
-	}
-	return p;
 }
 
 static inline const void *choose_neigh_daddr(struct rt6_info *rt,
@@ -299,10 +316,10 @@
 #endif
 
 /* allocate dst with ip6_dst_ops */
-static inline struct rt6_info *ip6_dst_alloc(struct net *net,
-					     struct net_device *dev,
-					     int flags,
-					     struct fib6_table *table)
+static struct rt6_info *__ip6_dst_alloc(struct net *net,
+					struct net_device *dev,
+					int flags,
+					struct fib6_table *table)
 {
 	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
 					0, DST_OBSOLETE_FORCE_CHK, flags);
@@ -311,21 +328,54 @@
 		struct dst_entry *dst = &rt->dst;
 
 		memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
-		rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
 		INIT_LIST_HEAD(&rt->rt6i_siblings);
+		INIT_LIST_HEAD(&rt->rt6i_uncached);
 	}
 	return rt;
 }
 
+static struct rt6_info *ip6_dst_alloc(struct net *net,
+				      struct net_device *dev,
+				      int flags,
+				      struct fib6_table *table)
+{
+	struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table);
+
+	if (rt) {
+		rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
+		if (rt->rt6i_pcpu) {
+			int cpu;
+
+			for_each_possible_cpu(cpu) {
+				struct rt6_info **p;
+
+				p = per_cpu_ptr(rt->rt6i_pcpu, cpu);
+				/* no one shares rt */
+				*p = NULL;
+			}
+		} else {
+			dst_destroy((struct dst_entry *)rt);
+			return NULL;
+		}
+	}
+
+	return rt;
+}
+
 static void ip6_dst_destroy(struct dst_entry *dst)
 {
 	struct rt6_info *rt = (struct rt6_info *)dst;
-	struct inet6_dev *idev = rt->rt6i_idev;
 	struct dst_entry *from = dst->from;
+	struct inet6_dev *idev;
 
-	if (!(rt->dst.flags & DST_HOST))
-		dst_destroy_metrics_generic(dst);
+	dst_destroy_metrics_generic(dst);
 
+	if (rt->rt6i_pcpu)
+		free_percpu(rt->rt6i_pcpu);
+
+	rt6_uncached_list_del(rt);
+
+	idev = rt->rt6i_idev;
 	if (idev) {
 		rt->rt6i_idev = NULL;
 		in6_dev_put(idev);
@@ -333,11 +383,6 @@
 
 	dst->from = NULL;
 	dst_release(from);
-
-	if (rt6_has_peer(rt)) {
-		struct inet_peer *peer = rt6_peer_ptr(rt);
-		inet_putpeer(peer);
-	}
 }
 
 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -652,15 +697,33 @@
 				     u32 metric, int oif, int strict,
 				     bool *do_rr)
 {
-	struct rt6_info *rt, *match;
+	struct rt6_info *rt, *match, *cont;
 	int mpri = -1;
 
 	match = NULL;
-	for (rt = rr_head; rt && rt->rt6i_metric == metric;
-	     rt = rt->dst.rt6_next)
+	cont = NULL;
+	for (rt = rr_head; rt; rt = rt->dst.rt6_next) {
+		if (rt->rt6i_metric != metric) {
+			cont = rt;
+			break;
+		}
+
 		match = find_match(rt, oif, strict, &mpri, match, do_rr);
-	for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
-	     rt = rt->dst.rt6_next)
+	}
+
+	for (rt = fn->leaf; rt && rt != rr_head; rt = rt->dst.rt6_next) {
+		if (rt->rt6i_metric != metric) {
+			cont = rt;
+			break;
+		}
+
+		match = find_match(rt, oif, strict, &mpri, match, do_rr);
+	}
+
+	if (match || !cont)
+		return match;
+
+	for (rt = cont; rt; rt = rt->dst.rt6_next)
 		match = find_match(rt, oif, strict, &mpri, match, do_rr);
 
 	return match;
@@ -694,6 +757,11 @@
 	return match ? match : net->ipv6.ip6_null_entry;
 }
 
+static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
+{
+	return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
+}
+
 #ifdef CONFIG_IPV6_ROUTE_INFO
 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
 		  const struct in6_addr *gwaddr)
@@ -872,9 +940,9 @@
 	return __ip6_ins_rt(rt, &info, &mxc);
 }
 
-static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
-				      const struct in6_addr *daddr,
-				      const struct in6_addr *saddr)
+static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
+					   const struct in6_addr *daddr,
+					   const struct in6_addr *saddr)
 {
 	struct rt6_info *rt;
 
@@ -882,15 +950,26 @@
 	 *	Clone the route.
 	 */
 
-	rt = ip6_rt_copy(ort, daddr);
+	if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
+		ort = (struct rt6_info *)ort->dst.from;
 
-	if (rt) {
+	rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev,
+			     0, ort->rt6i_table);
+
+	if (!rt)
+		return NULL;
+
+	ip6_rt_copy_init(rt, ort);
+	rt->rt6i_flags |= RTF_CACHE;
+	rt->rt6i_metric = 0;
+	rt->dst.flags |= DST_HOST;
+	rt->rt6i_dst.addr = *daddr;
+	rt->rt6i_dst.plen = 128;
+
+	if (!rt6_is_gw_or_nonexthop(ort)) {
 		if (ort->rt6i_dst.plen != 128 &&
 		    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
 			rt->rt6i_flags |= RTF_ANYCAST;
-
-		rt->rt6i_flags |= RTF_CACHE;
-
 #ifdef CONFIG_IPV6_SUBTREES
 		if (rt->rt6i_src.plen && saddr) {
 			rt->rt6i_src.addr = *saddr;
@@ -902,30 +981,65 @@
 	return rt;
 }
 
-static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
-					const struct in6_addr *daddr)
+static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
 {
-	struct rt6_info *rt = ip6_rt_copy(ort, daddr);
+	struct rt6_info *pcpu_rt;
 
-	if (rt)
-		rt->rt6i_flags |= RTF_CACHE;
-	return rt;
+	pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
+				  rt->dst.dev, rt->dst.flags,
+				  rt->rt6i_table);
+
+	if (!pcpu_rt)
+		return NULL;
+	ip6_rt_copy_init(pcpu_rt, rt);
+	pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
+	pcpu_rt->rt6i_flags |= RTF_PCPU;
+	return pcpu_rt;
+}
+
+/* It should be called with read_lock_bh(&tb6_lock) acquired */
+static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
+{
+	struct rt6_info *pcpu_rt, *prev, **p;
+
+	p = this_cpu_ptr(rt->rt6i_pcpu);
+	pcpu_rt = *p;
+
+	if (pcpu_rt)
+		goto done;
+
+	pcpu_rt = ip6_rt_pcpu_alloc(rt);
+	if (!pcpu_rt) {
+		struct net *net = dev_net(rt->dst.dev);
+
+		pcpu_rt = net->ipv6.ip6_null_entry;
+		goto done;
+	}
+
+	prev = cmpxchg(p, NULL, pcpu_rt);
+	if (prev) {
+		/* If someone did it before us, return prev instead */
+		dst_destroy(&pcpu_rt->dst);
+		pcpu_rt = prev;
+	}
+
+done:
+	dst_hold(&pcpu_rt->dst);
+	rt6_dst_from_metrics_check(pcpu_rt);
+	return pcpu_rt;
 }
 
 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
 				      struct flowi6 *fl6, int flags)
 {
 	struct fib6_node *fn, *saved_fn;
-	struct rt6_info *rt, *nrt;
+	struct rt6_info *rt;
 	int strict = 0;
-	int attempts = 3;
-	int err;
 
 	strict |= flags & RT6_LOOKUP_F_IFACE;
 	if (net->ipv6.devconf_all->forwarding == 0)
 		strict |= RT6_LOOKUP_F_REACHABLE;
 
-redo_fib6_lookup_lock:
 	read_lock_bh(&table->tb6_lock);
 
 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
@@ -944,51 +1058,52 @@
 			strict &= ~RT6_LOOKUP_F_REACHABLE;
 			fn = saved_fn;
 			goto redo_rt6_select;
-		} else {
-			dst_hold(&rt->dst);
-			read_unlock_bh(&table->tb6_lock);
-			goto out2;
 		}
 	}
 
-	dst_hold(&rt->dst);
-	read_unlock_bh(&table->tb6_lock);
 
-	if (rt->rt6i_flags & RTF_CACHE)
-		goto out2;
+	if (rt == net->ipv6.ip6_null_entry || (rt->rt6i_flags & RTF_CACHE)) {
+		dst_use(&rt->dst, jiffies);
+		read_unlock_bh(&table->tb6_lock);
 
-	if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)))
-		nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
-	else if (!(rt->dst.flags & DST_HOST))
-		nrt = rt6_alloc_clone(rt, &fl6->daddr);
-	else
-		goto out2;
+		rt6_dst_from_metrics_check(rt);
+		return rt;
+	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
+			    !(rt->rt6i_flags & RTF_GATEWAY))) {
+		/* Create a RTF_CACHE clone which will not be
+		 * owned by the fib6 tree.  It is for the special case where
+		 * the daddr in the skb during the neighbor look-up is different
+		 * from the fl6->daddr used to look up the route here.
+		 */
 
-	ip6_rt_put(rt);
-	rt = nrt ? : net->ipv6.ip6_null_entry;
+		struct rt6_info *uncached_rt;
 
-	dst_hold(&rt->dst);
-	if (nrt) {
-		err = ip6_ins_rt(nrt);
-		if (!err)
-			goto out2;
+		dst_use(&rt->dst, jiffies);
+		read_unlock_bh(&table->tb6_lock);
+
+		uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
+		dst_release(&rt->dst);
+
+		if (uncached_rt)
+			rt6_uncached_list_add(uncached_rt);
+		else
+			uncached_rt = net->ipv6.ip6_null_entry;
+
+		dst_hold(&uncached_rt->dst);
+		return uncached_rt;
+
+	} else {
+		/* Get a percpu copy */
+
+		struct rt6_info *pcpu_rt;
+
+		rt->dst.lastuse = jiffies;
+		rt->dst.__use++;
+		pcpu_rt = rt6_get_pcpu_route(rt);
+		read_unlock_bh(&table->tb6_lock);
+
+		return pcpu_rt;
 	}
-
-	if (--attempts <= 0)
-		goto out2;
-
-	/*
-	 * Race condition! In the gap, when table->tb6_lock was
-	 * released someone could insert this route.  Relookup.
-	 */
-	ip6_rt_put(rt);
-	goto redo_fib6_lookup_lock;
-
-out2:
-	rt->dst.lastuse = jiffies;
-	rt->dst.__use++;
-
-	return rt;
 }
 
 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
@@ -1059,7 +1174,6 @@
 		new = &rt->dst;
 
 		memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
-		rt6_init_peer(rt, net->ipv6.peers);
 
 		new->__use = 1;
 		new->input = dst_discard;
@@ -1093,6 +1207,33 @@
  *	Destination cache support functions
  */
 
+static void rt6_dst_from_metrics_check(struct rt6_info *rt)
+{
+	if (rt->dst.from &&
+	    dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
+		dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
+}
+
+static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
+{
+	if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
+		return NULL;
+
+	if (rt6_check_expired(rt))
+		return NULL;
+
+	return &rt->dst;
+}
+
+static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
+{
+	if (rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
+	    rt6_check((struct rt6_info *)(rt->dst.from), cookie))
+		return &rt->dst;
+	else
+		return NULL;
+}
+
 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
 {
 	struct rt6_info *rt;
@@ -1103,13 +1244,13 @@
 	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
 	 * into this function always.
 	 */
-	if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
-		return NULL;
 
-	if (rt6_check_expired(rt))
-		return NULL;
+	rt6_dst_from_metrics_check(rt);
 
-	return dst;
+	if ((rt->rt6i_flags & RTF_PCPU) || unlikely(dst->flags & DST_NOCACHE))
+		return rt6_dst_from_check(rt, cookie);
+	else
+		return rt6_check(rt, cookie);
 }
 
 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
@@ -1148,24 +1289,63 @@
 	}
 }
 
-static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
-			       struct sk_buff *skb, u32 mtu)
+static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
+{
+	struct net *net = dev_net(rt->dst.dev);
+
+	rt->rt6i_flags |= RTF_MODIFIED;
+	rt->rt6i_pmtu = mtu;
+	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
+}
+
+static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
+				 const struct ipv6hdr *iph, u32 mtu)
 {
 	struct rt6_info *rt6 = (struct rt6_info *)dst;
 
+	if (rt6->rt6i_flags & RTF_LOCAL)
+		return;
+
 	dst_confirm(dst);
-	if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
-		struct net *net = dev_net(dst->dev);
+	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
+	if (mtu >= dst_mtu(dst))
+		return;
 
-		rt6->rt6i_flags |= RTF_MODIFIED;
-		if (mtu < IPV6_MIN_MTU)
-			mtu = IPV6_MIN_MTU;
+	if (rt6->rt6i_flags & RTF_CACHE) {
+		rt6_do_update_pmtu(rt6, mtu);
+	} else {
+		const struct in6_addr *daddr, *saddr;
+		struct rt6_info *nrt6;
 
-		dst_metric_set(dst, RTAX_MTU, mtu);
-		rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
+		if (iph) {
+			daddr = &iph->daddr;
+			saddr = &iph->saddr;
+		} else if (sk) {
+			daddr = &sk->sk_v6_daddr;
+			saddr = &inet6_sk(sk)->saddr;
+		} else {
+			return;
+		}
+		nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
+		if (nrt6) {
+			rt6_do_update_pmtu(nrt6, mtu);
+
+			/* ip6_ins_rt(nrt6) will bump the
+			 * rt6->rt6i_node->fn_sernum
+			 * which will fail the next rt6_check() and
+			 * invalidate the sk->sk_dst_cache.
+			 */
+			ip6_ins_rt(nrt6);
+		}
 	}
 }
 
+static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+			       struct sk_buff *skb, u32 mtu)
+{
+	__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
+}
+
 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
 		     int oif, u32 mark)
 {
@@ -1182,7 +1362,7 @@
 
 	dst = ip6_route_output(net, NULL, &fl6);
 	if (!dst->error)
-		ip6_rt_update_pmtu(dst, NULL, skb, ntohl(mtu));
+		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
 	dst_release(dst);
 }
 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
@@ -1341,12 +1521,17 @@
 
 static unsigned int ip6_mtu(const struct dst_entry *dst)
 {
+	const struct rt6_info *rt = (const struct rt6_info *)dst;
+	unsigned int mtu = rt->rt6i_pmtu;
 	struct inet6_dev *idev;
-	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
 
 	if (mtu)
 		goto out;
 
+	mtu = dst_metric_raw(dst, RTAX_MTU);
+	if (mtu)
+		goto out;
+
 	mtu = IPV6_MIN_MTU;
 
 	rcu_read_lock();
@@ -1590,10 +1775,8 @@
 
 	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
 	rt->rt6i_dst.plen = cfg->fc_dst_len;
-	if (rt->rt6i_dst.plen == 128) {
+	if (rt->rt6i_dst.plen == 128)
 		rt->dst.flags |= DST_HOST;
-		dst_metrics_set_force_overwrite(&rt->dst);
-	}
 
 #ifdef CONFIG_IPV6_SUBTREES
 	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
@@ -1651,6 +1834,16 @@
 		int gwa_type;
 
 		gw_addr = &cfg->fc_gateway;
+
+		/* if gw_addr is local we will fail to detect this in case
+		 * address is still TENTATIVE (DAD in progress). rt6_lookup()
+		 * will return already-added prefix route via interface that
+		 * prefix route was assigned to, which might be non-loopback.
+		 */
+		err = -EINVAL;
+		if (ipv6_chk_addr_and_flags(net, gw_addr, NULL, 0, 0))
+			goto out;
+
 		rt->rt6i_gateway = *gw_addr;
 		gwa_type = ipv6_addr_type(gw_addr);
 
@@ -1664,7 +1857,6 @@
 			   (SIT, PtP, NBMA NOARP links) it is handy to allow
 			   some exceptions. --ANK
 			 */
-			err = -EINVAL;
 			if (!(gwa_type & IPV6_ADDR_UNICAST))
 				goto out;
 
@@ -1785,6 +1977,9 @@
 
 	if (fn) {
 		for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
+			if ((rt->rt6i_flags & RTF_CACHE) &&
+			    !(cfg->fc_flags & RTF_CACHE))
+				continue;
 			if (cfg->fc_ifindex &&
 			    (!rt->dst.dev ||
 			     rt->dst.dev->ifindex != cfg->fc_ifindex))
@@ -1894,7 +2089,7 @@
 				     NEIGH_UPDATE_F_ISROUTER))
 		     );
 
-	nrt = ip6_rt_copy(rt, &msg->dest);
+	nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
 	if (!nrt)
 		goto out;
 
@@ -1926,42 +2121,35 @@
  *	Misc support functions
  */
 
-static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
-				    const struct in6_addr *dest)
+static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
 {
-	struct net *net = dev_net(ort->dst.dev);
-	struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0,
-					    ort->rt6i_table);
+	BUG_ON(from->dst.from);
 
-	if (rt) {
-		rt->dst.input = ort->dst.input;
-		rt->dst.output = ort->dst.output;
-		rt->dst.flags |= DST_HOST;
+	rt->rt6i_flags &= ~RTF_EXPIRES;
+	dst_hold(&from->dst);
+	rt->dst.from = &from->dst;
+	dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
+}
 
-		rt->rt6i_dst.addr = *dest;
-		rt->rt6i_dst.plen = 128;
-		dst_copy_metrics(&rt->dst, &ort->dst);
-		rt->dst.error = ort->dst.error;
-		rt->rt6i_idev = ort->rt6i_idev;
-		if (rt->rt6i_idev)
-			in6_dev_hold(rt->rt6i_idev);
-		rt->dst.lastuse = jiffies;
-
-		if (ort->rt6i_flags & RTF_GATEWAY)
-			rt->rt6i_gateway = ort->rt6i_gateway;
-		else
-			rt->rt6i_gateway = *dest;
-		rt->rt6i_flags = ort->rt6i_flags;
-		rt6_set_from(rt, ort);
-		rt->rt6i_metric = 0;
-
+static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
+{
+	rt->dst.input = ort->dst.input;
+	rt->dst.output = ort->dst.output;
+	rt->rt6i_dst = ort->rt6i_dst;
+	rt->dst.error = ort->dst.error;
+	rt->rt6i_idev = ort->rt6i_idev;
+	if (rt->rt6i_idev)
+		in6_dev_hold(rt->rt6i_idev);
+	rt->dst.lastuse = jiffies;
+	rt->rt6i_gateway = ort->rt6i_gateway;
+	rt->rt6i_flags = ort->rt6i_flags;
+	rt6_set_from(rt, ort);
+	rt->rt6i_metric = ort->rt6i_metric;
 #ifdef CONFIG_IPV6_SUBTREES
-		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
+	rt->rt6i_src = ort->rt6i_src;
 #endif
-		memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key));
-		rt->rt6i_table = ort->rt6i_table;
-	}
-	return rt;
+	rt->rt6i_prefsrc = ort->rt6i_prefsrc;
+	rt->rt6i_table = ort->rt6i_table;
 }
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
@@ -2336,6 +2524,7 @@
 
 	fib6_clean_all(net, fib6_ifdown, &adn);
 	icmp6_clean_all(fib6_ifdown, &adn);
+	rt6_uncached_list_flush_dev(net, dev);
 }
 
 struct rt6_mtu_change_arg {
@@ -2373,11 +2562,20 @@
 	   PMTU discovery.
 	 */
 	if (rt->dst.dev == arg->dev &&
-	    !dst_metric_locked(&rt->dst, RTAX_MTU) &&
-	    (dst_mtu(&rt->dst) >= arg->mtu ||
-	     (dst_mtu(&rt->dst) < arg->mtu &&
-	      dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
-		dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
+	    !dst_metric_locked(&rt->dst, RTAX_MTU)) {
+		if (rt->rt6i_flags & RTF_CACHE) {
+			/* For RTF_CACHE with rt6i_pmtu == 0
+			 * (i.e. a redirected route),
+			 * the metrics of its rt->dst.from have already
+			 * been updated.
+			 */
+			if (rt->rt6i_pmtu && rt->rt6i_pmtu > arg->mtu)
+				rt->rt6i_pmtu = arg->mtu;
+		} else if (dst_mtu(&rt->dst) >= arg->mtu ||
+			   (dst_mtu(&rt->dst) < arg->mtu &&
+			    dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
+			dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
+		}
 	}
 	return 0;
 }
@@ -2434,6 +2632,9 @@
 	if (rtm->rtm_type == RTN_LOCAL)
 		cfg->fc_flags |= RTF_LOCAL;
 
+	if (rtm->rtm_flags & RTM_F_CLONED)
+		cfg->fc_flags |= RTF_CACHE;
+
 	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
 	cfg->fc_nlinfo.nlh = nlh;
 	cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
@@ -2608,6 +2809,7 @@
 			 int iif, int type, u32 portid, u32 seq,
 			 int prefix, int nowait, unsigned int flags)
 {
+	u32 metrics[RTAX_MAX];
 	struct rtmsg *rtm;
 	struct nlmsghdr *nlh;
 	long expires;
@@ -2721,7 +2923,10 @@
 			goto nla_put_failure;
 	}
 
-	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
+	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
+	if (rt->rt6i_pmtu)
+		metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;
+	if (rtnetlink_put_metrics(skb, metrics) < 0)
 		goto nla_put_failure;
 
 	if (rt->rt6i_flags & RTF_GATEWAY) {
@@ -3216,6 +3421,7 @@
 int __init ip6_route_init(void)
 {
 	int ret;
+	int cpu;
 
 	ret = -ENOMEM;
 	ip6_dst_ops_template.kmem_cachep =
@@ -3275,6 +3481,13 @@
 	if (ret)
 		goto out_register_late_subsys;
 
+	for_each_possible_cpu(cpu) {
+		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
+
+		INIT_LIST_HEAD(&ul->head);
+		spin_lock_init(&ul->lock);
+	}
+
 out:
 	return ret;
 
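
Two mechanisms in the route.c rework above deserve a note. RTF_CACHE clones now sit on a per-cpu uncached list so device teardown can re-home them, and the per-node RTF_PCPU copies are installed lock-free: allocate a clone, publish it into the per-cpu slot with a single cmpxchg(), and if another context raced ahead, destroy the loser and take the winner. A userspace sketch of that publish step, with C11 atomics standing in for cmpxchg() and malloc/free for dst allocation (struct route is a toy stand-in):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct route { int cost; };

/* one slot here; the kernel keeps one per possible CPU (rt6i_pcpu) */
static _Atomic(struct route *) pcpu_slot;

static struct route *get_pcpu_route(void)
{
	struct route *rt = atomic_load(&pcpu_slot);
	struct route *expected = NULL;

	if (rt)
		return rt;		/* fast path: already installed */

	rt = malloc(sizeof(*rt));	/* ip6_rt_pcpu_alloc() analogue */
	if (!rt)
		return NULL;		/* kernel falls back to ip6_null_entry */
	rt->cost = 1;

	if (!atomic_compare_exchange_strong(&pcpu_slot, &expected, rt)) {
		free(rt);		/* someone won the race: use theirs */
		rt = expected;
	}
	return rt;
}

int main(void)
{
	struct route *a = get_pcpu_route();
	struct route *b = get_pcpu_route();	/* hits the fast path */

	printf("same object: %s\n", a == b ? "yes" : "no");
	return 0;
}
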
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 21bc2eb..0909f4e 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -41,23 +41,6 @@
 	9000 - 60,
 };
 
-static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
-					   struct request_sock *req,
-					   struct dst_entry *dst)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct sock *child;
-
-	child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
-	if (child) {
-		atomic_set(&req->rsk_refcnt, 1);
-		inet_csk_reqsk_queue_add(sk, req, child);
-	} else {
-		reqsk_free(req);
-	}
-	return child;
-}
-
 static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
 		      ipv6_cookie_scratch);
 
@@ -264,7 +247,7 @@
 	ireq->rcv_wscale = rcv_wscale;
 	ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst);
 
-	ret = get_cookie_sock(sk, skb, req, dst);
+	ret = tcp_get_cookie_sock(sk, skb, req, dst);
 out:
 	return ret;
 out_free:
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index abcc79f..4e705ad 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -68,6 +68,13 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
 	},
+	{
+		.procname	= "flowlabel_state_ranges",
+		.data		= &init_net.ipv6.sysctl.flowlabel_state_ranges,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
 	{ }
 };
 
@@ -109,6 +116,7 @@
 	ipv6_table[4].data = &net->ipv6.sysctl.fwmark_reflect;
 	ipv6_table[5].data = &net->ipv6.sysctl.idgen_retries;
 	ipv6_table[6].data = &net->ipv6.sysctl.idgen_delay;
+	ipv6_table[7].data = &net->ipv6.sysctl.flowlabel_state_ranges;
 
 	ipv6_route_table = ipv6_route_sysctl_init(net);
 	if (!ipv6_route_table)
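
The two-part sysctl change above is characteristic of per-netns tables: the template entries point at init_net, and each namespace clones the table and repoints every .data field by index, so adding flowlabel_state_ranges needs both the new entry and the matching ipv6_table[7].data assignment. A toy userspace sketch of that clone-and-fixup pattern (the structs are simplified stand-ins for ctl_table and struct net):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ctl_entry { const char *procname; int *data; };
struct netns { int auto_flowlabels, flowlabel_state_ranges; };

static struct netns init_ns;	/* the template points here */
static const struct ctl_entry template_tbl[] = {
	{ "auto_flowlabels",        &init_ns.auto_flowlabels },
	{ "flowlabel_state_ranges", &init_ns.flowlabel_state_ranges },
};

static struct ctl_entry *clone_for_ns(struct netns *ns)
{
	struct ctl_entry *t = malloc(sizeof(template_tbl));

	if (!t)
		return NULL;
	memcpy(t, template_tbl, sizeof(template_tbl));
	/* index-based fixup: one line per entry, as in the diff */
	t[0].data = &ns->auto_flowlabels;
	t[1].data = &ns->flowlabel_state_ranges;
	return t;
}

int main(void)
{
	struct netns ns = { .flowlabel_state_ranges = 1 };
	struct ctl_entry *t = clone_for_ns(&ns);

	if (t)
		printf("%s -> %d\n", t[1].procname, *t[1].data);
	free(t);
	return 0;
}
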
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 3adffb3..6748c42 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -99,8 +99,7 @@
 		dst_hold(dst);
 		sk->sk_rx_dst = dst;
 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
-		if (rt->rt6i_node)
-			inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
+		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
 	}
 }
 
@@ -121,7 +120,6 @@
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct in6_addr *saddr = NULL, *final_p, final;
-	struct rt6_info *rt;
 	struct flowi6 fl6;
 	struct dst_entry *dst;
 	int addr_type;
@@ -259,10 +257,9 @@
 	sk->sk_gso_type = SKB_GSO_TCPV6;
 	__ip6_dst_store(sk, dst, NULL, NULL);
 
-	rt = (struct rt6_info *) dst;
 	if (tcp_death_row.sysctl_tw_recycle &&
 	    !tp->rx_opt.ts_recent_stamp &&
-	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
+	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
 		tcp_fetch_timewait_stamp(sk, dst);
 
 	icsk->icsk_ext_hdr_len = 0;
@@ -1251,7 +1248,7 @@
 		return 0;
 	}
 
-	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
+	if (tcp_checksum_complete(skb))
 		goto csum_err;
 
 	if (sk->sk_state == TCP_LISTEN) {
@@ -1421,6 +1418,7 @@
 	skb->dev = NULL;
 
 	bh_lock_sock_nested(sk);
+	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
 	ret = 0;
 	if (!sock_owned_by_user(sk)) {
 		if (!tcp_prequeue(sk, skb))
@@ -1442,7 +1440,7 @@
 
 	tcp_v6_fill_cb(skb, hdr, th);
 
-	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
+	if (tcp_checksum_complete(skb)) {
 csum_error:
 		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
 bad_packet:
@@ -1467,10 +1465,6 @@
 
 	tcp_v6_fill_cb(skb, hdr, th);
 
-	if (skb->len < (th->doff<<2)) {
-		inet_twsk_put(inet_twsk(sk));
-		goto bad_packet;
-	}
 	if (tcp_checksum_complete(skb)) {
 		inet_twsk_put(inet_twsk(sk));
 		goto csum_error;
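
The segs_in line added to tcp_v6_rcv() above counts logical TCP segments rather than skbs: gso_segs is zero for a plain (non-GRO) skb, so the counter clamps to a minimum of one, which the max_t(u16, 1, ...) expresses. A one-function illustration:

#include <stdio.h>

/* userspace restatement of max_t(u16, 1, skb_shinfo(skb)->gso_segs) */
static unsigned int segs_for(unsigned int gso_segs)
{
	return gso_segs > 1 ? gso_segs : 1;
}

int main(void)
{
	printf("plain skb:     %u\n", segs_for(0));	/* counts as 1 */
	printf("GRO of 5 segs: %u\n", segs_for(5));	/* counts as 5 */
	return 0;
}
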
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index f337a90..ed0583c1 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -71,20 +71,12 @@
 	return 0;
 }
 
-static void xfrm6_init_dst(struct net *net, struct xfrm_dst *xdst)
-{
-	struct rt6_info *rt = (struct rt6_info *)xdst;
-
-	rt6_init_peer(rt, net->ipv6.peers);
-}
-
 static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
 			   int nfheader_len)
 {
 	if (dst->ops->family == AF_INET6) {
 		struct rt6_info *rt = (struct rt6_info *)dst;
-		if (rt->rt6i_node)
-			path->path_cookie = rt->rt6i_node->fn_sernum;
+		path->path_cookie = rt6_get_cookie(rt);
 	}
 
 	path->u.rt6.rt6i_nfheader_len = nfheader_len;
@@ -106,16 +98,13 @@
 		return -ENODEV;
 	}
 
-	rt6_transfer_peer(&xdst->u.rt6, rt);
-
 	/* Sheit... I remember I did this right. Apparently,
 	 * it was magically lost, so this code needs audit */
 	xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST |
 						   RTF_LOCAL);
 	xdst->u.rt6.rt6i_metric = rt->rt6i_metric;
 	xdst->u.rt6.rt6i_node = rt->rt6i_node;
-	if (rt->rt6i_node)
-		xdst->route_cookie = rt->rt6i_node->fn_sernum;
+	xdst->route_cookie = rt6_get_cookie(rt);
 	xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway;
 	xdst->u.rt6.rt6i_dst = rt->rt6i_dst;
 	xdst->u.rt6.rt6i_src = rt->rt6i_src;
@@ -255,10 +244,6 @@
 	if (likely(xdst->u.rt6.rt6i_idev))
 		in6_dev_put(xdst->u.rt6.rt6i_idev);
 	dst_destroy_metrics_generic(dst);
-	if (rt6_has_peer(&xdst->u.rt6)) {
-		struct inet_peer *peer = rt6_peer_ptr(&xdst->u.rt6);
-		inet_putpeer(peer);
-	}
 	xfrm_dst_destroy(xdst);
 }
 
@@ -308,7 +293,6 @@
 	.get_saddr =		xfrm6_get_saddr,
 	.decode_session =	_decode_session6,
 	.get_tos =		xfrm6_get_tos,
-	.init_dst =		xfrm6_init_dst,
 	.init_path =		xfrm6_init_path,
 	.fill_dst =		xfrm6_fill_dst,
 	.blackhole_route =	ip6_blackhole_route,
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 4ea5d74..48d0dc89b 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1347,7 +1347,7 @@
 		goto out;
 
 	rc = -ENOMEM;
-	sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto);
+	sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto, kern);
 	if (!sk)
 		goto out;
 
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index ee0ea25..fae6822c 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1100,7 +1100,7 @@
 	}
 
 	/* Allocate networking socket */
-	sk = sk_alloc(net, PF_IRDA, GFP_KERNEL, &irda_proto);
+	sk = sk_alloc(net, PF_IRDA, GFP_KERNEL, &irda_proto, kern);
 	if (sk == NULL)
 		return -ENOMEM;
 
diff --git a/net/irda/timer.c b/net/irda/timer.c
index 0c4c115..f2280f7 100644
--- a/net/irda/timer.c
+++ b/net/irda/timer.c
@@ -60,8 +60,8 @@
 	 * to avoid messing with incoming connection requests and
 	 * to accommodate devices that perform discovery slower than us.
 	 * Jean II */
-	timeout = ((sysctl_slot_timeout * HZ / 1000) * (S - s)
-		   + XIDEXTRA_TIMEOUT + SMALLBUSY_TIMEOUT);
+	timeout = msecs_to_jiffies(sysctl_slot_timeout) * (S - s)
+		   + XIDEXTRA_TIMEOUT + SMALLBUSY_TIMEOUT;
 
 	/* Set or re-set the timer. We reset the timer for each received
 	 * discovery query, which allows us to automatically adjust to
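
The timer.c hunk above swaps an open-coded sysctl_slot_timeout * HZ / 1000 for msecs_to_jiffies(). Besides readability, the helper rounds up where integer division truncates, so low-HZ configurations no longer undershoot the slot timeout. A worked userspace illustration; HZ and the round-up formula mirror the kernel's generic conversion path but are stand-ins here:

#include <stdio.h>

#define HZ 100	/* illustrative tick rate */

/* round up, as msecs_to_jiffies() does, so a timer never fires early */
static unsigned long msecs_to_jiffies_sketch(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;
}

int main(void)
{
	unsigned int ms = 25;

	printf("truncating:  %u jiffies = %u ms\n",
	       ms * HZ / 1000, (ms * HZ / 1000) * 1000 / HZ);	/* 2 -> 20 ms */
	printf("rounding up: %lu jiffies = %lu ms\n",
	       msecs_to_jiffies_sketch(ms),
	       msecs_to_jiffies_sketch(ms) * 1000 / HZ);	/* 3 -> 30 ms */
	return 0;
}
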
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 6daa52a..918151c 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -535,12 +535,12 @@
 		sk->sk_type = parent->sk_type;
 }
 
-static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
+static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
 {
 	struct sock *sk;
 	struct iucv_sock *iucv;
 
-	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
+	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
 	if (!sk)
 		return NULL;
 	iucv = iucv_sk(sk);
@@ -602,7 +602,7 @@
 		return -ESOCKTNOSUPPORT;
 	}
 
-	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
+	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
 	if (!sk)
 		return -ENOMEM;
 
@@ -1723,7 +1723,7 @@
 	}
 
 	/* Create the new socket */
-	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
+	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
 	if (!nsk) {
 		err = pr_iucv->path_sever(path, user_data);
 		iucv_path_free(path);
@@ -1933,7 +1933,7 @@
 		goto out;
 	}
 
-	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
+	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
 	bh_lock_sock(sk);
 	if ((sk->sk_state != IUCV_LISTEN) ||
 	    sk_acceptq_is_full(sk) ||
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 3c5b8ce..b397f0a 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -149,7 +149,7 @@
 		return -EPROTONOSUPPORT;
 
 	err = -ENOMEM;
-	sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto);
+	sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto, kern);
 	if (sk == NULL)
 		goto out;
 
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index a29a504..f6b090d 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1334,9 +1334,10 @@
 		if (sock)
 			inet_shutdown(sock, 2);
 	} else {
-		if (sock)
+		if (sock) {
 			kernel_sock_shutdown(sock, SHUT_RDWR);
-		sk_release_kernel(sk);
+			sock_release(sock);
+		}
 	}
 
 	l2tp_tunnel_sock_put(sk);
@@ -1399,13 +1400,11 @@
 		if (cfg->local_ip6 && cfg->peer_ip6) {
 			struct sockaddr_l2tpip6 ip6_addr = {0};
 
-			err = sock_create_kern(AF_INET6, SOCK_DGRAM,
+			err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
 					  IPPROTO_L2TP, &sock);
 			if (err < 0)
 				goto out;
 
-			sk_change_net(sock->sk, net);
-
 			ip6_addr.l2tp_family = AF_INET6;
 			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
 			       sizeof(ip6_addr.l2tp_addr));
@@ -1429,13 +1428,11 @@
 		{
 			struct sockaddr_l2tpip ip_addr = {0};
 
-			err = sock_create_kern(AF_INET, SOCK_DGRAM,
+			err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
 					  IPPROTO_L2TP, &sock);
 			if (err < 0)
 				goto out;
 
-			sk_change_net(sock->sk, net);
-
 			ip_addr.l2tp_family = AF_INET;
 			ip_addr.l2tp_addr = cfg->local_ip;
 			ip_addr.l2tp_conn_id = tunnel_id;
@@ -1462,7 +1459,7 @@
 	*sockp = sock;
 	if ((err < 0) && sock) {
 		kernel_sock_shutdown(sock, SHUT_RDWR);
-		sk_release_kernel(sock->sk);
+		sock_release(sock);
 		*sockp = NULL;
 	}
 
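
The l2tp_core.c hunks above complete a lifecycle cleanup: sock_create_kern() now takes the target netns directly, so the sk_change_net()/sk_release_kernel() pairing disappears and teardown is a plain kernel_sock_shutdown() plus sock_release(). A kernel-style sketch of the resulting create/destroy pairing, using only calls visible in the diff (not a standalone program; error handling trimmed):

/* create: the socket is born in the right namespace */
static int make_tunnel_sock(struct net *net, struct socket **sockp)
{
	struct socket *sock;
	int err;

	err = sock_create_kern(net, AF_INET, SOCK_DGRAM, IPPROTO_L2TP, &sock);
	if (err < 0)
		return err;

	/* ... kernel_bind()/kernel_connect() as in l2tp_tunnel_sock_create ... */

	*sockp = sock;
	return 0;
}

/* destroy: no sk_release_kernel(), just shutdown + release */
static void destroy_tunnel_sock(struct socket *sock)
{
	kernel_sock_shutdown(sock, SHUT_RDWR);
	sock_release(sock);
}
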
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index e9b0dec..f56c9f6 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -542,12 +542,12 @@
 
 /* socket() handler. Initialize a new struct sock.
  */
-static int pppol2tp_create(struct net *net, struct socket *sock)
+static int pppol2tp_create(struct net *net, struct socket *sock, int kern)
 {
 	int error = -ENOMEM;
 	struct sock *sk;
 
-	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto);
+	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto, kern);
 	if (!sk)
 		goto out;
 
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 17a8dff..8fd9feb 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -168,7 +168,7 @@
 
 	if (likely(sock->type == SOCK_DGRAM || sock->type == SOCK_STREAM)) {
 		rc = -ENOMEM;
-		sk = llc_sk_alloc(net, PF_LLC, GFP_KERNEL, &llc_proto);
+		sk = llc_sk_alloc(net, PF_LLC, GFP_KERNEL, &llc_proto, kern);
 		if (sk) {
 			rc = 0;
 			llc_ui_sk_init(sock, sk);
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 81a61fc..3e821da 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -768,7 +768,7 @@
 					     struct llc_addr *daddr)
 {
 	struct sock *newsk = llc_sk_alloc(sock_net(sk), sk->sk_family, GFP_ATOMIC,
-					  sk->sk_prot);
+					  sk->sk_prot, 0);
 	struct llc_sock *newllc, *llc = llc_sk(sk);
 
 	if (!newsk)
@@ -931,9 +931,9 @@
  *	Allocates a LLC sock and initializes it. Returns the new LLC sock
  *	or %NULL if there's no memory available for one
  */
-struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot)
+struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot, int kern)
 {
-	struct sock *sk = sk_alloc(net, family, priority, prot);
+	struct sock *sk = sk_alloc(net, family, priority, prot, kern);
 
 	if (!sk)
 		goto out;
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 64a012a..086de49 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -302,6 +302,20 @@
 	---help---
 	  Selecting this option causes mac80211 to keep additional
 	  and very verbose statistics about TX and RX handler use
-	  and show them in debugfs.
+	  as well as a few selected dot11 counters. These will be
+	  exposed in debugfs.
+
+	  Note that some of the counters are not concurrency safe
+	  and may thus not always be accurate.
 
 	  If unsure, say N.
+
+config MAC80211_STA_HASH_MAX_SIZE
+	int "Station hash table maximum size" if MAC80211_DEBUG_MENU
+	default 0
+	---help---
+	  Setting this option to a low value (e.g. 4) allows testing the
+	  hash table with collisions relatively deterministically (just
+	  connect more stations than the number selected here.)
+
+	  If unsure, leave the default of 0.
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index cce9d42..c8ba2e7 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -564,8 +564,8 @@
 		return -EINVAL;
 
 	if ((tid >= IEEE80211_NUM_TIDS) ||
-	    !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) ||
-	    (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
+	    !ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) ||
+	    ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW))
 		return -EINVAL;
 
 	ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index ff347a0..bf7023f 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2,7 +2,7 @@
  * mac80211 configuration hooks for cfg80211
  *
  * Copyright 2006-2010	Johannes Berg <johannes@sipsolutions.net>
- * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright 2013-2015  Intel Mobile Communications GmbH
  *
  * This file is GPLv2 as found in COPYING.
  */
@@ -137,6 +137,9 @@
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
 	sdata->noack_map = noack_map;
+
+	ieee80211_check_fast_xmit_iface(sdata);
+
 	return 0;
 }
 
@@ -309,6 +312,7 @@
 	u32 iv32;
 	u16 iv16;
 	int err = -ENOENT;
+	struct ieee80211_key_seq kseq = {};
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
@@ -339,10 +343,12 @@
 		iv32 = key->u.tkip.tx.iv32;
 		iv16 = key->u.tkip.tx.iv16;
 
-		if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
-			drv_get_tkip_seq(sdata->local,
-					 key->conf.hw_key_idx,
-					 &iv32, &iv16);
+		if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
+		    !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
+			drv_get_key_seq(sdata->local, key, &kseq);
+			iv32 = kseq.tkip.iv32;
+			iv16 = kseq.tkip.iv16;
+		}
 
 		seq[0] = iv16 & 0xff;
 		seq[1] = (iv16 >> 8) & 0xff;
@@ -355,51 +361,43 @@
 		break;
 	case WLAN_CIPHER_SUITE_CCMP:
 	case WLAN_CIPHER_SUITE_CCMP_256:
-		pn64 = atomic64_read(&key->u.ccmp.tx_pn);
-		seq[0] = pn64;
-		seq[1] = pn64 >> 8;
-		seq[2] = pn64 >> 16;
-		seq[3] = pn64 >> 24;
-		seq[4] = pn64 >> 32;
-		seq[5] = pn64 >> 40;
-		params.seq = seq;
-		params.seq_len = 6;
-		break;
 	case WLAN_CIPHER_SUITE_AES_CMAC:
 	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
-		pn64 = atomic64_read(&key->u.aes_cmac.tx_pn);
-		seq[0] = pn64;
-		seq[1] = pn64 >> 8;
-		seq[2] = pn64 >> 16;
-		seq[3] = pn64 >> 24;
-		seq[4] = pn64 >> 32;
-		seq[5] = pn64 >> 40;
-		params.seq = seq;
-		params.seq_len = 6;
-		break;
+		BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) !=
+			     offsetof(typeof(kseq), aes_cmac));
 	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
 	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
-		pn64 = atomic64_read(&key->u.aes_gmac.tx_pn);
-		seq[0] = pn64;
-		seq[1] = pn64 >> 8;
-		seq[2] = pn64 >> 16;
-		seq[3] = pn64 >> 24;
-		seq[4] = pn64 >> 32;
-		seq[5] = pn64 >> 40;
+		BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) !=
+			     offsetof(typeof(kseq), aes_gmac));
+	case WLAN_CIPHER_SUITE_GCMP:
+	case WLAN_CIPHER_SUITE_GCMP_256:
+		BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) !=
+			     offsetof(typeof(kseq), gcmp));
+
+		if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
+		    !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
+			drv_get_key_seq(sdata->local, key, &kseq);
+			memcpy(seq, kseq.ccmp.pn, 6);
+		} else {
+			pn64 = atomic64_read(&key->conf.tx_pn);
+			seq[0] = pn64;
+			seq[1] = pn64 >> 8;
+			seq[2] = pn64 >> 16;
+			seq[3] = pn64 >> 24;
+			seq[4] = pn64 >> 32;
+			seq[5] = pn64 >> 40;
+		}
 		params.seq = seq;
 		params.seq_len = 6;
 		break;
-	case WLAN_CIPHER_SUITE_GCMP:
-	case WLAN_CIPHER_SUITE_GCMP_256:
-		pn64 = atomic64_read(&key->u.gcmp.tx_pn);
-		seq[0] = pn64;
-		seq[1] = pn64 >> 8;
-		seq[2] = pn64 >> 16;
-		seq[3] = pn64 >> 24;
-		seq[4] = pn64 >> 32;
-		seq[5] = pn64 >> 40;
-		params.seq = seq;
-		params.seq_len = 6;
+	default:
+		if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
+			break;
+		if (WARN_ON(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV))
+			break;
+		drv_get_key_seq(sdata->local, key, &kseq);
+		params.seq = kseq.hw.seq;
+		params.seq_len = kseq.hw.seq_len;
 		break;
 	}
 
@@ -1372,6 +1370,7 @@
 		}
 
 		sta->sdata = vlansdata;
+		ieee80211_check_fast_xmit(sta);
 
 		if (sta->sta_state == IEEE80211_STA_AUTHORIZED &&
 		    prev_4addr != new_4addr) {
@@ -1764,7 +1763,7 @@
 		/* our RSSI threshold implementation is supported only for
 		 * devices that report signal in dBm.
 		 */
-		if (!(sdata->local->hw.flags & IEEE80211_HW_SIGNAL_DBM))
+		if (!ieee80211_hw_check(&sdata->local->hw, SIGNAL_DBM))
 			return -ENOTSUPP;
 		conf->rssi_threshold = nconf->rssi_threshold;
 	}
@@ -2099,10 +2098,14 @@
 	int err;
 
 	if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
+		ieee80211_check_fast_xmit_all(local);
+
 		err = drv_set_frag_threshold(local, wiphy->frag_threshold);
 
-		if (err)
+		if (err) {
+			ieee80211_check_fast_xmit_all(local);
 			return err;
+		}
 	}
 
 	if ((changed & WIPHY_PARAM_COVERAGE_CLASS) ||
@@ -2404,7 +2407,7 @@
 	if (sdata->vif.type != NL80211_IFTYPE_STATION)
 		return -EOPNOTSUPP;
 
-	if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
+	if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
 		return -EOPNOTSUPP;
 
 	if (enabled == sdata->u.mgd.powersave &&
@@ -2419,7 +2422,7 @@
 	__ieee80211_request_smps_mgd(sdata, sdata->u.mgd.req_smps);
 	sdata_unlock(sdata);
 
-	if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
+	if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
 		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
 
 	ieee80211_recalc_ps(local, -1);
@@ -2463,7 +2466,7 @@
 	if (!ieee80211_sdata_running(sdata))
 		return -ENETDOWN;
 
-	if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
+	if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
 		ret = drv_set_bitrate_mask(local, sdata, mask);
 		if (ret)
 			return ret;
@@ -2514,6 +2517,19 @@
 	return true;
 }
 
+static u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local)
+{
+	lockdep_assert_held(&local->mtx);
+
+	local->roc_cookie_counter++;
+
+	/* wow, you wrapped 64 bits ... more likely a bug */
+	if (WARN_ON(local->roc_cookie_counter == 0))
+		local->roc_cookie_counter++;
+
+	return local->roc_cookie_counter;
+}
+
 static int ieee80211_start_roc_work(struct ieee80211_local *local,
 				    struct ieee80211_sub_if_data *sdata,
 				    struct ieee80211_channel *channel,
@@ -2551,7 +2567,6 @@
 	roc->req_duration = duration;
 	roc->frame = txskb;
 	roc->type = type;
-	roc->mgmt_tx_cookie = (unsigned long)txskb;
 	roc->sdata = sdata;
 	INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work);
 	INIT_LIST_HEAD(&roc->dependents);
@@ -2561,17 +2576,10 @@
 	 * or the SKB (for mgmt TX)
 	 */
 	if (!txskb) {
-		/* local->mtx protects this */
-		local->roc_cookie_counter++;
-		roc->cookie = local->roc_cookie_counter;
-		/* wow, you wrapped 64 bits ... more likely a bug */
-		if (WARN_ON(roc->cookie == 0)) {
-			roc->cookie = 1;
-			local->roc_cookie_counter++;
-		}
+		roc->cookie = ieee80211_mgmt_tx_cookie(local);
 		*cookie = roc->cookie;
 	} else {
-		*cookie = (unsigned long)txskb;
+		roc->mgmt_tx_cookie = *cookie;
 	}
 
 	/* if there's one pending or we're scanning, queue this one */
@@ -3244,13 +3252,43 @@
 	return err;
 }
 
+static struct sk_buff *ieee80211_make_ack_skb(struct ieee80211_local *local,
+					      struct sk_buff *skb, u64 *cookie,
+					      gfp_t gfp)
+{
+	unsigned long spin_flags;
+	struct sk_buff *ack_skb;
+	int id;
+
+	ack_skb = skb_copy(skb, gfp);
+	if (!ack_skb)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_irqsave(&local->ack_status_lock, spin_flags);
+	id = idr_alloc(&local->ack_status_frames, ack_skb,
+		       1, 0x10000, GFP_ATOMIC);
+	spin_unlock_irqrestore(&local->ack_status_lock, spin_flags);
+
+	if (id < 0) {
+		kfree_skb(ack_skb);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	IEEE80211_SKB_CB(skb)->ack_frame_id = id;
+
+	*cookie = ieee80211_mgmt_tx_cookie(local);
+	IEEE80211_SKB_CB(ack_skb)->ack.cookie = *cookie;
+
+	return ack_skb;
+}
+
 static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 			     struct cfg80211_mgmt_tx_params *params,
 			     u64 *cookie)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 	struct ieee80211_local *local = sdata->local;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *ack_skb;
 	struct sta_info *sta;
 	const struct ieee80211_mgmt *mgmt = (void *)params->buf;
 	bool need_offchan = false;
@@ -3299,8 +3337,14 @@
 		break;
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_P2P_CLIENT:
-		if (!sdata->u.mgd.associated)
+		sdata_lock(sdata);
+		if (!sdata->u.mgd.associated ||
+		    (params->offchan && params->wait &&
+		     local->ops->remain_on_channel &&
+		     memcmp(sdata->u.mgd.associated->bssid,
+			    mgmt->bssid, ETH_ALEN)))
 			need_offchan = true;
+		sdata_unlock(sdata);
 		break;
 	case NL80211_IFTYPE_P2P_DEVICE:
 		need_offchan = true;
@@ -3356,6 +3400,7 @@
 	/* Update CSA counters */
 	if (sdata->vif.csa_active &&
 	    (sdata->vif.type == NL80211_IFTYPE_AP ||
+	     sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
 	     sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
 	    params->n_csa_offsets) {
 		int i;
@@ -3382,8 +3427,23 @@
 
 	skb->dev = sdata->dev;
 
+	if (!params->dont_wait_for_ack) {
+		/* make a copy to preserve the frame contents
+		 * in case of encryption.
+		 */
+		ack_skb = ieee80211_make_ack_skb(local, skb, cookie,
+						 GFP_KERNEL);
+		if (IS_ERR(ack_skb)) {
+			ret = PTR_ERR(ack_skb);
+			kfree_skb(skb);
+			goto out_unlock;
+		}
+	} else {
+		/* for cookie below */
+		ack_skb = skb;
+	}
+
 	if (!need_offchan) {
-		*cookie = (unsigned long) skb;
 		ieee80211_tx_skb(sdata, skb);
 		ret = 0;
 		goto out_unlock;
@@ -3391,7 +3451,7 @@
 
 	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN |
 					IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
-	if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
+	if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
 		IEEE80211_SKB_CB(skb)->hw_queue =
 			local->hw.offchannel_tx_hw_queue;
 
@@ -3476,7 +3536,7 @@
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_qos_hdr *nullfunc;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *ack_skb;
 	int size = sizeof(*nullfunc);
 	__le16 fc;
 	bool qos;
@@ -3484,20 +3544,24 @@
 	struct sta_info *sta;
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	enum ieee80211_band band;
+	int ret;
+
+	/* the lock is needed to assign the cookie later */
+	mutex_lock(&local->mtx);
 
 	rcu_read_lock();
 	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
 	if (WARN_ON(!chanctx_conf)) {
-		rcu_read_unlock();
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 	band = chanctx_conf->def.chan->band;
 	sta = sta_info_get_bss(sdata, peer);
 	if (sta) {
 		qos = sta->sta.wme;
 	} else {
-		rcu_read_unlock();
-		return -ENOLINK;
+		ret = -ENOLINK;
+		goto unlock;
 	}
 
 	if (qos) {
@@ -3513,8 +3577,8 @@
 
 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
 	if (!skb) {
-		rcu_read_unlock();
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto unlock;
 	}
 
 	skb->dev = dev;
@@ -3540,13 +3604,23 @@
 	if (qos)
 		nullfunc->qos_ctrl = cpu_to_le16(7);
 
+	ack_skb = ieee80211_make_ack_skb(local, skb, cookie, GFP_ATOMIC);
+	if (IS_ERR(ack_skb)) {
+		kfree_skb(skb);
+		ret = PTR_ERR(ack_skb);
+		goto unlock;
+	}
+
 	local_bh_disable();
 	ieee80211_xmit(sdata, sta, skb);
 	local_bh_enable();
-	rcu_read_unlock();
 
-	*cookie = (unsigned long) skb;
-	return 0;
+	ret = 0;
+unlock:
+	rcu_read_unlock();
+	mutex_unlock(&local->mtx);
+
+	return ret;
 }
 
 static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
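
The cipher switch in the cfg.c key-sequence code above lets CCMP, CMAC, GMAC and GCMP share one path by reading the PN through kseq.ccmp, and uses BUILD_BUG_ON(offsetof(...) != offsetof(...)) to prove the union members it aliases really share a layout. A userspace sketch of that guard; the struct and field names are illustrative, and static_assert stands in for BUILD_BUG_ON:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* toy stand-in for ieee80211_key_seq */
struct key_seq {
	union {
		struct { unsigned char pn[6]; } ccmp;
		struct { unsigned char pn[6]; } aes_cmac;
		struct { unsigned char pn[6]; } gcmp;
	};
};

int main(void)
{
	struct key_seq k;

	/* compile-time guard: reading .gcmp data through .ccmp is only
	 * safe because the aliased members share an offset */
	static_assert(offsetof(struct key_seq, ccmp) ==
		      offsetof(struct key_seq, gcmp), "union layout drifted");

	memcpy(k.gcmp.pn, "\x01\x02\x03\x04\x05\x06", 6);
	printf("pn[0] read via ccmp view: %d\n", k.ccmp.pn[0]);
	return 0;
}
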
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 5bcd4e5..f01c18a 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -664,6 +664,8 @@
 		ieee80211_bss_info_change_notify(sdata,
 						 BSS_CHANGED_IDLE);
 
+	ieee80211_check_fast_xmit_iface(sdata);
+
 	return ret;
 }
 
@@ -1008,6 +1010,8 @@
 	if (WARN_ON(!chandef))
 		return -EINVAL;
 
+	ieee80211_change_chanctx(local, new_ctx, chandef);
+
 	vif_chsw[0].vif = &sdata->vif;
 	vif_chsw[0].old_ctx = &old_ctx->conf;
 	vif_chsw[0].new_ctx = &new_ctx->conf;
@@ -1030,6 +1034,8 @@
 	if (sdata->vif.type == NL80211_IFTYPE_AP)
 		__ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
 
+	ieee80211_check_fast_xmit_iface(sdata);
+
 	if (ieee80211_chanctx_refcount(local, old_ctx) == 0)
 		ieee80211_free_chanctx(local, old_ctx);
 
@@ -1079,6 +1085,8 @@
 	if (WARN_ON(!chandef))
 		return -EINVAL;
 
+	ieee80211_change_chanctx(local, new_ctx, chandef);
+
 	list_del(&sdata->reserved_chanctx_list);
 	sdata->reserved_chanctx = NULL;
 
@@ -1376,6 +1384,8 @@
 				__ieee80211_vif_copy_chanctx_to_vlans(sdata,
 								      false);
 
+			ieee80211_check_fast_xmit_iface(sdata);
+
 			sdata->radar_required = sdata->reserved_radar_required;
 
 			if (sdata->vif.bss_conf.chandef.width !=
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 23813eb..3ea8b7d 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -1,4 +1,3 @@
-
 /*
  * mac80211 debugfs for wireless PHYs
  *
@@ -92,62 +91,66 @@
 };
 #endif
 
+static const char *hw_flag_names[NUM_IEEE80211_HW_FLAGS + 1] = {
+#define FLAG(F)	[IEEE80211_HW_##F] = #F
+	FLAG(HAS_RATE_CONTROL),
+	FLAG(RX_INCLUDES_FCS),
+	FLAG(HOST_BROADCAST_PS_BUFFERING),
+	FLAG(SIGNAL_UNSPEC),
+	FLAG(SIGNAL_DBM),
+	FLAG(NEED_DTIM_BEFORE_ASSOC),
+	FLAG(SPECTRUM_MGMT),
+	FLAG(AMPDU_AGGREGATION),
+	FLAG(SUPPORTS_PS),
+	FLAG(PS_NULLFUNC_STACK),
+	FLAG(SUPPORTS_DYNAMIC_PS),
+	FLAG(MFP_CAPABLE),
+	FLAG(WANT_MONITOR_VIF),
+	FLAG(NO_AUTO_VIF),
+	FLAG(SW_CRYPTO_CONTROL),
+	FLAG(SUPPORT_FAST_XMIT),
+	FLAG(REPORTS_TX_ACK_STATUS),
+	FLAG(CONNECTION_MONITOR),
+	FLAG(QUEUE_CONTROL),
+	FLAG(SUPPORTS_PER_STA_GTK),
+	FLAG(AP_LINK_PS),
+	FLAG(TX_AMPDU_SETUP_IN_HW),
+	FLAG(SUPPORTS_RC_TABLE),
+	FLAG(P2P_DEV_ADDR_FOR_INTF),
+	FLAG(TIMING_BEACON_ONLY),
+	FLAG(SUPPORTS_HT_CCK_RATES),
+	FLAG(CHANCTX_STA_CSA),
+	FLAG(SUPPORTS_CLONED_SKBS),
+	FLAG(SINGLE_SCAN_ON_ALL_BANDS),
+
+	/* keep last for the build bug below */
+	(void *)0x1
+#undef FLAG
+};
+
 static ssize_t hwflags_read(struct file *file, char __user *user_buf,
 			    size_t count, loff_t *ppos)
 {
 	struct ieee80211_local *local = file->private_data;
-	int mxln = 500;
+	size_t bufsz = 30 * NUM_IEEE80211_HW_FLAGS;
+	char *buf = kzalloc(bufsz, GFP_KERNEL);
+	char *pos = buf, *end = buf + bufsz - 1;
 	ssize_t rv;
-	char *buf = kzalloc(mxln, GFP_KERNEL);
-	int sf = 0; /* how many written so far */
+	int i;
 
 	if (!buf)
-		return 0;
+		return -ENOMEM;
 
-	sf += scnprintf(buf, mxln - sf, "0x%x\n", local->hw.flags);
-	if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
-		sf += scnprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n");
-	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
-		sf += scnprintf(buf + sf, mxln - sf, "RX_INCLUDES_FCS\n");
-	if (local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)
-		sf += scnprintf(buf + sf, mxln - sf,
-				"HOST_BCAST_PS_BUFFERING\n");
-	if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE)
-		sf += scnprintf(buf + sf, mxln - sf,
-				"2GHZ_SHORT_SLOT_INCAPABLE\n");
-	if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE)
-		sf += scnprintf(buf + sf, mxln - sf,
-				"2GHZ_SHORT_PREAMBLE_INCAPABLE\n");
-	if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
-		sf += scnprintf(buf + sf, mxln - sf, "SIGNAL_UNSPEC\n");
-	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
-		sf += scnprintf(buf + sf, mxln - sf, "SIGNAL_DBM\n");
-	if (local->hw.flags & IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC)
-		sf += scnprintf(buf + sf, mxln - sf,
-				"NEED_DTIM_BEFORE_ASSOC\n");
-	if (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)
-		sf += scnprintf(buf + sf, mxln - sf, "SPECTRUM_MGMT\n");
-	if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)
-		sf += scnprintf(buf + sf, mxln - sf, "AMPDU_AGGREGATION\n");
-	if (local->hw.flags & IEEE80211_HW_SUPPORTS_PS)
-		sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_PS\n");
-	if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
-		sf += scnprintf(buf + sf, mxln - sf, "PS_NULLFUNC_STACK\n");
-	if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
-		sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n");
-	if (local->hw.flags & IEEE80211_HW_MFP_CAPABLE)
-		sf += scnprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n");
-	if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
-		sf += scnprintf(buf + sf, mxln - sf,
-				"REPORTS_TX_ACK_STATUS\n");
-	if (local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
-		sf += scnprintf(buf + sf, mxln - sf, "CONNECTION_MONITOR\n");
-	if (local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK)
-		sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
-	if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
-		sf += scnprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
-	if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)
-		sf += scnprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
+	/* fail compilation if somebody adds or removes
+	 * a flag without updating the name array above
+	 */
+	BUILD_BUG_ON(hw_flag_names[NUM_IEEE80211_HW_FLAGS] != (void *)0x1);
+
+	for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) {
+		if (test_bit(i, local->hw.flags))
+			pos += scnprintf(pos, end - pos, "%s",
+					 hw_flag_names[i]);
+	}
 
 	rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
 	kfree(buf);
@@ -219,8 +222,8 @@
 	.llseek = generic_file_llseek,					\
 };
 
-#define DEBUGFS_STATS_ADD(name, field)					\
-	debugfs_create_u32(#name, 0400, statsd, (u32 *) &field);
+#define DEBUGFS_STATS_ADD(name)					\
+	debugfs_create_u32(#name, 0400, statsd, &local->name);
 #define DEBUGFS_DEVSTATS_ADD(name)					\
 	debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops);
 
@@ -255,53 +258,31 @@
 	if (!statsd)
 		return;
 
-	DEBUGFS_STATS_ADD(transmitted_fragment_count,
-		local->dot11TransmittedFragmentCount);
-	DEBUGFS_STATS_ADD(multicast_transmitted_frame_count,
-		local->dot11MulticastTransmittedFrameCount);
-	DEBUGFS_STATS_ADD(failed_count, local->dot11FailedCount);
-	DEBUGFS_STATS_ADD(retry_count, local->dot11RetryCount);
-	DEBUGFS_STATS_ADD(multiple_retry_count,
-		local->dot11MultipleRetryCount);
-	DEBUGFS_STATS_ADD(frame_duplicate_count,
-		local->dot11FrameDuplicateCount);
-	DEBUGFS_STATS_ADD(received_fragment_count,
-		local->dot11ReceivedFragmentCount);
-	DEBUGFS_STATS_ADD(multicast_received_frame_count,
-		local->dot11MulticastReceivedFrameCount);
-	DEBUGFS_STATS_ADD(transmitted_frame_count,
-		local->dot11TransmittedFrameCount);
 #ifdef CONFIG_MAC80211_DEBUG_COUNTERS
-	DEBUGFS_STATS_ADD(tx_handlers_drop, local->tx_handlers_drop);
-	DEBUGFS_STATS_ADD(tx_handlers_queued, local->tx_handlers_queued);
-	DEBUGFS_STATS_ADD(tx_handlers_drop_fragment,
-		local->tx_handlers_drop_fragment);
-	DEBUGFS_STATS_ADD(tx_handlers_drop_wep,
-		local->tx_handlers_drop_wep);
-	DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc,
-		local->tx_handlers_drop_not_assoc);
-	DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port,
-		local->tx_handlers_drop_unauth_port);
-	DEBUGFS_STATS_ADD(rx_handlers_drop, local->rx_handlers_drop);
-	DEBUGFS_STATS_ADD(rx_handlers_queued, local->rx_handlers_queued);
-	DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc,
-		local->rx_handlers_drop_nullfunc);
-	DEBUGFS_STATS_ADD(rx_handlers_drop_defrag,
-		local->rx_handlers_drop_defrag);
-	DEBUGFS_STATS_ADD(rx_handlers_drop_short,
-		local->rx_handlers_drop_short);
-	DEBUGFS_STATS_ADD(tx_expand_skb_head,
-		local->tx_expand_skb_head);
-	DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned,
-		local->tx_expand_skb_head_cloned);
-	DEBUGFS_STATS_ADD(rx_expand_skb_head,
-		local->rx_expand_skb_head);
-	DEBUGFS_STATS_ADD(rx_expand_skb_head2,
-		local->rx_expand_skb_head2);
-	DEBUGFS_STATS_ADD(rx_handlers_fragments,
-		local->rx_handlers_fragments);
-	DEBUGFS_STATS_ADD(tx_status_drop,
-		local->tx_status_drop);
+	DEBUGFS_STATS_ADD(dot11TransmittedFragmentCount);
+	DEBUGFS_STATS_ADD(dot11MulticastTransmittedFrameCount);
+	DEBUGFS_STATS_ADD(dot11FailedCount);
+	DEBUGFS_STATS_ADD(dot11RetryCount);
+	DEBUGFS_STATS_ADD(dot11MultipleRetryCount);
+	DEBUGFS_STATS_ADD(dot11FrameDuplicateCount);
+	DEBUGFS_STATS_ADD(dot11ReceivedFragmentCount);
+	DEBUGFS_STATS_ADD(dot11MulticastReceivedFrameCount);
+	DEBUGFS_STATS_ADD(dot11TransmittedFrameCount);
+	DEBUGFS_STATS_ADD(tx_handlers_drop);
+	DEBUGFS_STATS_ADD(tx_handlers_queued);
+	DEBUGFS_STATS_ADD(tx_handlers_drop_wep);
+	DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc);
+	DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port);
+	DEBUGFS_STATS_ADD(rx_handlers_drop);
+	DEBUGFS_STATS_ADD(rx_handlers_queued);
+	DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc);
+	DEBUGFS_STATS_ADD(rx_handlers_drop_defrag);
+	DEBUGFS_STATS_ADD(rx_handlers_drop_short);
+	DEBUGFS_STATS_ADD(tx_expand_skb_head);
+	DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned);
+	DEBUGFS_STATS_ADD(rx_expand_skb_head_defrag);
+	DEBUGFS_STATS_ADD(rx_handlers_fragments);
+	DEBUGFS_STATS_ADD(tx_status_drop);
 #endif
 	DEBUGFS_DEVSTATS_ADD(dot11ACKFailureCount);
 	DEBUGFS_DEVSTATS_ADD(dot11RTSFailureCount);
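
The hwflags_read() rewrite above trades the hand-maintained if-chain for a stringifier table: a FLAG() macro builds a name array indexed by the enum, and a sentinel entry pinned at NUM_IEEE80211_HW_FLAGS lets BUILD_BUG_ON catch an enum that grew without a matching name. A compact userspace sketch of the same guard (the enum values are illustrative; a runtime assert stands in for BUILD_BUG_ON):

#include <assert.h>
#include <stdio.h>

enum hw_flags {
	HW_HAS_RATE_CONTROL,
	HW_SIGNAL_DBM,
	HW_SUPPORTS_PS,
	NUM_HW_FLAGS
};

static const char *hw_flag_names[NUM_HW_FLAGS + 1] = {
#define FLAG(F) [HW_##F] = #F
	FLAG(HAS_RATE_CONTROL),
	FLAG(SIGNAL_DBM),
	FLAG(SUPPORTS_PS),
	/* keep last: if the enum grows without a name, this entry moves */
	(const char *)0x1
#undef FLAG
};

int main(void)
{
	int i;

	assert(hw_flag_names[NUM_HW_FLAGS] == (const char *)0x1);
	for (i = 0; i < NUM_HW_FLAGS; i++)
		printf("%s\n", hw_flag_names[i]);
	return 0;
}
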
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 71ac1b5..e82bf1e 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -95,28 +95,13 @@
 		break;
 	case WLAN_CIPHER_SUITE_CCMP:
 	case WLAN_CIPHER_SUITE_CCMP_256:
-		pn = atomic64_read(&key->u.ccmp.tx_pn);
-		len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
-				(u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
-				(u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
-		break;
 	case WLAN_CIPHER_SUITE_AES_CMAC:
 	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
-		pn = atomic64_read(&key->u.aes_cmac.tx_pn);
-		len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
-				(u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
-				(u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
-		break;
 	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
 	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
-		pn = atomic64_read(&key->u.aes_gmac.tx_pn);
-		len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
-				(u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
-				(u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
-		break;
 	case WLAN_CIPHER_SUITE_GCMP:
 	case WLAN_CIPHER_SUITE_GCMP_256:
-		pn = atomic64_read(&key->u.gcmp.tx_pn);
+		pn = atomic64_read(&key->conf.tx_pn);
 		len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
 				(u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
 				(u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
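
With the per-cipher tx_pn counters folded into key->conf.tx_pn, every
PN-bearing cipher case can fall through to a single read-and-format path. The
six scnprintf() arguments print the 48-bit packet number most-significant byte
first; a runnable sketch of just that byte extraction:

    #include <stdio.h>
    #include <stdint.h>

    /* Print a 48-bit PN the way the debugfs file does: MSB first. */
    static void print_pn(uint64_t pn)
    {
        printf("%02x%02x%02x%02x%02x%02x\n",
               (uint8_t)(pn >> 40), (uint8_t)(pn >> 32), (uint8_t)(pn >> 24),
               (uint8_t)(pn >> 16), (uint8_t)(pn >> 8), (uint8_t)pn);
    }

    int main(void)
    {
        print_pn(0x010203040506ULL);   /* prints 010203040506 */
        return 0;
    }
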
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 252859e..06d5293 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -29,8 +29,6 @@
 				      format_string, sta->field);	\
 }
 #define STA_READ_D(name, field) STA_READ(name, field, "%d\n")
-#define STA_READ_U(name, field) STA_READ(name, field, "%u\n")
-#define STA_READ_S(name, field) STA_READ(name, field, "%s\n")
 
 #define STA_OPS(name)							\
 static const struct file_operations sta_ ##name## _ops = {		\
@@ -52,10 +50,7 @@
 		STA_OPS(name)
 
 STA_FILE(aid, sta.aid, D);
-STA_FILE(dev, sdata->name, S);
-STA_FILE(last_signal, last_signal, D);
 STA_FILE(last_ack_signal, last_ack_signal, D);
-STA_FILE(beacon_loss_count, beacon_loss_count, D);
 
 static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
 			      size_t count, loff_t *ppos)
@@ -101,40 +96,6 @@
 }
 STA_OPS(num_ps_buf_frames);
 
-static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf,
-				    size_t count, loff_t *ppos)
-{
-	struct sta_info *sta = file->private_data;
-	return mac80211_format_buffer(userbuf, count, ppos, "%d\n",
-				      jiffies_to_msecs(jiffies - sta->last_rx));
-}
-STA_OPS(inactive_ms);
-
-
-static ssize_t sta_connected_time_read(struct file *file, char __user *userbuf,
-					size_t count, loff_t *ppos)
-{
-	struct sta_info *sta = file->private_data;
-	struct timespec uptime;
-	struct tm result;
-	long connected_time_secs;
-	char buf[100];
-	int res;
-	ktime_get_ts(&uptime);
-	connected_time_secs = uptime.tv_sec - sta->last_connected;
-	time_to_tm(connected_time_secs, 0, &result);
-	result.tm_year -= 70;
-	result.tm_mday -= 1;
-	res = scnprintf(buf, sizeof(buf),
-		"years  - %ld\nmonths - %d\ndays   - %d\nclock  - %d:%d:%d\n\n",
-			result.tm_year, result.tm_mon, result.tm_mday,
-			result.tm_hour, result.tm_min, result.tm_sec);
-	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
-}
-STA_OPS(connected_time);
-
-
-
 static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf,
 				      size_t count, loff_t *ppos)
 {
@@ -359,37 +320,6 @@
 }
 STA_OPS(vht_capa);
 
-static ssize_t sta_current_tx_rate_read(struct file *file, char __user *userbuf,
-					size_t count, loff_t *ppos)
-{
-	struct sta_info *sta = file->private_data;
-	struct rate_info rinfo;
-	u16 rate;
-	sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo);
-	rate = cfg80211_calculate_bitrate(&rinfo);
-
-	return mac80211_format_buffer(userbuf, count, ppos,
-				      "%d.%d MBit/s\n",
-				      rate/10, rate%10);
-}
-STA_OPS(current_tx_rate);
-
-static ssize_t sta_last_rx_rate_read(struct file *file, char __user *userbuf,
-				     size_t count, loff_t *ppos)
-{
-	struct sta_info *sta = file->private_data;
-	struct rate_info rinfo;
-	u16 rate;
-
-	sta_set_rate_info_rx(sta, &rinfo);
-
-	rate = cfg80211_calculate_bitrate(&rinfo);
-
-	return mac80211_format_buffer(userbuf, count, ppos,
-				      "%d.%d MBit/s\n",
-				      rate/10, rate%10);
-}
-STA_OPS(last_rx_rate);
 
 #define DEBUGFS_ADD(name) \
 	debugfs_create_file(#name, 0400, \
@@ -432,30 +362,15 @@
 
 	DEBUGFS_ADD(flags);
 	DEBUGFS_ADD(num_ps_buf_frames);
-	DEBUGFS_ADD(inactive_ms);
-	DEBUGFS_ADD(connected_time);
 	DEBUGFS_ADD(last_seq_ctrl);
 	DEBUGFS_ADD(agg_status);
-	DEBUGFS_ADD(dev);
-	DEBUGFS_ADD(last_signal);
-	DEBUGFS_ADD(beacon_loss_count);
 	DEBUGFS_ADD(ht_capa);
 	DEBUGFS_ADD(vht_capa);
 	DEBUGFS_ADD(last_ack_signal);
-	DEBUGFS_ADD(current_tx_rate);
-	DEBUGFS_ADD(last_rx_rate);
 
-	DEBUGFS_ADD_COUNTER(rx_packets, rx_packets);
-	DEBUGFS_ADD_COUNTER(tx_packets, tx_packets);
-	DEBUGFS_ADD_COUNTER(rx_bytes, rx_bytes);
-	DEBUGFS_ADD_COUNTER(tx_bytes, tx_bytes);
 	DEBUGFS_ADD_COUNTER(rx_duplicates, num_duplicates);
 	DEBUGFS_ADD_COUNTER(rx_fragments, rx_fragments);
-	DEBUGFS_ADD_COUNTER(rx_dropped, rx_dropped);
-	DEBUGFS_ADD_COUNTER(tx_fragments, tx_fragments);
 	DEBUGFS_ADD_COUNTER(tx_filtered, tx_filtered_count);
-	DEBUGFS_ADD_COUNTER(tx_retry_failed, tx_retry_failed);
-	DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count);
 
 	if (sizeof(sta->driver_buffered_tids) == sizeof(u32))
 		debugfs_create_x32("driver_buffered_tids", 0400,
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 26e1ca8..32a2e70 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -146,7 +146,7 @@
 
 	if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
 		    (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
-		     !(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF) &&
+		     !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) &&
 		     !(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))))
 		return -EINVAL;
 
@@ -417,12 +417,13 @@
 	return ret;
 }
 
-static inline void drv_get_tkip_seq(struct ieee80211_local *local,
-				    u8 hw_key_idx, u32 *iv32, u16 *iv16)
+static inline void drv_get_key_seq(struct ieee80211_local *local,
+				   struct ieee80211_key *key,
+				   struct ieee80211_key_seq *seq)
 {
-	if (local->ops->get_tkip_seq)
-		local->ops->get_tkip_seq(&local->hw, hw_key_idx, iv32, iv16);
-	trace_drv_get_tkip_seq(local, hw_key_idx, iv32, iv16);
+	if (local->ops->get_key_seq)
+		local->ops->get_key_seq(&local->hw, &key->conf, seq);
+	trace_drv_get_key_seq(local, &key->conf);
 }
 
 static inline int drv_set_frag_threshold(struct ieee80211_local *local,
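
Two related conversions run through this header: open-coded
`local->hw.flags & IEEE80211_HW_FOO` tests become
ieee80211_hw_check(&local->hw, FOO), and the TKIP-only drv_get_tkip_seq()
becomes the cipher-agnostic drv_get_key_seq(). The check helper can afford the
shorter spelling because the macro pastes the IEEE80211_HW_ prefix back on; a
runnable sketch of that idiom under assumed names (the real flag storage is a
bitmap in struct ieee80211_hw):

    #include <stdbool.h>
    #include <stdio.h>

    enum hw_flag { HW_QUEUE_CONTROL, HW_WANT_MONITOR_VIF, NUM_HW_FLAGS };

    struct hw { unsigned long flags; /* one bit per enum hw_flag value */ };

    static bool hw_test(const struct hw *hw, enum hw_flag f)
    {
        return (hw->flags >> f) & 1;
    }

    /* Token pasting restores the prefix, so call sites can say
     * hw_check(hw, QUEUE_CONTROL) instead of spelling out the mask. */
    #define hw_check(hw, flg) hw_test(hw, HW_##flg)

    int main(void)
    {
        struct hw hw = { .flags = 1UL << HW_QUEUE_CONTROL };
        printf("%d %d\n", hw_check(&hw, QUEUE_CONTROL),
               hw_check(&hw, WANT_MONITOR_VIF));   /* 1 0 */
        return 0;
    }
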
diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c
index 52bcea6..188faab 100644
--- a/net/mac80211/ethtool.c
+++ b/net/mac80211/ethtool.c
@@ -38,7 +38,7 @@
 static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
 	"rx_packets", "rx_bytes",
 	"rx_duplicates", "rx_fragments", "rx_dropped",
-	"tx_packets", "tx_bytes", "tx_fragments",
+	"tx_packets", "tx_bytes",
 	"tx_filtered", "tx_retry_failed", "tx_retries",
 	"beacon_loss", "sta_state", "txrate", "rxrate", "signal",
 	"channel", "noise", "ch_time", "ch_time_busy",
@@ -87,7 +87,6 @@
 							\
 		data[i++] += sinfo.tx_packets;		\
 		data[i++] += sinfo.tx_bytes;		\
-		data[i++] += sta->tx_fragments;		\
 		data[i++] += sta->tx_filtered_count;	\
 		data[i++] += sta->tx_retry_failed;	\
 		data[i++] += sta->tx_retry_count;	\
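
ieee80211_gstrings_sta_stats[] and the data-fill macro below it form parallel
arrays: ethtool reports data[i] under gstrings[i], so "tx_fragments" has to
disappear from both at once or every later counter would be reported under the
wrong label. A runnable illustration of that invariant (the names are
examples, not the full mac80211 table):

    #include <stdio.h>

    #define ETH_GSTRING_LEN 32

    static const char gstrings[][ETH_GSTRING_LEN] = {
        "rx_packets", "tx_packets", "tx_filtered",
    };

    int main(void)
    {
        unsigned long long data[3] = { 10, 7, 2 };
        size_t i;

        /* Entry i of data[] is reported under gstrings[i]; the two
         * tables must be edited in lockstep. */
        for (i = 0; i < sizeof(gstrings) / sizeof(gstrings[0]); i++)
            printf("%-14s %llu\n", gstrings[i], data[i]);
        return 0;
    }
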
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index bfef1b2..7f72bc9 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -146,6 +146,7 @@
 				csa_settings->chandef.chan->center_freq);
 		presp->csa_counter_offsets[0] = (pos - presp->head);
 		*pos++ = csa_settings->count;
+		presp->csa_current_counter = csa_settings->count;
 	}
 
 	/* put the remaining rates in WLAN_EID_EXT_SUPP_RATES */
@@ -1031,8 +1032,11 @@
 		}
 	}
 
-	if (sta && elems->wmm_info && local->hw.queues >= IEEE80211_NUM_ACS)
+	if (sta && !sta->sta.wme &&
+	    elems->wmm_info && local->hw.queues >= IEEE80211_NUM_ACS) {
 		sta->sta.wme = true;
+		ieee80211_check_fast_xmit(sta);
+	}
 
 	if (sta && elems->ht_operation && elems->ht_cap_elem &&
 	    sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index c0a9187..b12f615 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -181,8 +181,6 @@
 
 /**
  * enum ieee80211_packet_rx_flags - packet RX flags
- * @IEEE80211_RX_RA_MATCH: frame is destined to interface currently processed
- *	(incl. multicast frames)
  * @IEEE80211_RX_FRAGMENTED: fragmented frame
  * @IEEE80211_RX_AMSDU: a-MSDU packet
  * @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed
@@ -192,7 +190,6 @@
  * @rx_flags field of &struct ieee80211_rx_status.
  */
 enum ieee80211_packet_rx_flags {
-	IEEE80211_RX_RA_MATCH			= BIT(1),
 	IEEE80211_RX_FRAGMENTED			= BIT(2),
 	IEEE80211_RX_AMSDU			= BIT(3),
 	IEEE80211_RX_MALFORMED_ACTION_FRM	= BIT(4),
@@ -722,7 +719,6 @@
  * enum ieee80211_sub_if_data_flags - virtual interface flags
  *
  * @IEEE80211_SDATA_ALLMULTI: interface wants all multicast packets
- * @IEEE80211_SDATA_PROMISC: interface is promisc
  * @IEEE80211_SDATA_OPERATING_GMODE: operating in G-only mode
  * @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: bridge packets between
  *	associated stations and deliver multicast frames both
@@ -732,7 +728,6 @@
  */
 enum ieee80211_sub_if_data_flags {
 	IEEE80211_SDATA_ALLMULTI		= BIT(0),
-	IEEE80211_SDATA_PROMISC			= BIT(1),
 	IEEE80211_SDATA_OPERATING_GMODE		= BIT(2),
 	IEEE80211_SDATA_DONT_BRIDGE_PACKETS	= BIT(3),
 	IEEE80211_SDATA_DISCONNECT_RESUME	= BIT(4),
@@ -1040,7 +1035,6 @@
 
 #ifdef CONFIG_MAC80211_LEDS
 struct tpt_led_trigger {
-	struct led_trigger trig;
 	char name[32];
 	const struct ieee80211_tpt_blink *blink_table;
 	unsigned int blink_table_len;
@@ -1208,8 +1202,8 @@
 
 	atomic_t agg_queue_stop[IEEE80211_MAX_QUEUES];
 
-	/* number of interfaces with corresponding IFF_ flags */
-	atomic_t iff_allmultis, iff_promiscs;
+	/* number of interfaces with allmulti RX */
+	atomic_t iff_allmultis;
 
 	struct rate_control_ref *rate_ctrl;
 
@@ -1261,6 +1255,15 @@
 	struct list_head chanctx_list;
 	struct mutex chanctx_mtx;
 
+#ifdef CONFIG_MAC80211_LEDS
+	struct led_trigger tx_led, rx_led, assoc_led, radio_led;
+	struct led_trigger tpt_led;
+	atomic_t tx_led_active, rx_led_active, assoc_led_active;
+	atomic_t radio_led_active, tpt_led_active;
+	struct tpt_led_trigger *tpt_led_trigger;
+#endif
+
+#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
 	/* SNMP counters */
 	/* dot11CountersTable */
 	u32 dot11TransmittedFragmentCount;
@@ -1273,18 +1276,9 @@
 	u32 dot11MulticastReceivedFrameCount;
 	u32 dot11TransmittedFrameCount;
 
-#ifdef CONFIG_MAC80211_LEDS
-	struct led_trigger *tx_led, *rx_led, *assoc_led, *radio_led;
-	struct tpt_led_trigger *tpt_led_trigger;
-	char tx_led_name[32], rx_led_name[32],
-	     assoc_led_name[32], radio_led_name[32];
-#endif
-
-#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
 	/* TX/RX handler statistics */
 	unsigned int tx_handlers_drop;
 	unsigned int tx_handlers_queued;
-	unsigned int tx_handlers_drop_fragment;
 	unsigned int tx_handlers_drop_wep;
 	unsigned int tx_handlers_drop_not_assoc;
 	unsigned int tx_handlers_drop_unauth_port;
@@ -1295,8 +1289,7 @@
 	unsigned int rx_handlers_drop_short;
 	unsigned int tx_expand_skb_head;
 	unsigned int tx_expand_skb_head_cloned;
-	unsigned int rx_expand_skb_head;
-	unsigned int rx_expand_skb_head2;
+	unsigned int rx_expand_skb_head_defrag;
 	unsigned int rx_handlers_fragments;
 	unsigned int tx_status_drop;
 #define I802_DEBUG_INC(c) (c)++
@@ -1648,6 +1641,11 @@
 ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
 			      struct sk_buff *skb, u32 info_flags);
 
+void ieee80211_check_fast_xmit(struct sta_info *sta);
+void ieee80211_check_fast_xmit_all(struct ieee80211_local *local);
+void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata);
+void ieee80211_clear_fast_xmit(struct sta_info *sta);
+
 /* HT */
 void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
 				     struct ieee80211_sta_ht_cap *ht_cap);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 84cef60..ed1edac 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -338,7 +338,7 @@
 	if ((iftype != NL80211_IFTYPE_AP &&
 	     iftype != NL80211_IFTYPE_P2P_GO &&
 	     iftype != NL80211_IFTYPE_MESH_POINT) ||
-	    !(sdata->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)) {
+	    !ieee80211_hw_check(&sdata->local->hw, QUEUE_CONTROL)) {
 		sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
 		return 0;
 	}
@@ -378,7 +378,7 @@
 	int i;
 
 	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
-		if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
+		if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
 			sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE;
 		else if (local->hw.queues >= IEEE80211_NUM_ACS)
 			sdata->vif.hw_queue[i] = i;
@@ -393,7 +393,7 @@
 	struct ieee80211_sub_if_data *sdata;
 	int ret;
 
-	if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
+	if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
 		return 0;
 
 	ASSERT_RTNL();
@@ -454,7 +454,7 @@
 {
 	struct ieee80211_sub_if_data *sdata;
 
-	if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
+	if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
 		return;
 
 	ASSERT_RTNL();
@@ -703,9 +703,6 @@
 	if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
 		atomic_inc(&local->iff_allmultis);
 
-	if (sdata->flags & IEEE80211_SDATA_PROMISC)
-		atomic_inc(&local->iff_promiscs);
-
 	if (coming_up)
 		local->open_count++;
 
@@ -835,13 +832,10 @@
 		     ((sdata->vif.type != NL80211_IFTYPE_WDS && flushed > 0) ||
 		      (sdata->vif.type == NL80211_IFTYPE_WDS && flushed != 1)));
 
-	/* don't count this interface for promisc/allmulti while it is down */
+	/* don't count this interface for allmulti while it is down */
 	if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
 		atomic_dec(&local->iff_allmultis);
 
-	if (sdata->flags & IEEE80211_SDATA_PROMISC)
-		atomic_dec(&local->iff_promiscs);
-
 	if (sdata->vif.type == NL80211_IFTYPE_AP) {
 		local->fif_pspoll--;
 		local->fif_probe_req--;
@@ -1055,12 +1049,10 @@
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
-	int allmulti, promisc, sdata_allmulti, sdata_promisc;
+	int allmulti, sdata_allmulti;
 
 	allmulti = !!(dev->flags & IFF_ALLMULTI);
-	promisc = !!(dev->flags & IFF_PROMISC);
 	sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
-	sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);
 
 	if (allmulti != sdata_allmulti) {
 		if (dev->flags & IFF_ALLMULTI)
@@ -1070,13 +1062,6 @@
 		sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
 	}
 
-	if (promisc != sdata_promisc) {
-		if (dev->flags & IFF_PROMISC)
-			atomic_inc(&local->iff_promiscs);
-		else
-			atomic_dec(&local->iff_promiscs);
-		sdata->flags ^= IEEE80211_SDATA_PROMISC;
-	}
 	spin_lock_bh(&local->filter_lock);
 	__hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len);
 	spin_unlock_bh(&local->filter_lock);
@@ -1117,6 +1102,35 @@
 	return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 }
 
+static struct rtnl_link_stats64 *
+ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		const struct pcpu_sw_netstats *tstats;
+		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+		unsigned int start;
+
+		tstats = per_cpu_ptr(dev->tstats, i);
+
+		do {
+			start = u64_stats_fetch_begin_irq(&tstats->syncp);
+			rx_packets = tstats->rx_packets;
+			tx_packets = tstats->tx_packets;
+			rx_bytes = tstats->rx_bytes;
+			tx_bytes = tstats->tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
+
+		stats->rx_packets += rx_packets;
+		stats->tx_packets += tx_packets;
+		stats->rx_bytes   += rx_bytes;
+		stats->tx_bytes   += tx_bytes;
+	}
+
+	return stats;
+}
+
 static const struct net_device_ops ieee80211_dataif_ops = {
 	.ndo_open		= ieee80211_open,
 	.ndo_stop		= ieee80211_stop,
@@ -1126,6 +1140,7 @@
 	.ndo_change_mtu 	= ieee80211_change_mtu,
 	.ndo_set_mac_address 	= ieee80211_change_mac,
 	.ndo_select_queue	= ieee80211_netdev_select_queue,
+	.ndo_get_stats64	= ieee80211_get_stats64,
 };
 
 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
@@ -1159,14 +1174,21 @@
 	.ndo_change_mtu 	= ieee80211_change_mtu,
 	.ndo_set_mac_address 	= ieee80211_change_mac,
 	.ndo_select_queue	= ieee80211_monitor_select_queue,
+	.ndo_get_stats64	= ieee80211_get_stats64,
 };
 
+static void ieee80211_if_free(struct net_device *dev)
+{
+	free_percpu(dev->tstats);
+	free_netdev(dev);
+}
+
 static void ieee80211_if_setup(struct net_device *dev)
 {
 	ether_setup(dev);
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	dev->netdev_ops = &ieee80211_dataif_ops;
-	dev->destructor = free_netdev;
+	dev->destructor = ieee80211_if_free;
 }
 
 static void ieee80211_iface_work(struct work_struct *work)
@@ -1564,7 +1586,7 @@
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
 	case NL80211_IFTYPE_P2P_GO:
-		if (local->hw.flags & IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF) {
+		if (ieee80211_hw_check(&local->hw, P2P_DEV_ADDR_FOR_INTF)) {
 			list_for_each_entry(sdata, &local->interfaces, list) {
 				if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE)
 					continue;
@@ -1707,6 +1729,12 @@
 			return -ENOMEM;
 		dev_net_set(ndev, wiphy_net(local->hw.wiphy));
 
+		ndev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+		if (!ndev->tstats) {
+			free_netdev(ndev);
+			return -ENOMEM;
+		}
+
 		ndev->needed_headroom = local->tx_headroom +
 					4*6 /* four MAC addresses */
 					+ 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */
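
ieee80211_get_stats64() above is the reader half of the standard
pcpu_sw_netstats pattern: the u64_stats_fetch_begin_irq()/retry_irq() loop
repeats until it obtains a snapshot no writer was mid-update on (a no-op on
64-bit, a seqcount on 32-bit). The writer half, which the TX/RX hot paths
would use, looks roughly like the following kernel-style sketch — the function
name is hypothetical, though the u64_stats API calls are the real ones:

    /* hypothetical per-packet update on the RX path, assuming dev->tstats
     * was set up with netdev_alloc_pcpu_stats() as in ieee80211_if_add() */
    static void rx_stats_update(struct net_device *dev, struct sk_buff *skb)
    {
        struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

        u64_stats_update_begin(&tstats->syncp);
        tstats->rx_packets++;
        tstats->rx_bytes += skb->len;
        u64_stats_update_end(&tstats->syncp);
    }
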
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index a907f2d..b22df3a 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -66,12 +66,15 @@
 	if (sdata->vif.type != NL80211_IFTYPE_AP)
 		return;
 
-	mutex_lock(&sdata->local->mtx);
+	/* crypto_tx_tailroom_needed_cnt is protected by this */
+	assert_key_lock(sdata->local);
 
-	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(vlan, &sdata->u.ap.vlans, u.vlan.list)
 		vlan->crypto_tx_tailroom_needed_cnt += delta;
 
-	mutex_unlock(&sdata->local->mtx);
+	rcu_read_unlock();
 }
 
 static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
@@ -95,6 +98,8 @@
 	 * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
 	 */
 
+	assert_key_lock(sdata->local);
+
 	update_vlan_tailroom_need_count(sdata, 1);
 
 	if (!sdata->crypto_tx_tailroom_needed_cnt++) {
@@ -109,6 +114,8 @@
 static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
 					 int delta)
 {
+	assert_key_lock(sdata->local);
+
 	WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta);
 
 	update_vlan_tailroom_need_count(sdata, -delta);
@@ -147,7 +154,7 @@
 	 * is supported; if not, return.
 	 */
 	if (sta && !(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE) &&
-	    !(key->local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK))
+	    !ieee80211_hw_check(&key->local->hw, SUPPORTS_PER_STA_GTK))
 		goto out_unsupported;
 
 	if (sta && !sta->uploaded)
@@ -201,7 +208,7 @@
 		/* all of these we can do in software - if driver can */
 		if (ret == 1)
 			return 0;
-		if (key->local->hw.flags & IEEE80211_HW_SW_CRYPTO_CONTROL)
+		if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL))
 			return -EINVAL;
 		return 0;
 	default:
@@ -256,6 +263,7 @@
 
 	if (uni) {
 		rcu_assign_pointer(sdata->default_unicast_key, key);
+		ieee80211_check_fast_xmit_iface(sdata);
 		drv_set_default_unicast_key(sdata->local, sdata, idx);
 	}
 
@@ -325,6 +333,7 @@
 		if (pairwise) {
 			rcu_assign_pointer(sta->ptk[idx], new);
 			sta->ptk_idx = idx;
+			ieee80211_check_fast_xmit(sta);
 		} else {
 			rcu_assign_pointer(sta->gtk[idx], new);
 			sta->gtk_idx = idx;
@@ -510,15 +519,17 @@
 		break;
 	default:
 		if (cs) {
-			size_t len = (seq_len > MAX_PN_LEN) ?
-						MAX_PN_LEN : seq_len;
+			if (seq_len && seq_len != cs->pn_len) {
+				kfree(key);
+				return ERR_PTR(-EINVAL);
+			}
 
 			key->conf.iv_len = cs->hdr_len;
 			key->conf.icv_len = cs->mic_len;
 			for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++)
-				for (j = 0; j < len; j++)
+				for (j = 0; j < seq_len; j++)
 					key->u.gen.rx_pn[i][j] =
-							seq[len - j - 1];
+							seq[seq_len - j - 1];
 			key->flags |= KEY_FLAG_CIPHER_SCHEME;
 		}
 	}
@@ -892,27 +903,19 @@
 		break;
 	case WLAN_CIPHER_SUITE_CCMP:
 	case WLAN_CIPHER_SUITE_CCMP_256:
-		pn64 = atomic64_read(&key->u.ccmp.tx_pn);
-		seq->ccmp.pn[5] = pn64;
-		seq->ccmp.pn[4] = pn64 >> 8;
-		seq->ccmp.pn[3] = pn64 >> 16;
-		seq->ccmp.pn[2] = pn64 >> 24;
-		seq->ccmp.pn[1] = pn64 >> 32;
-		seq->ccmp.pn[0] = pn64 >> 40;
-		break;
 	case WLAN_CIPHER_SUITE_AES_CMAC:
 	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
-		pn64 = atomic64_read(&key->u.aes_cmac.tx_pn);
-		seq->ccmp.pn[5] = pn64;
-		seq->ccmp.pn[4] = pn64 >> 8;
-		seq->ccmp.pn[3] = pn64 >> 16;
-		seq->ccmp.pn[2] = pn64 >> 24;
-		seq->ccmp.pn[1] = pn64 >> 32;
-		seq->ccmp.pn[0] = pn64 >> 40;
-		break;
+		BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
+			     offsetof(typeof(*seq), aes_cmac));
 	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
 	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
-		pn64 = atomic64_read(&key->u.aes_gmac.tx_pn);
+		BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
+			     offsetof(typeof(*seq), aes_gmac));
+	case WLAN_CIPHER_SUITE_GCMP:
+	case WLAN_CIPHER_SUITE_GCMP_256:
+		BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
+			     offsetof(typeof(*seq), gcmp));
+		pn64 = atomic64_read(&key->conf.tx_pn);
 		seq->ccmp.pn[5] = pn64;
 		seq->ccmp.pn[4] = pn64 >> 8;
 		seq->ccmp.pn[3] = pn64 >> 16;
@@ -920,16 +923,6 @@
 		seq->ccmp.pn[1] = pn64 >> 32;
 		seq->ccmp.pn[0] = pn64 >> 40;
 		break;
-	case WLAN_CIPHER_SUITE_GCMP:
-	case WLAN_CIPHER_SUITE_GCMP_256:
-		pn64 = atomic64_read(&key->u.gcmp.tx_pn);
-		seq->gcmp.pn[5] = pn64;
-		seq->gcmp.pn[4] = pn64 >> 8;
-		seq->gcmp.pn[3] = pn64 >> 16;
-		seq->gcmp.pn[2] = pn64 >> 24;
-		seq->gcmp.pn[1] = pn64 >> 32;
-		seq->gcmp.pn[0] = pn64 >> 40;
-		break;
 	default:
 		WARN_ON(1);
 	}
@@ -1004,43 +997,25 @@
 		break;
 	case WLAN_CIPHER_SUITE_CCMP:
 	case WLAN_CIPHER_SUITE_CCMP_256:
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+		BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
+			     offsetof(typeof(*seq), aes_cmac));
+	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+		BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
+			     offsetof(typeof(*seq), aes_gmac));
+	case WLAN_CIPHER_SUITE_GCMP:
+	case WLAN_CIPHER_SUITE_GCMP_256:
+		BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
+			     offsetof(typeof(*seq), gcmp));
 		pn64 = (u64)seq->ccmp.pn[5] |
 		       ((u64)seq->ccmp.pn[4] << 8) |
 		       ((u64)seq->ccmp.pn[3] << 16) |
 		       ((u64)seq->ccmp.pn[2] << 24) |
 		       ((u64)seq->ccmp.pn[1] << 32) |
 		       ((u64)seq->ccmp.pn[0] << 40);
-		atomic64_set(&key->u.ccmp.tx_pn, pn64);
-		break;
-	case WLAN_CIPHER_SUITE_AES_CMAC:
-	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
-		pn64 = (u64)seq->aes_cmac.pn[5] |
-		       ((u64)seq->aes_cmac.pn[4] << 8) |
-		       ((u64)seq->aes_cmac.pn[3] << 16) |
-		       ((u64)seq->aes_cmac.pn[2] << 24) |
-		       ((u64)seq->aes_cmac.pn[1] << 32) |
-		       ((u64)seq->aes_cmac.pn[0] << 40);
-		atomic64_set(&key->u.aes_cmac.tx_pn, pn64);
-		break;
-	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
-	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
-		pn64 = (u64)seq->aes_gmac.pn[5] |
-		       ((u64)seq->aes_gmac.pn[4] << 8) |
-		       ((u64)seq->aes_gmac.pn[3] << 16) |
-		       ((u64)seq->aes_gmac.pn[2] << 24) |
-		       ((u64)seq->aes_gmac.pn[1] << 32) |
-		       ((u64)seq->aes_gmac.pn[0] << 40);
-		atomic64_set(&key->u.aes_gmac.tx_pn, pn64);
-		break;
-	case WLAN_CIPHER_SUITE_GCMP:
-	case WLAN_CIPHER_SUITE_GCMP_256:
-		pn64 = (u64)seq->gcmp.pn[5] |
-		       ((u64)seq->gcmp.pn[4] << 8) |
-		       ((u64)seq->gcmp.pn[3] << 16) |
-		       ((u64)seq->gcmp.pn[2] << 24) |
-		       ((u64)seq->gcmp.pn[1] << 32) |
-		       ((u64)seq->gcmp.pn[0] << 40);
-		atomic64_set(&key->u.gcmp.tx_pn, pn64);
+		atomic64_set(&key->conf.tx_pn, pn64);
 		break;
 	default:
 		WARN_ON(1);
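
The BUILD_BUG_ON()s are what make the case fall-through legal: every union
member that holds a PN is asserted to sit at the same offset, so code written
against seq->ccmp also serves the CMAC, GMAC and GCMP cases. The same shape
check is expressible in portable C11; a runnable sketch with stand-in types:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct key_seq {
        union {
            struct { uint8_t pn[6]; } ccmp;
            struct { uint8_t pn[6]; } aes_cmac;
            struct { uint8_t pn[6]; } gcmp;
        };
    };

    /* Userspace stand-in for BUILD_BUG_ON(): reject at compile time
     * any layout change that would break the aliasing assumption. */
    static_assert(offsetof(struct key_seq, ccmp) ==
                  offsetof(struct key_seq, gcmp), "members must alias");

    int main(void)
    {
        struct key_seq seq;
        seq.gcmp.pn[0] = 0xab;
        printf("%02x\n", seq.ccmp.pn[0]);   /* ab - same storage */
        return 0;
    }
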
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 96557dd..3f4f9ea 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -18,7 +18,6 @@
 
 #define NUM_DEFAULT_KEYS 4
 #define NUM_DEFAULT_MGMT_KEYS 2
-#define MAX_PN_LEN 16
 
 struct ieee80211_local;
 struct ieee80211_sub_if_data;
@@ -78,7 +77,6 @@
 			u32 mic_failures;
 		} tkip;
 		struct {
-			atomic64_t tx_pn;
 			/*
 			 * Last received packet number. The first
 			 * IEEE80211_NUM_TIDS counters are used with Data
@@ -90,21 +88,18 @@
 			u32 replays; /* dot11RSNAStatsCCMPReplays */
 		} ccmp;
 		struct {
-			atomic64_t tx_pn;
 			u8 rx_pn[IEEE80211_CMAC_PN_LEN];
 			struct crypto_cipher *tfm;
 			u32 replays; /* dot11RSNAStatsCMACReplays */
 			u32 icverrors; /* dot11RSNAStatsCMACICVErrors */
 		} aes_cmac;
 		struct {
-			atomic64_t tx_pn;
 			u8 rx_pn[IEEE80211_GMAC_PN_LEN];
 			struct crypto_aead *tfm;
 			u32 replays; /* dot11RSNAStatsCMACReplays */
 			u32 icverrors; /* dot11RSNAStatsCMACICVErrors */
 		} aes_gmac;
 		struct {
-			atomic64_t tx_pn;
 			/* Last received packet number. The first
 			 * IEEE80211_NUM_TIDS counters are used with Data
 			 * frames and the last counter is used with Robust
@@ -116,7 +111,7 @@
 		} gcmp;
 		struct {
 			/* generic cipher scheme */
-			u8 rx_pn[IEEE80211_NUM_TIDS + 1][MAX_PN_LEN];
+			u8 rx_pn[IEEE80211_NUM_TIDS + 1][IEEE80211_MAX_PN_LEN];
 		} gen;
 	} u;
 
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index e2b8364..0505845 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -12,96 +12,175 @@
 #include <linux/export.h>
 #include "led.h"
 
-#define MAC80211_BLINK_DELAY 50 /* ms */
-
-void ieee80211_led_rx(struct ieee80211_local *local)
-{
-	unsigned long led_delay = MAC80211_BLINK_DELAY;
-	if (unlikely(!local->rx_led))
-		return;
-	led_trigger_blink_oneshot(local->rx_led, &led_delay, &led_delay, 0);
-}
-
-void ieee80211_led_tx(struct ieee80211_local *local)
-{
-	unsigned long led_delay = MAC80211_BLINK_DELAY;
-	if (unlikely(!local->tx_led))
-		return;
-	led_trigger_blink_oneshot(local->tx_led, &led_delay, &led_delay, 0);
-}
-
 void ieee80211_led_assoc(struct ieee80211_local *local, bool associated)
 {
-	if (unlikely(!local->assoc_led))
+	if (!atomic_read(&local->assoc_led_active))
 		return;
 	if (associated)
-		led_trigger_event(local->assoc_led, LED_FULL);
+		led_trigger_event(&local->assoc_led, LED_FULL);
 	else
-		led_trigger_event(local->assoc_led, LED_OFF);
+		led_trigger_event(&local->assoc_led, LED_OFF);
 }
 
 void ieee80211_led_radio(struct ieee80211_local *local, bool enabled)
 {
-	if (unlikely(!local->radio_led))
+	if (!atomic_read(&local->radio_led_active))
 		return;
 	if (enabled)
-		led_trigger_event(local->radio_led, LED_FULL);
+		led_trigger_event(&local->radio_led, LED_FULL);
 	else
-		led_trigger_event(local->radio_led, LED_OFF);
+		led_trigger_event(&local->radio_led, LED_OFF);
 }
 
-void ieee80211_led_names(struct ieee80211_local *local)
+void ieee80211_alloc_led_names(struct ieee80211_local *local)
 {
-	snprintf(local->rx_led_name, sizeof(local->rx_led_name),
-		 "%srx", wiphy_name(local->hw.wiphy));
-	snprintf(local->tx_led_name, sizeof(local->tx_led_name),
-		 "%stx", wiphy_name(local->hw.wiphy));
-	snprintf(local->assoc_led_name, sizeof(local->assoc_led_name),
-		 "%sassoc", wiphy_name(local->hw.wiphy));
-	snprintf(local->radio_led_name, sizeof(local->radio_led_name),
-		 "%sradio", wiphy_name(local->hw.wiphy));
+	local->rx_led.name = kasprintf(GFP_KERNEL, "%srx",
+				       wiphy_name(local->hw.wiphy));
+	local->tx_led.name = kasprintf(GFP_KERNEL, "%stx",
+				       wiphy_name(local->hw.wiphy));
+	local->assoc_led.name = kasprintf(GFP_KERNEL, "%sassoc",
+					  wiphy_name(local->hw.wiphy));
+	local->radio_led.name = kasprintf(GFP_KERNEL, "%sradio",
+					  wiphy_name(local->hw.wiphy));
+}
+
+void ieee80211_free_led_names(struct ieee80211_local *local)
+{
+	kfree(local->rx_led.name);
+	kfree(local->tx_led.name);
+	kfree(local->assoc_led.name);
+	kfree(local->radio_led.name);
+}
+
+static void ieee80211_tx_led_activate(struct led_classdev *led_cdev)
+{
+	struct ieee80211_local *local = container_of(led_cdev->trigger,
+						     struct ieee80211_local,
+						     tx_led);
+
+	atomic_inc(&local->tx_led_active);
+}
+
+static void ieee80211_tx_led_deactivate(struct led_classdev *led_cdev)
+{
+	struct ieee80211_local *local = container_of(led_cdev->trigger,
+						     struct ieee80211_local,
+						     tx_led);
+
+	atomic_dec(&local->tx_led_active);
+}
+
+static void ieee80211_rx_led_activate(struct led_classdev *led_cdev)
+{
+	struct ieee80211_local *local = container_of(led_cdev->trigger,
+						     struct ieee80211_local,
+						     rx_led);
+
+	atomic_inc(&local->rx_led_active);
+}
+
+static void ieee80211_rx_led_deactivate(struct led_classdev *led_cdev)
+{
+	struct ieee80211_local *local = container_of(led_cdev->trigger,
+						     struct ieee80211_local,
+						     rx_led);
+
+	atomic_dec(&local->rx_led_active);
+}
+
+static void ieee80211_assoc_led_activate(struct led_classdev *led_cdev)
+{
+	struct ieee80211_local *local = container_of(led_cdev->trigger,
+						     struct ieee80211_local,
+						     assoc_led);
+
+	atomic_inc(&local->assoc_led_active);
+}
+
+static void ieee80211_assoc_led_deactivate(struct led_classdev *led_cdev)
+{
+	struct ieee80211_local *local = container_of(led_cdev->trigger,
+						     struct ieee80211_local,
+						     assoc_led);
+
+	atomic_dec(&local->assoc_led_active);
+}
+
+static void ieee80211_radio_led_activate(struct led_classdev *led_cdev)
+{
+	struct ieee80211_local *local = container_of(led_cdev->trigger,
+						     struct ieee80211_local,
+						     radio_led);
+
+	atomic_inc(&local->radio_led_active);
+}
+
+static void ieee80211_radio_led_deactivate(struct led_classdev *led_cdev)
+{
+	struct ieee80211_local *local = container_of(led_cdev->trigger,
+						     struct ieee80211_local,
+						     radio_led);
+
+	atomic_dec(&local->radio_led_active);
+}
+
+static void ieee80211_tpt_led_activate(struct led_classdev *led_cdev)
+{
+	struct ieee80211_local *local = container_of(led_cdev->trigger,
+						     struct ieee80211_local,
+						     tpt_led);
+
+	atomic_inc(&local->tpt_led_active);
+}
+
+static void ieee80211_tpt_led_deactivate(struct led_classdev *led_cdev)
+{
+	struct ieee80211_local *local = container_of(led_cdev->trigger,
+						     struct ieee80211_local,
+						     tpt_led);
+
+	atomic_dec(&local->tpt_led_active);
 }
 
 void ieee80211_led_init(struct ieee80211_local *local)
 {
-	local->rx_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
-	if (local->rx_led) {
-		local->rx_led->name = local->rx_led_name;
-		if (led_trigger_register(local->rx_led)) {
-			kfree(local->rx_led);
-			local->rx_led = NULL;
-		}
+	atomic_set(&local->rx_led_active, 0);
+	local->rx_led.activate = ieee80211_rx_led_activate;
+	local->rx_led.deactivate = ieee80211_rx_led_deactivate;
+	if (local->rx_led.name && led_trigger_register(&local->rx_led)) {
+		kfree(local->rx_led.name);
+		local->rx_led.name = NULL;
 	}
 
-	local->tx_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
-	if (local->tx_led) {
-		local->tx_led->name = local->tx_led_name;
-		if (led_trigger_register(local->tx_led)) {
-			kfree(local->tx_led);
-			local->tx_led = NULL;
-		}
+	atomic_set(&local->tx_led_active, 0);
+	local->tx_led.activate = ieee80211_tx_led_activate;
+	local->tx_led.deactivate = ieee80211_tx_led_deactivate;
+	if (local->tx_led.name && led_trigger_register(&local->tx_led)) {
+		kfree(local->tx_led.name);
+		local->tx_led.name = NULL;
 	}
 
-	local->assoc_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
-	if (local->assoc_led) {
-		local->assoc_led->name = local->assoc_led_name;
-		if (led_trigger_register(local->assoc_led)) {
-			kfree(local->assoc_led);
-			local->assoc_led = NULL;
-		}
+	atomic_set(&local->assoc_led_active, 0);
+	local->assoc_led.activate = ieee80211_assoc_led_activate;
+	local->assoc_led.deactivate = ieee80211_assoc_led_deactivate;
+	if (local->assoc_led.name && led_trigger_register(&local->assoc_led)) {
+		kfree(local->assoc_led.name);
+		local->assoc_led.name = NULL;
 	}
 
-	local->radio_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
-	if (local->radio_led) {
-		local->radio_led->name = local->radio_led_name;
-		if (led_trigger_register(local->radio_led)) {
-			kfree(local->radio_led);
-			local->radio_led = NULL;
-		}
+	atomic_set(&local->radio_led_active, 0);
+	local->radio_led.activate = ieee80211_radio_led_activate;
+	local->radio_led.deactivate = ieee80211_radio_led_deactivate;
+	if (local->radio_led.name && led_trigger_register(&local->radio_led)) {
+		kfree(local->radio_led.name);
+		local->radio_led.name = NULL;
 	}
 
+	atomic_set(&local->tpt_led_active, 0);
 	if (local->tpt_led_trigger) {
-		if (led_trigger_register(&local->tpt_led_trigger->trig)) {
+		local->tpt_led.activate = ieee80211_tpt_led_activate;
+		local->tpt_led.deactivate = ieee80211_tpt_led_deactivate;
+		if (led_trigger_register(&local->tpt_led)) {
 			kfree(local->tpt_led_trigger);
 			local->tpt_led_trigger = NULL;
 		}
@@ -110,58 +189,50 @@
 
 void ieee80211_led_exit(struct ieee80211_local *local)
 {
-	if (local->radio_led) {
-		led_trigger_unregister(local->radio_led);
-		kfree(local->radio_led);
-	}
-	if (local->assoc_led) {
-		led_trigger_unregister(local->assoc_led);
-		kfree(local->assoc_led);
-	}
-	if (local->tx_led) {
-		led_trigger_unregister(local->tx_led);
-		kfree(local->tx_led);
-	}
-	if (local->rx_led) {
-		led_trigger_unregister(local->rx_led);
-		kfree(local->rx_led);
-	}
+	if (local->radio_led.name)
+		led_trigger_unregister(&local->radio_led);
+	if (local->assoc_led.name)
+		led_trigger_unregister(&local->assoc_led);
+	if (local->tx_led.name)
+		led_trigger_unregister(&local->tx_led);
+	if (local->rx_led.name)
+		led_trigger_unregister(&local->rx_led);
 
 	if (local->tpt_led_trigger) {
-		led_trigger_unregister(&local->tpt_led_trigger->trig);
+		led_trigger_unregister(&local->tpt_led);
 		kfree(local->tpt_led_trigger);
 	}
 }
 
-char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
+const char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 
-	return local->radio_led_name;
+	return local->radio_led.name;
 }
 EXPORT_SYMBOL(__ieee80211_get_radio_led_name);
 
-char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
+const char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 
-	return local->assoc_led_name;
+	return local->assoc_led.name;
 }
 EXPORT_SYMBOL(__ieee80211_get_assoc_led_name);
 
-char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
+const char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 
-	return local->tx_led_name;
+	return local->tx_led.name;
 }
 EXPORT_SYMBOL(__ieee80211_get_tx_led_name);
 
-char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
+const char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 
-	return local->rx_led_name;
+	return local->rx_led.name;
 }
 EXPORT_SYMBOL(__ieee80211_get_rx_led_name);
 
@@ -205,16 +276,17 @@
 		}
 	}
 
-	read_lock(&tpt_trig->trig.leddev_list_lock);
-	list_for_each_entry(led_cdev, &tpt_trig->trig.led_cdevs, trig_list)
+	read_lock(&local->tpt_led.leddev_list_lock);
+	list_for_each_entry(led_cdev, &local->tpt_led.led_cdevs, trig_list)
 		led_blink_set(led_cdev, &on, &off);
-	read_unlock(&tpt_trig->trig.leddev_list_lock);
+	read_unlock(&local->tpt_led.leddev_list_lock);
 }
 
-char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
-				unsigned int flags,
-				const struct ieee80211_tpt_blink *blink_table,
-				unsigned int blink_table_len)
+const char *
+__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
+				   unsigned int flags,
+				   const struct ieee80211_tpt_blink *blink_table,
+				   unsigned int blink_table_len)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct tpt_led_trigger *tpt_trig;
@@ -229,7 +301,7 @@
 	snprintf(tpt_trig->name, sizeof(tpt_trig->name),
 		 "%stpt", wiphy_name(local->hw.wiphy));
 
-	tpt_trig->trig.name = tpt_trig->name;
+	local->tpt_led.name = tpt_trig->name;
 
 	tpt_trig->blink_table = blink_table;
 	tpt_trig->blink_table_len = blink_table_len;
@@ -269,10 +341,10 @@
 	tpt_trig->running = false;
 	del_timer_sync(&tpt_trig->timer);
 
-	read_lock(&tpt_trig->trig.leddev_list_lock);
-	list_for_each_entry(led_cdev, &tpt_trig->trig.led_cdevs, trig_list)
+	read_lock(&local->tpt_led.leddev_list_lock);
+	list_for_each_entry(led_cdev, &local->tpt_led.led_cdevs, trig_list)
 		led_set_brightness(led_cdev, LED_OFF);
-	read_unlock(&tpt_trig->trig.leddev_list_lock);
+	read_unlock(&local->tpt_led.leddev_list_lock);
 }
 
 void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local,
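
Embedding struct led_trigger in struct ieee80211_local (instead of kzalloc'ing
one and keeping a pointer) is what lets each activate/deactivate callback
recover its owner with container_of() and flip the matching foo_led_active
counter without any back-pointer field. A runnable userspace sketch of that
recovery trick, with deliberately simplified types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct led_trigger { const char *name; };

    struct local {
        int tx_active;
        struct led_trigger tx_led;   /* embedded, not pointed-to */
    };

    /* The callback only receives the trigger; embedding it in struct
     * local lets the callback walk back to the owning object. */
    static void tx_led_activate(struct led_trigger *trig)
    {
        struct local *local = container_of(trig, struct local, tx_led);
        local->tx_active++;
    }

    int main(void)
    {
        struct local local = { .tx_led = { .name = "phy0tx" } };
        tx_led_activate(&local.tx_led);
        printf("%d\n", local.tx_active);   /* 1 */
        return 0;
    }
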
diff --git a/net/mac80211/led.h b/net/mac80211/led.h
index 89f4344..a7893a1 100644
--- a/net/mac80211/led.h
+++ b/net/mac80211/led.h
@@ -11,25 +11,42 @@
 #include <linux/leds.h>
 #include "ieee80211_i.h"
 
+#define MAC80211_BLINK_DELAY 50 /* ms */
+
+static inline void ieee80211_led_rx(struct ieee80211_local *local)
+{
 #ifdef CONFIG_MAC80211_LEDS
-void ieee80211_led_rx(struct ieee80211_local *local);
-void ieee80211_led_tx(struct ieee80211_local *local);
+	unsigned long led_delay = MAC80211_BLINK_DELAY;
+
+	if (!atomic_read(&local->rx_led_active))
+		return;
+	led_trigger_blink_oneshot(&local->rx_led, &led_delay, &led_delay, 0);
+#endif
+}
+
+static inline void ieee80211_led_tx(struct ieee80211_local *local)
+{
+#ifdef CONFIG_MAC80211_LEDS
+	unsigned long led_delay = MAC80211_BLINK_DELAY;
+
+	if (!atomic_read(&local->tx_led_active))
+		return;
+	led_trigger_blink_oneshot(&local->tx_led, &led_delay, &led_delay, 0);
+#endif
+}
+
+#ifdef CONFIG_MAC80211_LEDS
 void ieee80211_led_assoc(struct ieee80211_local *local,
 			 bool associated);
 void ieee80211_led_radio(struct ieee80211_local *local,
 			 bool enabled);
-void ieee80211_led_names(struct ieee80211_local *local);
+void ieee80211_alloc_led_names(struct ieee80211_local *local);
+void ieee80211_free_led_names(struct ieee80211_local *local);
 void ieee80211_led_init(struct ieee80211_local *local);
 void ieee80211_led_exit(struct ieee80211_local *local);
 void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local,
 				unsigned int types_on, unsigned int types_off);
 #else
-static inline void ieee80211_led_rx(struct ieee80211_local *local)
-{
-}
-static inline void ieee80211_led_tx(struct ieee80211_local *local)
-{
-}
 static inline void ieee80211_led_assoc(struct ieee80211_local *local,
 				       bool associated)
 {
@@ -38,7 +55,10 @@
 				       bool enabled)
 {
 }
-static inline void ieee80211_led_names(struct ieee80211_local *local)
+static inline void ieee80211_alloc_led_names(struct ieee80211_local *local)
+{
+}
+static inline void ieee80211_free_led_names(struct ieee80211_local *local)
 {
 }
 static inline void ieee80211_led_init(struct ieee80211_local *local)
@@ -58,7 +78,7 @@
 ieee80211_tpt_led_trig_tx(struct ieee80211_local *local, __le16 fc, int bytes)
 {
 #ifdef CONFIG_MAC80211_LEDS
-	if (local->tpt_led_trigger && ieee80211_is_data(fc))
+	if (ieee80211_is_data(fc) && atomic_read(&local->tpt_led_active))
 		local->tpt_led_trigger->tx_bytes += bytes;
 #endif
 }
@@ -67,7 +87,7 @@
 ieee80211_tpt_led_trig_rx(struct ieee80211_local *local, __le16 fc, int bytes)
 {
 #ifdef CONFIG_MAC80211_LEDS
-	if (local->tpt_led_trigger && ieee80211_is_data(fc))
+	if (ieee80211_is_data(fc) && atomic_read(&local->tpt_led_active))
 		local->tpt_led_trigger->rx_bytes += bytes;
 #endif
 }
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index df3051d..3c63468 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -41,9 +41,6 @@
 	unsigned int changed_flags;
 	unsigned int new_flags = 0;
 
-	if (atomic_read(&local->iff_promiscs))
-		new_flags |= FIF_PROMISC_IN_BSS;
-
 	if (atomic_read(&local->iff_allmultis))
 		new_flags |= FIF_ALLMULTI;
 
@@ -249,6 +246,7 @@
 {
 	struct ieee80211_local *local =
 		container_of(work, struct ieee80211_local, restart_work);
+	struct ieee80211_sub_if_data *sdata;
 
 	/* wait for scan work complete */
 	flush_workqueue(local->workqueue);
@@ -257,6 +255,8 @@
 	     "%s called with hardware scan in progress\n", __func__);
 
 	rtnl_lock();
+	list_for_each_entry(sdata, &local->interfaces, list)
+		flush_delayed_work(&sdata->dec_tailroom_needed_wk);
 	ieee80211_scan_cancel(local);
 	ieee80211_reconfig(local);
 	rtnl_unlock();
@@ -646,7 +646,7 @@
 	skb_queue_head_init(&local->skb_queue);
 	skb_queue_head_init(&local->skb_queue_unreliable);
 
-	ieee80211_led_names(local);
+	ieee80211_alloc_led_names(local);
 
 	ieee80211_roc_setup(local);
 
@@ -661,7 +661,7 @@
 {
 	bool have_wep = !(IS_ERR(local->wep_tx_tfm) ||
 			  IS_ERR(local->wep_rx_tfm));
-	bool have_mfp = local->hw.flags & IEEE80211_HW_MFP_CAPABLE;
+	bool have_mfp = ieee80211_hw_check(&local->hw, MFP_CAPABLE);
 	int n_suites = 0, r = 0, w = 0;
 	u32 *suites;
 	static const u32 cipher_suites[] = {
@@ -681,7 +681,7 @@
 		WLAN_CIPHER_SUITE_BIP_GMAC_256,
 	};
 
-	if (local->hw.flags & IEEE80211_HW_SW_CRYPTO_CONTROL ||
+	if (ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL) ||
 	    local->hw.wiphy->cipher_suites) {
 		/* If the driver advertises, or doesn't support SW crypto,
 		 * we only need to remove WEP if necessary.
@@ -771,8 +771,13 @@
 			suites[w++] = WLAN_CIPHER_SUITE_BIP_GMAC_256;
 		}
 
-		for (r = 0; r < local->hw.n_cipher_schemes; r++)
+		for (r = 0; r < local->hw.n_cipher_schemes; r++) {
 			suites[w++] = cs[r].cipher;
+			if (WARN_ON(cs[r].pn_len > IEEE80211_MAX_PN_LEN)) {
+				kfree(suites);
+				return -EINVAL;
+			}
+		}
 	}
 
 	local->hw.wiphy->cipher_suites = suites;
@@ -792,7 +797,7 @@
 	netdev_features_t feature_whitelist;
 	struct cfg80211_chan_def dflt_chandef = {};
 
-	if (hw->flags & IEEE80211_HW_QUEUE_CONTROL &&
+	if (ieee80211_hw_check(hw, QUEUE_CONTROL) &&
 	    (local->hw.offchannel_tx_hw_queue == IEEE80211_INVAL_HW_QUEUE ||
 	     local->hw.offchannel_tx_hw_queue >= local->hw.queues))
 		return -EINVAL;
@@ -840,7 +845,8 @@
 
 	/* Only HW csum features are currently compatible with mac80211 */
 	feature_whitelist = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-			    NETIF_F_HW_CSUM;
+			    NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA |
+			    NETIF_F_GSO_SOFTWARE;
 	if (WARN_ON(hw->netdev_features & ~feature_whitelist))
 		return -EINVAL;
 
@@ -939,9 +945,9 @@
 	/* mac80211 supports control port protocol changing */
 	local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL;
 
-	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
+	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) {
 		local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
-	} else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) {
+	} else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC)) {
 		local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
 		if (hw->max_signal <= 0) {
 			result = -EINVAL;
@@ -995,7 +1001,7 @@
 		local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
 
 	/* mac80211 supports eCSA, if the driver supports STA CSA at all */
-	if (local->hw.flags & IEEE80211_HW_CHANCTX_STA_CSA)
+	if (ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA))
 		local->ext_capa[0] |= WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING;
 
 	local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CSA_COUNTERS_NUM;
@@ -1063,7 +1069,7 @@
 
 	/* add one default STA interface if supported */
 	if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) &&
-	    !(hw->flags & IEEE80211_HW_NO_AUTO_VIF)) {
+	    !ieee80211_hw_check(hw, NO_AUTO_VIF)) {
 		result = ieee80211_if_add(local, "wlan%d", NET_NAME_ENUM, NULL,
 					  NL80211_IFTYPE_STATION, NULL);
 		if (result)
@@ -1209,6 +1215,8 @@
 
 	sta_info_stop(local);
 
+	ieee80211_free_led_names(local);
+
 	wiphy_free(local->hw.wiphy);
 }
 EXPORT_SYMBOL(ieee80211_free_hw);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index d468424..817098a 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -680,6 +680,7 @@
 		*pos++ = 0x0;
 		*pos++ = ieee80211_frequency_to_channel(
 				csa->settings.chandef.chan->center_freq);
+		bcn->csa_current_counter = csa->settings.count;
 		bcn->csa_counter_offsets[0] = hdr_len + 6;
 		*pos++ = csa->settings.count;
 		*pos++ = WLAN_EID_CHAN_SWITCH_PARAM;
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 214e63b..085edc1 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -510,14 +510,14 @@
 
 static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
 				    struct ieee80211_mgmt *mgmt,
-				    const u8 *preq_elem, u32 metric)
+				    const u8 *preq_elem, u32 orig_metric)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct mesh_path *mpath = NULL;
 	const u8 *target_addr, *orig_addr;
 	const u8 *da;
 	u8 target_flags, ttl, flags;
-	u32 orig_sn, target_sn, lifetime, orig_metric;
+	u32 orig_sn, target_sn, lifetime, target_metric;
 	bool reply = false;
 	bool forward = true;
 	bool root_is_gate;
@@ -528,7 +528,6 @@
 	target_sn = PREQ_IE_TARGET_SN(preq_elem);
 	orig_sn = PREQ_IE_ORIG_SN(preq_elem);
 	target_flags = PREQ_IE_TARGET_F(preq_elem);
-	orig_metric = metric;
 	/* Proactive PREQ gate announcements */
 	flags = PREQ_IE_FLAGS(preq_elem);
 	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
@@ -539,7 +538,7 @@
 		mhwmp_dbg(sdata, "PREQ is for us\n");
 		forward = false;
 		reply = true;
-		metric = 0;
+		target_metric = 0;
 		if (time_after(jiffies, ifmsh->last_sn_update +
 					net_traversal_jiffies(sdata)) ||
 		    time_before(jiffies, ifmsh->last_sn_update)) {
@@ -556,7 +555,7 @@
 				reply = true;
 				target_addr = sdata->vif.addr;
 				target_sn = ++ifmsh->sn;
-				metric = 0;
+				target_metric = 0;
 				ifmsh->last_sn_update = jiffies;
 			}
 			if (root_is_gate)
@@ -574,7 +573,7 @@
 			} else if ((!(target_flags & MP_F_DO)) &&
 					(mpath->flags & MESH_PATH_ACTIVE)) {
 				reply = true;
-				metric = mpath->metric;
+				target_metric = mpath->metric;
 				target_sn = mpath->sn;
 				if (target_flags & MP_F_RF)
 					target_flags |= MP_F_DO;
@@ -593,7 +592,8 @@
 			mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
 					       orig_sn, 0, target_addr,
 					       target_sn, mgmt->sa, 0, ttl,
-					       lifetime, metric, 0, sdata);
+					       lifetime, target_metric, 0,
+					       sdata);
 		} else {
 			ifmsh->mshstats.dropped_frames_ttl++;
 		}
@@ -619,13 +619,12 @@
 		if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
 			target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
 			target_sn = PREQ_IE_TARGET_SN(preq_elem);
-			metric = orig_metric;
 		}
 
 		mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
 				       orig_sn, target_flags, target_addr,
 				       target_sn, da, hopcount, ttl, lifetime,
-				       metric, preq_id, sdata);
+				       orig_metric, preq_id, sdata);
 		if (!is_multicast_ether_addr(da))
 			ifmsh->mshstats.fwded_unicast++;
 		else
@@ -854,7 +853,7 @@
 {
 	struct ieee802_11_elems elems;
 	size_t baselen;
-	u32 last_hop_metric;
+	u32 path_metric;
 	struct sta_info *sta;
 
 	/* need action_code */
@@ -877,21 +876,21 @@
 		if (elems.preq_len != 37)
 			/* Right now we support just 1 destination and no AE */
 			return;
-		last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq,
-						      MPATH_PREQ);
-		if (last_hop_metric)
+		path_metric = hwmp_route_info_get(sdata, mgmt, elems.preq,
+						  MPATH_PREQ);
+		if (path_metric)
 			hwmp_preq_frame_process(sdata, mgmt, elems.preq,
-						last_hop_metric);
+						path_metric);
 	}
 	if (elems.prep) {
 		if (elems.prep_len != 31)
 			/* Right now we support no AE */
 			return;
-		last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep,
-						      MPATH_PREP);
-		if (last_hop_metric)
+		path_metric = hwmp_route_info_get(sdata, mgmt, elems.prep,
+						  MPATH_PREP);
+		if (path_metric)
 			hwmp_prep_frame_process(sdata, mgmt, elems.prep,
-						last_hop_metric);
+						path_metric);
 	}
 	if (elems.perr) {
 		if (elems.perr_len != 15)
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 60d737f..5438d13 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -72,10 +72,11 @@
  *
  * @sta: mesh peer link to restart
  *
- * Locking: this function must be called holding sta->lock
+ * Locking: this function must be called holding sta->plink_lock
  */
 static inline void mesh_plink_fsm_restart(struct sta_info *sta)
 {
+	lockdep_assert_held(&sta->plink_lock);
 	sta->plink_state = NL80211_PLINK_LISTEN;
 	sta->llid = sta->plid = sta->reason = 0;
 	sta->plink_retries = 0;
@@ -105,9 +106,7 @@
 		/* (IEEE 802.11-2012 19.4.5) */
 		short_slot = true;
 		goto out;
-	} else if (band != IEEE80211_BAND_2GHZ ||
-		   (band == IEEE80211_BAND_2GHZ &&
-		    local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
+	} else if (band != IEEE80211_BAND_2GHZ)
 		goto out;
 
 	for (i = 0; i < sband->n_bitrates; i++)
@@ -213,13 +212,15 @@
  * All mesh paths with this peer as next hop will be flushed
  * Returns beacon changed flag if the beacon content changed.
  *
- * Locking: the caller must hold sta->lock
+ * Locking: the caller must hold sta->plink_lock
  */
 static u32 __mesh_plink_deactivate(struct sta_info *sta)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	u32 changed = 0;
 
+	lockdep_assert_held(&sta->plink_lock);
+
 	if (sta->plink_state == NL80211_PLINK_ESTAB)
 		changed = mesh_plink_dec_estab_count(sdata);
 	sta->plink_state = NL80211_PLINK_BLOCKED;
@@ -244,13 +245,13 @@
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	u32 changed;
 
-	spin_lock_bh(&sta->lock);
+	spin_lock_bh(&sta->plink_lock);
 	changed = __mesh_plink_deactivate(sta);
 	sta->reason = WLAN_REASON_MESH_PEER_CANCELED;
 	mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
 			    sta->sta.addr, sta->llid, sta->plid,
 			    sta->reason);
-	spin_unlock_bh(&sta->lock);
+	spin_unlock_bh(&sta->plink_lock);
 
 	return changed;
 }
@@ -387,12 +388,13 @@
 	sband = local->hw.wiphy->bands[band];
 	rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
 
-	spin_lock_bh(&sta->lock);
+	spin_lock_bh(&sta->plink_lock);
 	sta->last_rx = jiffies;
 
 	/* rates and capabilities don't change during peering */
-	if (sta->plink_state == NL80211_PLINK_ESTAB)
+	if (sta->plink_state == NL80211_PLINK_ESTAB && sta->processed_beacon)
 		goto out;
+	sta->processed_beacon = true;
 
 	if (sta->sta.supp_rates[band] != rates)
 		changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
@@ -419,7 +421,7 @@
 	else
 		rate_control_rate_update(local, sband, sta, changed);
 out:
-	spin_unlock_bh(&sta->lock);
+	spin_unlock_bh(&sta->plink_lock);
 }
 
 static struct sta_info *
@@ -552,7 +554,7 @@
 	if (sta->sdata->local->quiescing)
 		return;
 
-	spin_lock_bh(&sta->lock);
+	spin_lock_bh(&sta->plink_lock);
 
 	/* If a timer fires just before a state transition on another CPU,
 	 * we may have already extended the timeout and changed state by the
@@ -563,7 +565,7 @@
 		mpl_dbg(sta->sdata,
 			"Ignoring timer for %pM in state %s (timer adjusted)",
 			sta->sta.addr, mplstates[sta->plink_state]);
-		spin_unlock_bh(&sta->lock);
+		spin_unlock_bh(&sta->plink_lock);
 		return;
 	}
 
@@ -573,7 +575,7 @@
 		mpl_dbg(sta->sdata,
 			"Ignoring timer for %pM in state %s (timer deleted)",
 			sta->sta.addr, mplstates[sta->plink_state]);
-		spin_unlock_bh(&sta->lock);
+		spin_unlock_bh(&sta->plink_lock);
 		return;
 	}
 
@@ -619,7 +621,7 @@
 	default:
 		break;
 	}
-	spin_unlock_bh(&sta->lock);
+	spin_unlock_bh(&sta->plink_lock);
 	if (action)
 		mesh_plink_frame_tx(sdata, action, sta->sta.addr,
 				    sta->llid, sta->plid, reason);
@@ -674,16 +676,16 @@
 	if (!test_sta_flag(sta, WLAN_STA_AUTH))
 		return 0;
 
-	spin_lock_bh(&sta->lock);
+	spin_lock_bh(&sta->plink_lock);
 	sta->llid = mesh_get_new_llid(sdata);
 	if (sta->plink_state != NL80211_PLINK_LISTEN &&
 	    sta->plink_state != NL80211_PLINK_BLOCKED) {
-		spin_unlock_bh(&sta->lock);
+		spin_unlock_bh(&sta->plink_lock);
 		return 0;
 	}
 	sta->plink_state = NL80211_PLINK_OPN_SNT;
 	mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout);
-	spin_unlock_bh(&sta->lock);
+	spin_unlock_bh(&sta->plink_lock);
 	mpl_dbg(sdata,
 		"Mesh plink: starting establishment with %pM\n",
 		sta->sta.addr);
@@ -700,10 +702,10 @@
 {
 	u32 changed;
 
-	spin_lock_bh(&sta->lock);
+	spin_lock_bh(&sta->plink_lock);
 	changed = __mesh_plink_deactivate(sta);
 	sta->plink_state = NL80211_PLINK_BLOCKED;
-	spin_unlock_bh(&sta->lock);
+	spin_unlock_bh(&sta->plink_lock);
 
 	return changed;
 }
@@ -758,7 +760,7 @@
 	mpl_dbg(sdata, "peer %pM in state %s got event %s\n", sta->sta.addr,
 		mplstates[sta->plink_state], mplevents[event]);
 
-	spin_lock_bh(&sta->lock);
+	spin_lock_bh(&sta->plink_lock);
 	switch (sta->plink_state) {
 	case NL80211_PLINK_LISTEN:
 		switch (event) {
@@ -872,7 +874,7 @@
 		 */
 		break;
 	}
-	spin_unlock_bh(&sta->lock);
+	spin_unlock_bh(&sta->plink_lock);
 	if (action) {
 		mesh_plink_frame_tx(sdata, action, sta->sta.addr,
 				    sta->llid, sta->plid, sta->reason);
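
Moving the plink state machine onto its own sta->plink_lock comes with
lockdep_assert_held() in the helpers, so the locking contract spelled out in
each function's comment is also enforced at runtime under CONFIG_LOCKDEP. A
kernel-style sketch of the convention (illustrative names, not code from this
patch):

    /* callee: documents and asserts its locking contract */
    static u32 plink_helper(struct sta_info *sta)
    {
        lockdep_assert_held(&sta->plink_lock);
        /* ... touch only state that plink_lock protects ... */
        return 0;
    }

    /* caller: takes the lock, then may use the helper freely */
    static void plink_caller(struct sta_info *sta)
    {
        u32 changed;

        spin_lock_bh(&sta->plink_lock);
        changed = plink_helper(sta);
        spin_unlock_bh(&sta->plink_lock);
    }
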
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 26053bf..9b2cc27 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -118,7 +118,7 @@
 	if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
 		return;
 
-	if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+	if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
 		return;
 
 	mod_timer(&sdata->u.mgd.bcn_mon_timer,
@@ -134,7 +134,7 @@
 
 	ifmgd->probe_send_count = 0;
 
-	if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+	if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
 		return;
 
 	mod_timer(&sdata->u.mgd.conn_mon_timer,
@@ -669,17 +669,15 @@
 	capab = WLAN_CAPABILITY_ESS;
 
 	if (sband->band == IEEE80211_BAND_2GHZ) {
-		if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
-			capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
-		if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
-			capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
+		capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
+		capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
 	}
 
 	if (assoc_data->capability & WLAN_CAPABILITY_PRIVACY)
 		capab |= WLAN_CAPABILITY_PRIVACY;
 
 	if ((assoc_data->capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
-	    (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
+	    ieee80211_hw_check(&local->hw, SPECTRUM_MGMT))
 		capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
 
 	if (ifmgd->flags & IEEE80211_STA_ENABLE_RRM)
@@ -887,7 +885,7 @@
 	drv_mgd_prepare_tx(local, sdata);
 
 	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
-	if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+	if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
 		IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
 						IEEE80211_TX_INTFL_MLME_CONN_TX;
 	ieee80211_tx_skb(sdata, skb);
@@ -929,7 +927,7 @@
 	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
 					IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
 
-	if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+	if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
 		IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
 
 	if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL)
@@ -1098,6 +1096,24 @@
 	ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work);
 }
 
+static void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
+{
+	struct sta_info *sta;
+	u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+		if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+		    !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+			continue;
+
+		ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
+					    NL80211_TDLS_TEARDOWN, reason,
+					    GFP_ATOMIC);
+	}
+	rcu_read_unlock();
+}
+
 static void
 ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 				 u64 timestamp, u32 device_timestamp,
@@ -1161,6 +1177,14 @@
 		return;
 	}
 
+	/*
+	 * Drop all TDLS peers - either we disconnect or move to a different
+	 * channel from this point on. There's no telling what our peer will do.
+	 * The TDLS WIDER_BW scenario is also problematic, as peers might now
+	 * have an incompatible wider chandef.
+	 */
+	ieee80211_teardown_tdls_peers(sdata);
+
 	mutex_lock(&local->mtx);
 	mutex_lock(&local->chanctx_mtx);
 	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
@@ -1174,7 +1198,7 @@
 	chanctx = container_of(conf, struct ieee80211_chanctx, conf);
 
 	if (local->use_chanctx &&
-	    !(local->hw.flags & IEEE80211_HW_CHANCTX_STA_CSA)) {
+	    !ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) {
 		sdata_info(sdata,
 			   "driver doesn't support chan-switch with channel contexts\n");
 		goto drop_connection;
@@ -1383,15 +1407,15 @@
 		return;
 
 	if (conf->dynamic_ps_timeout > 0 &&
-	    !(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)) {
+	    !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) {
 		mod_timer(&local->dynamic_ps_timer, jiffies +
 			  msecs_to_jiffies(conf->dynamic_ps_timeout));
 	} else {
-		if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
+		if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
 			ieee80211_send_nullfunc(local, sdata, 1);
 
-		if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
-		    (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS))
+		if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) &&
+		    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
 			return;
 
 		conf->flags |= IEEE80211_CONF_PS;
@@ -1450,7 +1474,7 @@
 	int count = 0;
 	int timeout;
 
-	if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) {
+	if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS)) {
 		local->ps_sdata = NULL;
 		return;
 	}
@@ -1596,7 +1620,7 @@
 		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 	}
 
-	if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
+	if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) &&
 	    !(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
 		if (drv_tx_frames_pending(local)) {
 			mod_timer(&local->dynamic_ps_timer, jiffies +
@@ -1609,8 +1633,8 @@
 		}
 	}
 
-	if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
-	      (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) ||
+	if (!(ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) &&
+	      ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) ||
 	    (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
 		ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
 		local->hw.conf.flags |= IEEE80211_CONF_PS;
@@ -2135,7 +2159,7 @@
 	ieee80211_recalc_ps(local, -1);
 	mutex_unlock(&local->iflist_mtx);
 
-	if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+	if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
 		goto out;
 
 	/*
@@ -2233,7 +2257,7 @@
 	 */
 	ifmgd->probe_send_count++;
 
-	if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
+	if (ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) {
 		ifmgd->nullfunc_failed = false;
 		ieee80211_send_nullfunc(sdata->local, sdata, 0);
 	} else {
@@ -2495,6 +2519,34 @@
 	sdata->u.mgd.auth_data = NULL;
 }
 
+static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
+					 bool assoc)
+{
+	struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
+
+	sdata_assert_lock(sdata);
+
+	if (!assoc) {
+		/*
+		 * we are not associated yet, the only timer that could be
+		 * running is the timeout for the association response which
+		 * is not relevant anymore.
+		 */
+		del_timer_sync(&sdata->u.mgd.timer);
+		sta_info_destroy_addr(sdata, assoc_data->bss->bssid);
+
+		eth_zero_addr(sdata->u.mgd.bssid);
+		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
+		sdata->u.mgd.flags = 0;
+		mutex_lock(&sdata->local->mtx);
+		ieee80211_vif_release_channel(sdata);
+		mutex_unlock(&sdata->local->mtx);
+	}
+
+	kfree(assoc_data);
+	sdata->u.mgd.assoc_data = NULL;
+}
+
 static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
 				     struct ieee80211_mgmt *mgmt, size_t len)
 {
@@ -2510,7 +2562,7 @@
 		return;
 	auth_data->expected_transaction = 4;
 	drv_mgd_prepare_tx(sdata->local, sdata);
-	if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+	if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
 		tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
 			   IEEE80211_TX_INTFL_MLME_CONN_TX;
 	ieee80211_send_auth(sdata, 3, auth_data->algorithm, 0,
@@ -2687,28 +2739,42 @@
 				     struct ieee80211_mgmt *mgmt, size_t len)
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-	const u8 *bssid = NULL;
-	u16 reason_code;
+	u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
 
 	sdata_assert_lock(sdata);
 
 	if (len < 24 + 2)
 		return;
 
-	if (!ifmgd->associated ||
-	    !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
+	if (ifmgd->associated &&
+	    ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) {
+		const u8 *bssid = ifmgd->associated->bssid;
+
+		sdata_info(sdata, "deauthenticated from %pM (Reason: %u=%s)\n",
+			   bssid, reason_code,
+			   ieee80211_get_reason_code_string(reason_code));
+
+		ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
+
+		ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false,
+					    reason_code);
 		return;
+	}
 
-	bssid = ifmgd->associated->bssid;
+	if (ifmgd->assoc_data &&
+	    ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) {
+		const u8 *bssid = ifmgd->assoc_data->bss->bssid;
 
-	reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
+		sdata_info(sdata,
+			   "deauthenticated from %pM while associating (Reason: %u=%s)\n",
+			   bssid, reason_code,
+			   ieee80211_get_reason_code_string(reason_code));
 
-	sdata_info(sdata, "deauthenticated from %pM (Reason: %u=%s)\n",
-		   bssid, reason_code, ieee80211_get_reason_code_string(reason_code));
+		ieee80211_destroy_assoc_data(sdata, false);
 
-	ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
-
-	ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false, reason_code);
+		cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
+		return;
+	}
 }
 
 
@@ -2788,34 +2854,6 @@
 	}
 }
 
-static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
-					 bool assoc)
-{
-	struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
-
-	sdata_assert_lock(sdata);
-
-	if (!assoc) {
-		/*
-		 * we are not associated yet, the only timer that could be
-		 * running is the timeout for the association response which
-		 * which is not relevant anymore.
-		 */
-		del_timer_sync(&sdata->u.mgd.timer);
-		sta_info_destroy_addr(sdata, assoc_data->bss->bssid);
-
-		eth_zero_addr(sdata->u.mgd.bssid);
-		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
-		sdata->u.mgd.flags = 0;
-		mutex_lock(&sdata->local->mtx);
-		ieee80211_vif_release_channel(sdata);
-		mutex_unlock(&sdata->local->mtx);
-	}
-
-	kfree(assoc_data);
-	sdata->u.mgd.assoc_data = NULL;
-}
-
 static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
 				    struct cfg80211_bss *cbss,
 				    struct ieee80211_mgmt *mgmt, size_t len)
@@ -3299,7 +3337,7 @@
 		}
 		ifmgd->have_beacon = true;
 		ifmgd->assoc_data->need_beacon = false;
-		if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
+		if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
 			sdata->vif.bss_conf.sync_tsf =
 				le64_to_cpu(mgmt->u.beacon.timestamp);
 			sdata->vif.bss_conf.sync_device_ts =
@@ -3405,7 +3443,7 @@
 					  len - baselen, false, &elems,
 					  care_about_ies, ncrc);
 
-	if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) {
+	if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) {
 		bool directed_tim = ieee80211_check_tim(elems.tim,
 							elems.tim_len,
 							ifmgd->aid);
@@ -3473,7 +3511,7 @@
 	 * the driver will use them. The synchronized view is currently
 	 * guaranteed only in certain callbacks.
 	 */
-	if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
+	if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
 		sdata->vif.bss_conf.sync_tsf =
 			le64_to_cpu(mgmt->u.beacon.timestamp);
 		sdata->vif.bss_conf.sync_device_ts =
@@ -3711,7 +3749,7 @@
 			auth_data->expected_transaction = trans;
 		}
 
-		if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+		if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
 			tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
 				   IEEE80211_TX_INTFL_MLME_CONN_TX;
 
@@ -3784,7 +3822,7 @@
 		   IEEE80211_ASSOC_MAX_TRIES);
 	ieee80211_send_assoc(sdata);
 
-	if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
+	if (!ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
 		assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
 		assoc_data->timeout_started = true;
 		run_again(sdata, assoc_data->timeout);
@@ -3898,7 +3936,7 @@
 
 		memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
 
-		if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+		if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
 			max_tries = max_nullfunc_tries;
 		else
 			max_tries = max_probe_tries;
@@ -3923,7 +3961,7 @@
 			}
 		} else if (time_is_after_jiffies(ifmgd->probe_timeout))
 			run_again(sdata, ifmgd->probe_timeout);
-		else if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
+		else if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
 			mlme_dbg(sdata,
 				 "Failed to send nullfunc to AP %pM after %dms, disconnecting\n",
 				 bssid, probe_wait_ms);
@@ -3992,14 +4030,11 @@
 
 static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
 {
-	u32 flags;
-
 	if (sdata->vif.type == NL80211_IFTYPE_STATION) {
 		__ieee80211_stop_poll(sdata);
 
 		/* let's probe the connection once */
-		flags = sdata->local->hw.flags;
-		if (!(flags & IEEE80211_HW_CONNECTION_MONITOR))
+		if (!ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
 			ieee80211_queue_work(&sdata->local->hw,
 					     &sdata->u.mgd.monitor_work);
 		/* and do all the other regular work too */
@@ -4307,15 +4342,15 @@
 }
 
 static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
-				     struct cfg80211_bss *cbss, bool assoc)
+				     struct cfg80211_bss *cbss, bool assoc,
+				     bool override)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct ieee80211_bss *bss = (void *)cbss->priv;
 	struct sta_info *new_sta = NULL;
 	struct ieee80211_supported_band *sband;
-	struct ieee80211_sta_ht_cap sta_ht_cap;
-	bool have_sta = false, is_override = false;
+	bool have_sta = false;
 	int err;
 
 	sband = local->hw.wiphy->bands[cbss->channel->band];
@@ -4335,14 +4370,7 @@
 			return -ENOMEM;
 	}
 
-	memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
-	ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
-
-	is_override = (sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) !=
-		      (sband->ht_cap.cap &
-		       IEEE80211_HT_CAP_SUP_WIDTH_20_40);
-
-	if (new_sta || is_override) {
+	if (new_sta || override) {
 		err = ieee80211_prep_channel(sdata, cbss);
 		if (err) {
 			if (new_sta)
@@ -4419,8 +4447,8 @@
 				sdata->vif.bss_conf.sync_dtim_count = tim_ie[2];
 			else
 				sdata->vif.bss_conf.sync_dtim_count = 0;
-		} else if (!(local->hw.flags &
-					IEEE80211_HW_TIMING_BEACON_ONLY)) {
+		} else if (!ieee80211_hw_check(&sdata->local->hw,
+					       TIMING_BEACON_ONLY)) {
 			ies = rcu_dereference(cbss->proberesp_ies);
 			/* must be non-NULL since beacon IEs were NULL */
 			sdata->vif.bss_conf.sync_tsf = ies->tsf;
@@ -4552,7 +4580,7 @@
 
 	sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
 
-	err = ieee80211_prep_connection(sdata, req->bss, false);
+	err = ieee80211_prep_connection(sdata, req->bss, false, false);
 	if (err)
 		goto err_clear;
 
@@ -4570,6 +4598,9 @@
 	eth_zero_addr(ifmgd->bssid);
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
 	ifmgd->auth_data = NULL;
+	mutex_lock(&sdata->local->mtx);
+	ieee80211_vif_release_channel(sdata);
+	mutex_unlock(&sdata->local->mtx);
  err_free:
 	kfree(auth_data);
 	return err;
@@ -4624,6 +4655,7 @@
 	struct ieee80211_supported_band *sband;
 	const u8 *ssidie, *ht_ie, *vht_ie;
 	int i, err;
+	bool override = false;
 
 	assoc_data = kzalloc(sizeof(*assoc_data) + req->ie_len, GFP_KERNEL);
 	if (!assoc_data)
@@ -4728,14 +4760,6 @@
 		}
 	}
 
-	if (req->flags & ASSOC_REQ_DISABLE_HT) {
-		ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
-		ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
-	}
-
-	if (req->flags & ASSOC_REQ_DISABLE_VHT)
-		ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
-
 	/* Also disable HT if we don't support it or the AP doesn't use WMM */
 	sband = local->hw.wiphy->bands[req->bss->channel->band];
 	if (!sband->ht_cap.ht_supported ||
@@ -4802,7 +4826,7 @@
 	rcu_read_unlock();
 
 	if (WARN((sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD) &&
-		 (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK),
+		 ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK),
 	     "U-APSD not supported with HW_PS_NULLFUNC_STACK\n"))
 		sdata->vif.driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
 
@@ -4847,14 +4871,43 @@
 	ifmgd->dtim_period = 0;
 	ifmgd->have_beacon = false;
 
-	err = ieee80211_prep_connection(sdata, req->bss, true);
+	/* override HT/VHT configuration only if the AP and we support it */
+	if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
+		struct ieee80211_sta_ht_cap sta_ht_cap;
+
+		if (req->flags & ASSOC_REQ_DISABLE_HT)
+			override = true;
+
+		memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
+		ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
+
+		/* check for 40 MHz disable override */
+		if (!(ifmgd->flags & IEEE80211_STA_DISABLE_40MHZ) &&
+		    sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
+		    !(sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+			override = true;
+
+		if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
+		    req->flags & ASSOC_REQ_DISABLE_VHT)
+			override = true;
+	}
+
+	if (req->flags & ASSOC_REQ_DISABLE_HT) {
+		ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
+		ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+	}
+
+	if (req->flags & ASSOC_REQ_DISABLE_VHT)
+		ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+
+	err = ieee80211_prep_connection(sdata, req->bss, true, override);
 	if (err)
 		goto err_clear;
 
 	rcu_read_lock();
 	beacon_ies = rcu_dereference(req->bss->beacon_ies);
 
-	if (sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC &&
+	if (ieee80211_hw_check(&sdata->local->hw, NEED_DTIM_BEFORE_ASSOC) &&
 	    !beacon_ies) {
 		/*
 		 * Wait up to one beacon interval ...
@@ -4881,7 +4934,7 @@
 		assoc_data->timeout = jiffies;
 		assoc_data->timeout_started = true;
 
-		if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
+		if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
 			sdata->vif.bss_conf.sync_tsf = beacon_ies->tsf;
 			sdata->vif.bss_conf.sync_device_ts =
 				bss->device_ts_beacon;
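
The dominant change across these files converts open-coded tests of hw.flags into ieee80211_hw_check() calls. The macro itself is defined elsewhere in this series; as a minimal sketch, assuming the flags become an unsigned long bitmap indexed by the IEEE80211_HW_* enumerators, it could look like:

/*
 * Sketch only -- the real definition is not part of this hunk.
 * Token-pasting the short flag name keeps call sites compact.
 */
#define ieee80211_hw_check(hw, flg) \
	_ieee80211_hw_check(hw, IEEE80211_HW_##flg)

static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
				       enum ieee80211_hw_flags flg)
{
	return test_bit(flg, hw->flags);
}

Compared with masking a u32, a bitmap removes the 32-flag ceiling and lets a checker validate flag names at compile time.
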
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 683f0e3..f2c75cf 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -46,7 +46,7 @@
 	}
 
 	if (!local->offchannel_ps_enabled ||
-	    !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
+	    !ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
 		/*
 		 * If power save was enabled, no need to send a nullfunc
 		 * frame because AP knows that we are sleeping. But if the
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index ac6ad62..06b60980 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -23,7 +23,7 @@
 
 	ieee80211_del_virtual_monitor(local);
 
-	if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
+	if (ieee80211_hw_check(hw, AMPDU_AGGREGATION)) {
 		mutex_lock(&local->sta_mtx);
 		list_for_each_entry(sta, &local->sta_list, list) {
 			set_sta_flag(sta, WLAN_STA_BLOCK_BA);
@@ -82,7 +82,7 @@
 		if (err < 0) {
 			local->quiescing = false;
 			local->wowlan = false;
-			if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
+			if (ieee80211_hw_check(hw, AMPDU_AGGREGATION)) {
 				mutex_lock(&local->sta_mtx);
 				list_for_each_entry(sta,
 						    &local->sta_list, list) {
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index d53355b..36ba7c4 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -680,12 +680,18 @@
 		info->control.rates[i].count = 0;
 	}
 
-	if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
+	if (ieee80211_hw_check(&sdata->local->hw, HAS_RATE_CONTROL))
 		return;
 
-	ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
+	if (ista) {
+		spin_lock_bh(&sta->rate_ctrl_lock);
+		ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
+		spin_unlock_bh(&sta->rate_ctrl_lock);
+	} else {
+		ref->ops->get_rate(ref->priv, NULL, NULL, txrc);
+	}
 
-	if (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_RC_TABLE)
+	if (ieee80211_hw_check(&sdata->local->hw, SUPPORTS_RC_TABLE))
 		return;
 
 	ieee80211_get_tx_rates(&sdata->vif, ista, txrc->skb,
@@ -727,7 +733,7 @@
 	if (local->open_count)
 		return -EBUSY;
 
-	if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
+	if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
 		if (WARN_ON(!local->ops->set_rts_threshold))
 			return -EINVAL;
 		return 0;
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 38652f0..25c9be5 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -42,10 +42,12 @@
 	if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
 		return;
 
+	spin_lock_bh(&sta->rate_ctrl_lock);
 	if (ref->ops->tx_status)
 		ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
 	else
 		ref->ops->tx_status_noskb(ref->priv, sband, ista, priv_sta, info);
+	spin_unlock_bh(&sta->rate_ctrl_lock);
 }
 
 static inline void
@@ -64,7 +66,9 @@
 	if (WARN_ON_ONCE(!ref->ops->tx_status_noskb))
 		return;
 
+	spin_lock_bh(&sta->rate_ctrl_lock);
 	ref->ops->tx_status_noskb(ref->priv, sband, ista, priv_sta, info);
+	spin_unlock_bh(&sta->rate_ctrl_lock);
 }
 
 static inline void rate_control_rate_init(struct sta_info *sta)
@@ -91,8 +95,10 @@
 
 	sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
 
+	spin_lock_bh(&sta->rate_ctrl_lock);
 	ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
 			    priv_sta);
+	spin_unlock_bh(&sta->rate_ctrl_lock);
 	rcu_read_unlock();
 	set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
 }
@@ -115,18 +121,20 @@
 			return;
 		}
 
+		spin_lock_bh(&sta->rate_ctrl_lock);
 		ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
 				      ista, priv_sta, changed);
+		spin_unlock_bh(&sta->rate_ctrl_lock);
 		rcu_read_unlock();
 	}
 	drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
 }
 
 static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
-					   struct ieee80211_sta *sta,
-					   gfp_t gfp)
+					   struct sta_info *sta, gfp_t gfp)
 {
-	return ref->ops->alloc_sta(ref->priv, sta, gfp);
+	spin_lock_init(&sta->rate_ctrl_lock);
+	return ref->ops->alloc_sta(ref->priv, &sta->sta, gfp);
 }
 
 static inline void rate_control_free_sta(struct sta_info *sta)
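
rate_ctrl_lock, initialized in rate_control_alloc_sta() above, now serializes every call into the rate-control algorithm for a given station. Under that guarantee an algorithm can keep plain per-STA state; a hypothetical callback (all "example_" names are invented here):

/* mac80211 holds sta->rate_ctrl_lock around this call, so the
 * counters need no atomics and no private lock. */
struct example_rc_sta {
	u32 tx_attempts;
	u32 tx_acked;
};

static void example_rc_tx_status(void *priv,
				 struct ieee80211_supported_band *sband,
				 struct ieee80211_sta *ista, void *priv_sta,
				 struct sk_buff *skb)
{
	struct example_rc_sta *ers = priv_sta;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	ers->tx_attempts++;
	if (info->flags & IEEE80211_TX_STAT_ACK)
		ers->tx_acked++;
}

Note that the get_rate() wrapper in rate.c only takes the lock when a station is present: the NULL-station path (e.g. pre-association frames) has no per-STA state to protect.
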
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 7430a1d..543b672 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -1070,7 +1070,7 @@
 	if (sband->band != IEEE80211_BAND_2GHZ)
 		return;
 
-	if (!(mp->hw->flags & IEEE80211_HW_SUPPORTS_HT_CCK_RATES))
+	if (!ieee80211_hw_check(mp->hw, SUPPORTS_HT_CCK_RATES))
 		return;
 
 	mi->cck_supported = 0;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5793f75..5dae166 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -32,6 +32,16 @@
 #include "wme.h"
 #include "rate.h"
 
+static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
+{
+	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+
+	u64_stats_update_begin(&tstats->syncp);
+	tstats->rx_packets++;
+	tstats->rx_bytes += len;
+	u64_stats_update_end(&tstats->syncp);
+}
+
 /*
  * monitor mode reception
  *
@@ -42,7 +52,7 @@
 					   struct sk_buff *skb,
 					   unsigned int rtap_vendor_space)
 {
-	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
+	if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
 		if (likely(skb->len > FCS_LEN))
 			__pskb_trim(skb, skb->len - FCS_LEN);
 		else {
@@ -100,7 +110,7 @@
 		len = ALIGN(len, 8);
 		len += 8;
 	}
-	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
+	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
 		len += 1;
 
 	/* antenna field, if we don't have per-chain info */
@@ -175,7 +185,7 @@
 	}
 
 	mpdulen = skb->len;
-	if (!(has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)))
+	if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
 		mpdulen += FCS_LEN;
 
 	rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
@@ -229,7 +239,7 @@
 	}
 
 	/* IEEE80211_RADIOTAP_FLAGS */
-	if (has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS))
+	if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
 		*pos |= IEEE80211_RADIOTAP_F_FCS;
 	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
 		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
@@ -279,7 +289,7 @@
 	pos += 2;
 
 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
-	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM &&
+	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
 	    !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
 		*pos = status->signal;
 		rthdr->it_present |=
@@ -448,7 +458,7 @@
 	 * the SKB because it has a bad FCS/PLCP checksum.
 	 */
 
-	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
+	if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
 		present_fcs_len = FCS_LEN;
 
 	/* ensure hdr->frame_control and vendor radiotap data are in skb head */
@@ -529,8 +539,7 @@
 		}
 
 		prev_dev = sdata->dev;
-		sdata->dev->stats.rx_packets++;
-		sdata->dev->stats.rx_bytes += skb->len;
+		ieee80211_rx_stats(sdata->dev, skb->len);
 	}
 
 	if (prev_dev) {
@@ -981,7 +990,6 @@
 	struct sk_buff *skb = rx->skb;
 	struct ieee80211_local *local = rx->local;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	struct sta_info *sta = rx->sta;
 	struct tid_ampdu_rx *tid_agg_rx;
 	u16 sc;
@@ -1016,10 +1024,6 @@
 	    ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
 		goto dont_reorder;
 
-	/* not actually part of this BA session */
-	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
-		goto dont_reorder;
-
 	/* new, potentially un-ordered, ampdu frame - process it */
 
 	/* reset session timer */
@@ -1073,10 +1077,8 @@
 		if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
 			     rx->sta->last_seq_ctrl[rx->seqno_idx] ==
 			     hdr->seq_ctrl)) {
-			if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
-				rx->local->dot11FrameDuplicateCount++;
-				rx->sta->num_duplicates++;
-			}
+			I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
+			rx->sta->num_duplicates++;
 			return RX_DROP_UNUSABLE;
 		} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
 			rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
@@ -1195,11 +1197,13 @@
 
 	atomic_inc(&ps->num_sta_ps);
 	set_sta_flag(sta, WLAN_STA_PS_STA);
-	if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
+	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
 		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
 	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
 	       sta->sta.addr, sta->sta.aid);
 
+	ieee80211_clear_fast_xmit(sta);
+
 	if (!sta->sta.txq[0])
 		return;
 
@@ -1241,7 +1245,7 @@
 	struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
 	bool in_ps;
 
-	WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
+	WARN_ON(!ieee80211_hw_check(&sta_inf->local->hw, AP_LINK_PS));
 
 	/* Don't let the same PS state be set twice */
 	in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
@@ -1265,7 +1269,7 @@
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
 	int tid, ac;
 
-	if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
+	if (!rx->sta)
 		return RX_CONTINUE;
 
 	if (sdata->vif.type != NL80211_IFTYPE_AP &&
@@ -1277,7 +1281,7 @@
 	 * uAPSD and PS-Poll frames (the latter shouldn't even come up from
 	 * it to mac80211 since they're handled.)
 	 */
-	if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS)
+	if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
 		return RX_CONTINUE;
 
 	/*
@@ -1367,11 +1371,7 @@
 			}
 		}
 	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
-		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
-						NL80211_IFTYPE_OCB);
-		/* OCB uses wild-card BSSID */
-		if (is_broadcast_ether_addr(bssid))
-			sta->last_rx = jiffies;
+		sta->last_rx = jiffies;
 	} else if (!is_multicast_ether_addr(hdr->addr1)) {
 		/*
 		 * Mesh beacons will update last_rx when if they are found to
@@ -1386,9 +1386,6 @@
 		}
 	}
 
-	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
-		return RX_CONTINUE;
-
 	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
 		ieee80211_sta_rx_notify(rx->sdata, hdr);
 
@@ -1416,7 +1413,7 @@
 	 * Change STA power saving mode only at the end of a frame
 	 * exchange sequence.
 	 */
-	if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
+	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
 	    !ieee80211_has_morefrags(hdr->frame_control) &&
 	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
 	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
@@ -1517,13 +1514,6 @@
 	 * possible.
 	 */
 
-	/*
-	 * No point in finding a key and decrypting if the frame is neither
-	 * addressed to us nor a multicast frame.
-	 */
-	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
-		return RX_CONTINUE;
-
 	/* start without a key */
 	rx->key = NULL;
 	fc = hdr->frame_control;
@@ -1795,7 +1785,7 @@
 	frag = sc & IEEE80211_SCTL_FRAG;
 
 	if (is_multicast_ether_addr(hdr->addr1)) {
-		rx->local->dot11MulticastReceivedFrameCount++;
+		I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
 		goto out_no_led;
 	}
 
@@ -1878,7 +1868,7 @@
 
 	rx->skb = __skb_dequeue(&entry->skb_list);
 	if (skb_tailroom(rx->skb) < entry->extra_len) {
-		I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
+		I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
 		if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
 					      GFP_ATOMIC))) {
 			I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
@@ -2054,18 +2044,15 @@
 	struct sk_buff *skb, *xmit_skb;
 	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
 	struct sta_info *dsta;
-	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
-
-	dev->stats.rx_packets++;
-	dev->stats.rx_bytes += rx->skb->len;
 
 	skb = rx->skb;
 	xmit_skb = NULL;
 
+	ieee80211_rx_stats(dev, skb->len);
+
 	if ((sdata->vif.type == NL80211_IFTYPE_AP ||
 	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
 	    !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
-	    (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
 	    (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
 		if (is_multicast_ether_addr(ehdr->h_dest)) {
 			/*
@@ -2207,7 +2194,6 @@
 	struct sk_buff *skb = rx->skb, *fwd_skb;
 	struct ieee80211_local *local = rx->local;
 	struct ieee80211_sub_if_data *sdata = rx->sdata;
-	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	u16 q, hdrlen;
 
@@ -2238,8 +2224,7 @@
 	    mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
 		return RX_DROP_MONITOR;
 
-	if (!ieee80211_is_data(hdr->frame_control) ||
-	    !(status->rx_flags & IEEE80211_RX_RA_MATCH))
+	if (!ieee80211_is_data(hdr->frame_control))
 		return RX_CONTINUE;
 
 	if (!mesh_hdr->ttl)
@@ -2330,11 +2315,9 @@
 	IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
 	ieee80211_add_pending_skb(local, fwd_skb);
  out:
-	if (is_multicast_ether_addr(hdr->addr1) ||
-	    sdata->dev->flags & IFF_PROMISC)
+	if (is_multicast_ether_addr(hdr->addr1))
 		return RX_CONTINUE;
-	else
-		return RX_DROP_MONITOR;
+	return RX_DROP_MONITOR;
 }
 #endif
 
@@ -2445,6 +2428,9 @@
 		struct {
 			__le16 control, start_seq_num;
 		} __packed bar_data;
+		struct ieee80211_event event = {
+			.type = BAR_RX_EVENT,
+		};
 
 		if (!rx->sta)
 			return RX_DROP_MONITOR;
@@ -2460,6 +2446,9 @@
 			return RX_DROP_MONITOR;
 
 		start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
+		event.u.ba.tid = tid;
+		event.u.ba.ssn = start_seq_num;
+		event.u.ba.sta = &rx->sta->sta;
 
 		/* reset session timer */
 		if (tid_agg_rx->timeout)
@@ -2472,6 +2461,8 @@
 						 start_seq_num, frames);
 		spin_unlock(&tid_agg_rx->reorder_lock);
 
+		drv_event_callback(rx->local, rx->sdata, &event);
+
 		kfree_skb(skb);
 		return RX_QUEUED;
 	}
@@ -2552,7 +2543,7 @@
 	    !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
 		int sig = 0;
 
-		if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
+		if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM))
 			sig = status->signal;
 
 		cfg80211_report_obss_beacon(rx->local->hw.wiphy,
@@ -2561,9 +2552,6 @@
 		rx->flags |= IEEE80211_RX_BEACON_REPORTED;
 	}
 
-	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
-		return RX_DROP_MONITOR;
-
 	if (ieee80211_drop_unencrypted_mgmt(rx))
 		return RX_DROP_UNUSABLE;
 
@@ -2591,9 +2579,6 @@
 	    mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
 		return RX_DROP_UNUSABLE;
 
-	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
-		return RX_DROP_UNUSABLE;
-
 	switch (mgmt->u.action.category) {
 	case WLAN_CATEGORY_HT:
 		/* reject HT action frames from stations not supporting HT */
@@ -2889,7 +2874,7 @@
 	 * it transmitted were processed or returned.
 	 */
 
-	if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
+	if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM))
 		sig = status->signal;
 
 	if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
@@ -2954,7 +2939,7 @@
 			info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
 				      IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
 				      IEEE80211_TX_CTL_NO_CCK_RATE;
-			if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
+			if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
 				info->hw_queue =
 					local->hw.offchannel_tx_hw_queue;
 		}
@@ -3077,8 +3062,7 @@
 		}
 
 		prev_dev = sdata->dev;
-		sdata->dev->stats.rx_packets++;
-		sdata->dev->stats.rx_bytes += skb->len;
+		ieee80211_rx_stats(sdata->dev, skb->len);
 	}
 
 	if (prev_dev) {
@@ -3246,16 +3230,25 @@
 	ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
 	spin_unlock(&tid_agg_rx->reorder_lock);
 
+	if (!skb_queue_empty(&frames)) {
+		struct ieee80211_event event = {
+			.type = BA_FRAME_TIMEOUT,
+			.u.ba.tid = tid,
+			.u.ba.sta = &sta->sta,
+		};
+		drv_event_callback(rx.local, rx.sdata, &event);
+	}
+
 	ieee80211_rx_handlers(&rx, &frames);
 }
 
 /* main receive path */
 
-static bool prepare_for_handlers(struct ieee80211_rx_data *rx,
-				 struct ieee80211_hdr *hdr)
+static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
 {
 	struct ieee80211_sub_if_data *sdata = rx->sdata;
 	struct sk_buff *skb = rx->skb;
+	struct ieee80211_hdr *hdr = (void *)skb->data;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
 	int multicast = is_multicast_ether_addr(hdr->addr1);
@@ -3264,30 +3257,23 @@
 	case NL80211_IFTYPE_STATION:
 		if (!bssid && !sdata->u.mgd.use_4addr)
 			return false;
-		if (!multicast &&
-		    !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
-			if (!(sdata->dev->flags & IFF_PROMISC) ||
-			    sdata->u.mgd.use_4addr)
-				return false;
-			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
-		}
-		break;
+		if (multicast)
+			return true;
+		return ether_addr_equal(sdata->vif.addr, hdr->addr1);
 	case NL80211_IFTYPE_ADHOC:
 		if (!bssid)
 			return false;
 		if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
 		    ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
 			return false;
-		if (ieee80211_is_beacon(hdr->frame_control)) {
+		if (ieee80211_is_beacon(hdr->frame_control))
 			return true;
-		} else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
+		if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid))
 			return false;
-		} else if (!multicast &&
-			   !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
-			if (!(sdata->dev->flags & IFF_PROMISC))
-				return false;
-			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
-		} else if (!rx->sta) {
+		if (!multicast &&
+		    !ether_addr_equal(sdata->vif.addr, hdr->addr1))
+			return false;
+		if (!rx->sta) {
 			int rate_idx;
 			if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
 				rate_idx = 0; /* TODO: HT/VHT rates */
@@ -3296,25 +3282,18 @@
 			ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
 						 BIT(rate_idx));
 		}
-		break;
+		return true;
 	case NL80211_IFTYPE_OCB:
 		if (!bssid)
 			return false;
-		if (ieee80211_is_beacon(hdr->frame_control)) {
+		if (ieee80211_is_beacon(hdr->frame_control))
 			return false;
-		} else if (!is_broadcast_ether_addr(bssid)) {
-			ocb_dbg(sdata, "BSSID mismatch in OCB mode!\n");
+		if (!is_broadcast_ether_addr(bssid))
 			return false;
-		} else if (!multicast &&
-			   !ether_addr_equal(sdata->dev->dev_addr,
-					     hdr->addr1)) {
-			/* if we are in promisc mode we also accept
-			 * packets not destined for us
-			 */
-			if (!(sdata->dev->flags & IFF_PROMISC))
-				return false;
-			rx->flags &= ~IEEE80211_RX_RA_MATCH;
-		} else if (!rx->sta) {
+		if (!multicast &&
+		    !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
+			return false;
+		if (!rx->sta) {
 			int rate_idx;
 			if (status->flag & RX_FLAG_HT)
 				rate_idx = 0; /* TODO: HT rates */
@@ -3323,22 +3302,17 @@
 			ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
 						BIT(rate_idx));
 		}
-		break;
+		return true;
 	case NL80211_IFTYPE_MESH_POINT:
-		if (!multicast &&
-		    !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
-			if (!(sdata->dev->flags & IFF_PROMISC))
-				return false;
-
-			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
-		}
-		break;
+		if (multicast)
+			return true;
+		return ether_addr_equal(sdata->vif.addr, hdr->addr1);
 	case NL80211_IFTYPE_AP_VLAN:
 	case NL80211_IFTYPE_AP:
-		if (!bssid) {
-			if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
-				return false;
-		} else if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
+		if (!bssid)
+			return ether_addr_equal(sdata->vif.addr, hdr->addr1);
+
+		if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
 			/*
 			 * Accept public action frames even when the
 			 * BSSID doesn't match, this is used for P2P
@@ -3350,10 +3324,10 @@
 				return false;
 			if (ieee80211_is_public_action(hdr, skb->len))
 				return true;
-			if (!ieee80211_is_beacon(hdr->frame_control))
-				return false;
-			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
-		} else if (!ieee80211_has_tods(hdr->frame_control)) {
+			return ieee80211_is_beacon(hdr->frame_control);
+		}
+
+		if (!ieee80211_has_tods(hdr->frame_control)) {
 			/* ignore data frames to TDLS-peers */
 			if (ieee80211_is_data(hdr->frame_control))
 				return false;
@@ -3362,30 +3336,22 @@
 			    !ether_addr_equal(bssid, hdr->addr1))
 				return false;
 		}
-		break;
+		return true;
 	case NL80211_IFTYPE_WDS:
 		if (bssid || !ieee80211_is_data(hdr->frame_control))
 			return false;
-		if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
-			return false;
-		break;
+		return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2);
 	case NL80211_IFTYPE_P2P_DEVICE:
-		if (!ieee80211_is_public_action(hdr, skb->len) &&
-		    !ieee80211_is_probe_req(hdr->frame_control) &&
-		    !ieee80211_is_probe_resp(hdr->frame_control) &&
-		    !ieee80211_is_beacon(hdr->frame_control))
-			return false;
-		if (!ether_addr_equal(sdata->vif.addr, hdr->addr1) &&
-		    !multicast)
-			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
-		break;
+		return ieee80211_is_public_action(hdr, skb->len) ||
+		       ieee80211_is_probe_req(hdr->frame_control) ||
+		       ieee80211_is_probe_resp(hdr->frame_control) ||
+		       ieee80211_is_beacon(hdr->frame_control);
 	default:
-		/* should never get here */
-		WARN_ON_ONCE(1);
 		break;
 	}
 
-	return true;
+	WARN_ON_ONCE(1);
+	return false;
 }
 
 /*
@@ -3399,13 +3365,10 @@
 {
 	struct ieee80211_local *local = rx->local;
 	struct ieee80211_sub_if_data *sdata = rx->sdata;
-	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-	struct ieee80211_hdr *hdr = (void *)skb->data;
 
 	rx->skb = skb;
-	status->rx_flags |= IEEE80211_RX_RA_MATCH;
 
-	if (!prepare_for_handlers(rx, hdr))
+	if (!ieee80211_accept_frame(rx))
 		return false;
 
 	if (!consume) {
@@ -3448,7 +3411,7 @@
 	rx.local = local;
 
 	if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
-		local->dot11ReceivedFragmentCount++;
+		I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
 
 	if (ieee80211_is_mgmt(fc)) {
 		/* drop frame if too short for header */
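
ieee80211_rx_stats() (and the matching ieee80211_tx_stats() added in tx.c below) replaces direct dev->stats updates with lockless per-CPU pcpu_sw_netstats counters guarded by a u64_stats seqcount. The reader side then sums across CPUs; a sketch of such a consumer, assuming dev->tstats was allocated with netdev_alloc_pcpu_stats():

/* Hypothetical aggregation helper; only the fetch/retry pattern is
 * the point. */
static void example_fold_tstats(struct net_device *dev,
				struct rtnl_link_stats64 *stats)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_sw_netstats *tstats;
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		tstats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			rx_bytes   = tstats->rx_bytes;
			tx_packets = tstats->tx_packets;
			tx_bytes   = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}
}
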
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 7bb6a93..11d0901 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -6,7 +6,7 @@
  * Copyright 2005, Devicescape Software, Inc.
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
- * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright 2013-2015  Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -69,10 +69,11 @@
 	int clen, srlen;
 	enum nl80211_bss_scan_width scan_width;
 	s32 signal = 0;
+	bool signal_valid;
 
-	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
+	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
 		signal = rx_status->signal * 100;
-	else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
+	else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC))
 		signal = (rx_status->signal * 100) / local->hw.max_signal;
 
 	scan_width = NL80211_BSS_CHAN_WIDTH_20;
@@ -86,6 +87,11 @@
 					       GFP_ATOMIC);
 	if (!cbss)
 		return NULL;
+	/* In case the signal is invalid, update the status */
+	signal_valid = abs(channel->center_freq - cbss->channel->center_freq)
+		<= local->hw.wiphy->max_adj_channel_rssi_comp;
+	if (!signal_valid)
+		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
 
 	bss = (void *)cbss->priv;
 
@@ -257,7 +263,7 @@
 	if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
 		return false;
 
-	if (local->hw.flags & IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS) {
+	if (ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS)) {
 		for (i = 0; i < req->n_channels; i++) {
 			local->hw_scan_req->req.channels[i] = req->channels[i];
 			bands_used |= BIT(req->channels[i]->band);
@@ -326,7 +332,7 @@
 		return;
 
 	if (hw_scan && !aborted &&
-	    !(local->hw.flags & IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS) &&
+	    !ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS) &&
 	    ieee80211_prep_hw_scan(local)) {
 		int rc;
 
@@ -520,7 +526,7 @@
 
 		local->hw_scan_ies_bufsize = local->scan_ies_len + req->ie_len;
 
-		if (local->hw.flags & IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS) {
+		if (ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS)) {
 			int i, n_bands = 0;
 			u8 bands_counted = 0;
 
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 2880f2a..666ddac 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -71,6 +71,7 @@
 	.key_offset = offsetof(struct sta_info, sta.addr),
 	.key_len = ETH_ALEN,
 	.hashfn = sta_addr_hash,
+	.max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
 };
 
 /* Caller must hold local->sta_mtx */
@@ -281,12 +282,12 @@
 static int sta_prepare_rate_control(struct ieee80211_local *local,
 				    struct sta_info *sta, gfp_t gfp)
 {
-	if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
+	if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL))
 		return 0;
 
 	sta->rate_ctrl = local->rate_ctrl;
 	sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
-						     &sta->sta, gfp);
+						     sta, gfp);
 	if (!sta->rate_ctrl_priv)
 		return -ENOMEM;
 
@@ -312,6 +313,7 @@
 	INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
 	mutex_init(&sta->ampdu_mlme.mtx);
 #ifdef CONFIG_MAC80211_MESH
+	spin_lock_init(&sta->plink_lock);
 	if (ieee80211_vif_is_mesh(&sdata->vif) &&
 	    !sdata->u.mesh.user_mpm)
 		init_timer(&sta->plink_timer);
@@ -641,7 +643,7 @@
 	}
 
 	/* No need to do anything if the driver does all */
-	if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
+	if (ieee80211_hw_check(&local->hw, AP_LINK_PS))
 		return;
 
 	if (sta->dead)
@@ -1146,7 +1148,7 @@
 	sta->driver_buffered_tids = 0;
 	sta->txq_buffered_tids = 0;
 
-	if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
+	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
 		drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
 
 	if (sta->sta.txq[0]) {
@@ -1217,6 +1219,8 @@
 	ps_dbg(sdata,
 	       "STA %pM aid %d sending %d filtered/%d PS frames since STA not sleeping anymore\n",
 	       sta->sta.addr, sta->sta.aid, filtered, buffered);
+
+	ieee80211_check_fast_xmit(sta);
 }
 
 static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
@@ -1615,6 +1619,7 @@
 
 	if (block) {
 		set_sta_flag(sta, WLAN_STA_PS_DRIVER);
+		ieee80211_clear_fast_xmit(sta);
 		return;
 	}
 
@@ -1632,6 +1637,7 @@
 		ieee80211_queue_work(hw, &sta->drv_deliver_wk);
 	} else {
 		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+		ieee80211_check_fast_xmit(sta);
 	}
 }
 EXPORT_SYMBOL(ieee80211_sta_block_awake);
@@ -1736,6 +1742,7 @@
 			     !sta->sdata->u.vlan.sta))
 				atomic_dec(&sta->sdata->bss->num_mcast_sta);
 			clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
+			ieee80211_clear_fast_xmit(sta);
 		}
 		break;
 	case IEEE80211_STA_AUTHORIZED:
@@ -1745,6 +1752,7 @@
 			     !sta->sdata->u.vlan.sta))
 				atomic_inc(&sta->sdata->bss->num_mcast_sta);
 			set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
+			ieee80211_check_fast_xmit(sta);
 		}
 		break;
 	default:
@@ -1871,8 +1879,8 @@
 		sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif);
 	}
 
-	if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
-	    (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
+	if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) ||
+	    ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) {
 		if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL))) {
 			sinfo->signal = (s8)sta->last_signal;
 			sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
@@ -1924,7 +1932,7 @@
 
 		if (!(tidstats->filled &
 				BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) &&
-		    local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
+		    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
 			tidstats->filled |=
 				BIT(NL80211_TID_STATS_TX_MSDU_RETRIES);
 			tidstats->tx_msdu_retries = sta->tx_msdu_retries[i];
@@ -1932,7 +1940,7 @@
 
 		if (!(tidstats->filled &
 				BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) &&
-		    local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
+		    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
 			tidstats->filled |=
 				BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
 			tidstats->tx_msdu_failed = sta->tx_msdu_failed[i];
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 5c164fb..226f8ca4 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -241,6 +241,34 @@
 /* Value to indicate no TID reservation */
 #define IEEE80211_TID_UNRESERVED	0xff
 
+#define IEEE80211_FAST_XMIT_MAX_IV	18
+
+/**
+ * struct ieee80211_fast_tx - TX fastpath information
+ * @key: key to use for hw crypto
+ * @hdr: the 802.11 header to put with the frame
+ * @hdr_len: actual 802.11 header length
+ * @sa_offs: offset of the SA
+ * @da_offs: offset of the DA
+ * @pn_offs: offset where to put PN for crypto (or 0 if not needed)
+ * @band: band this will be transmitted on, for tx_info
+ * @rcu_head: RCU head to free this struct
+ *
+ * This struct is small enough so that the common case (maximum crypto
+ * header length of 8 like for CCMP/GCMP) fits into a single 64-byte
+ * cache line.
+ */
+struct ieee80211_fast_tx {
+	struct ieee80211_key *key;
+	u8 hdr_len;
+	u8 sa_offs, da_offs, pn_offs;
+	u8 band;
+	u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV +
+	       sizeof(rfc1042_header)];
+
+	struct rcu_head rcu_head;
+};
+
 /**
  * struct sta_info - STA information
  *
@@ -257,6 +285,8 @@
  * @gtk: group keys negotiated with this station, if any
  * @gtk_idx: last installed group key index
  * @rate_ctrl: rate control algorithm reference
+ * @rate_ctrl_lock: spinlock used to protect rate control data
+ *	(data inside the algorithm, so serializes calls there)
  * @rate_ctrl_priv: rate control private per-STA pointer
  * @last_tx_rate: rate used for last transmit, to report to userspace as
  *	"the" transmit rate
@@ -295,10 +325,10 @@
  * @fail_avg: moving percentage of failed MSDUs
  * @tx_packets: number of RX/TX MSDUs
  * @tx_bytes: number of bytes transmitted to this STA
- * @tx_fragments: number of transmitted MPDUs
  * @tid_seq: per-TID sequence numbers for sending to this STA
  * @ampdu_mlme: A-MPDU state machine state
  * @timer_to_tid: identity mapping to ID timers
+ * @plink_lock: serialize access to plink fields
  * @llid: Local link ID
  * @plid: Peer link ID
  * @reason: Cancel reason on PLINK_HOLDING state
@@ -338,6 +368,9 @@
  *	using IEEE80211_NUM_TID entry for non-QoS frames
  * @rx_msdu: MSDUs received from this station, using IEEE80211_NUM_TID
  *	entry for non-QoS frames
+ * @fast_tx: TX fastpath information
+ * @processed_beacon: set to true after peer rates and capabilities are
+ *	processed
  */
 struct sta_info {
 	/* General information, mostly static */
@@ -352,8 +385,11 @@
 	u8 ptk_idx;
 	struct rate_control_ref *rate_ctrl;
 	void *rate_ctrl_priv;
+	spinlock_t rate_ctrl_lock;
 	spinlock_t lock;
 
+	struct ieee80211_fast_tx __rcu *fast_tx;
+
 	struct work_struct drv_deliver_wk;
 
 	u16 listen_interval;
@@ -400,7 +436,6 @@
 	unsigned int fail_avg;
 
 	/* Updated from TX path only, no locking requirements */
-	u32 tx_fragments;
 	u64 tx_packets[IEEE80211_NUM_ACS];
 	u64 tx_bytes[IEEE80211_NUM_ACS];
 	struct ieee80211_tx_rate last_tx_rate;
@@ -422,9 +457,10 @@
 
 #ifdef CONFIG_MAC80211_MESH
 	/*
-	 * Mesh peer link attributes
+	 * Mesh peer link attributes, protected by plink_lock.
 	 * TODO: move to a sub-structure that is referenced with pointer?
 	 */
+	spinlock_t plink_lock;
 	u16 llid;
 	u16 plid;
 	u16 reason;
@@ -432,12 +468,14 @@
 	enum nl80211_plink_state plink_state;
 	u32 plink_timeout;
 	struct timer_list plink_timer;
+
 	s64 t_offset;
 	s64 t_offset_setpoint;
 	/* mesh power save */
 	enum nl80211_mesh_power_mode local_pm;
 	enum nl80211_mesh_power_mode peer_pm;
 	enum nl80211_mesh_power_mode nonpeer_pm;
+	bool processed_beacon;
 #endif
 
 #ifdef CONFIG_MAC80211_DEBUGFS
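
struct ieee80211_fast_tx carries an rcu_head because the fast_tx pointer added to struct sta_info above is __rcu: the TX hot path reads it locklessly while ieee80211_check_fast_xmit()/ieee80211_clear_fast_xmit() swap it out when keys, power-save state or authorization change. A sketch of the reader side under that assumption, with the frame building elided:

/* Hypothetical consumer; only the RCU access shape is the point. */
static bool example_try_fast_xmit(struct sta_info *sta, struct sk_buff *skb)
{
	struct ieee80211_fast_tx *fast_tx;
	bool used = false;

	rcu_read_lock();
	fast_tx = rcu_dereference(sta->fast_tx);
	if (fast_tx) {
		/* prepend fast_tx->hdr (fast_tx->hdr_len bytes), patch
		 * DA/SA at fast_tx->da_offs/sa_offs, insert the PN at
		 * fast_tx->pn_offs if non-zero ... */
		used = true;
	}
	rcu_read_unlock();

	return used;
}
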
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 005fdbe..45628f3 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -181,7 +181,7 @@
 	struct ieee80211_local *local = sta->local;
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 
-	if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+	if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
 		sta->last_rx = jiffies;
 
 	if (ieee80211_is_data_qos(mgmt->frame_control)) {
@@ -414,8 +414,7 @@
 
 	if (is_teardown) {
 		/* This mechanism relies on being able to get ACKs */
-		WARN_ON(!(local->hw.flags &
-			  IEEE80211_HW_REPORTS_TX_ACK_STATUS));
+		WARN_ON(!ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS));
 
 		/* Check if peer has ACKed */
 		if (flags & IEEE80211_TX_STAT_ACK) {
@@ -429,6 +428,74 @@
 	}
 }
 
+static struct ieee80211_sub_if_data *
+ieee80211_sdata_from_skb(struct ieee80211_local *local, struct sk_buff *skb)
+{
+	struct ieee80211_sub_if_data *sdata;
+
+	if (skb->dev) {
+		list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+			if (!sdata->dev)
+				continue;
+
+			if (skb->dev == sdata->dev)
+				return sdata;
+		}
+
+		return NULL;
+	}
+
+	return rcu_dereference(local->p2p_sdata);
+}
+
+static void ieee80211_report_ack_skb(struct ieee80211_local *local,
+				     struct ieee80211_tx_info *info,
+				     bool acked, bool dropped)
+{
+	struct sk_buff *skb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local->ack_status_lock, flags);
+	skb = idr_find(&local->ack_status_frames, info->ack_frame_id);
+	if (skb)
+		idr_remove(&local->ack_status_frames, info->ack_frame_id);
+	spin_unlock_irqrestore(&local->ack_status_lock, flags);
+
+	if (!skb)
+		return;
+
+	if (dropped) {
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
+		u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
+		struct ieee80211_sub_if_data *sdata;
+		struct ieee80211_hdr *hdr = (void *)skb->data;
+
+		rcu_read_lock();
+		sdata = ieee80211_sdata_from_skb(local, skb);
+		if (sdata) {
+			if (ieee80211_is_nullfunc(hdr->frame_control) ||
+			    ieee80211_is_qos_nullfunc(hdr->frame_control))
+				cfg80211_probe_status(sdata->dev, hdr->addr1,
+						      cookie, acked,
+						      GFP_ATOMIC);
+			else
+				cfg80211_mgmt_tx_status(&sdata->wdev, cookie,
+							skb->data, skb->len,
+							acked, GFP_ATOMIC);
+		}
+		rcu_read_unlock();
+
+		dev_kfree_skb_any(skb);
+	} else {
+		/* consumes skb */
+		skb_complete_wifi_ack(skb, acked);
+	}
+}
+
 static void ieee80211_report_used_skb(struct ieee80211_local *local,
 				      struct sk_buff *skb, bool dropped)
 {
@@ -439,28 +506,12 @@
 	if (dropped)
 		acked = false;
 
-	if (info->flags & (IEEE80211_TX_INTFL_NL80211_FRAME_TX |
-			   IEEE80211_TX_INTFL_MLME_CONN_TX)) {
-		struct ieee80211_sub_if_data *sdata = NULL;
-		struct ieee80211_sub_if_data *iter_sdata;
-		u64 cookie = (unsigned long)skb;
+	if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) {
+		struct ieee80211_sub_if_data *sdata;
 
 		rcu_read_lock();
 
-		if (skb->dev) {
-			list_for_each_entry_rcu(iter_sdata, &local->interfaces,
-						list) {
-				if (!iter_sdata->dev)
-					continue;
-
-				if (skb->dev == iter_sdata->dev) {
-					sdata = iter_sdata;
-					break;
-				}
-			}
-		} else {
-			sdata = rcu_dereference(local->p2p_sdata);
-		}
+		sdata = ieee80211_sdata_from_skb(local, skb);
 
 		if (!sdata) {
 			skb->dev = NULL;
@@ -478,38 +529,14 @@
 				ieee80211_mgd_conn_tx_status(sdata,
 							     hdr->frame_control,
 							     acked);
-		} else if (ieee80211_is_nullfunc(hdr->frame_control) ||
-			   ieee80211_is_qos_nullfunc(hdr->frame_control)) {
-			cfg80211_probe_status(sdata->dev, hdr->addr1,
-					      cookie, acked, GFP_ATOMIC);
 		} else {
-			cfg80211_mgmt_tx_status(&sdata->wdev, cookie, skb->data,
-						skb->len, acked, GFP_ATOMIC);
+			/* we assign ack frame ID for the others */
+			WARN_ON(1);
 		}
 
 		rcu_read_unlock();
-	}
-
-	if (unlikely(info->ack_frame_id)) {
-		struct sk_buff *ack_skb;
-		unsigned long flags;
-
-		spin_lock_irqsave(&local->ack_status_lock, flags);
-		ack_skb = idr_find(&local->ack_status_frames,
-				   info->ack_frame_id);
-		if (ack_skb)
-			idr_remove(&local->ack_status_frames,
-				   info->ack_frame_id);
-		spin_unlock_irqrestore(&local->ack_status_lock, flags);
-
-		if (ack_skb) {
-			if (!dropped) {
-				/* consumes ack_skb */
-				skb_complete_wifi_ack(ack_skb, acked);
-			} else {
-				dev_kfree_skb_any(ack_skb);
-			}
-		}
+	} else if (info->ack_frame_id) {
+		ieee80211_report_ack_skb(local, info, acked, dropped);
 	}
 }
 
@@ -631,15 +658,15 @@
 	}
 
 	if (acked || noack_success) {
-		    local->dot11TransmittedFrameCount++;
-		    if (!pubsta)
-			    local->dot11MulticastTransmittedFrameCount++;
-		    if (retry_count > 0)
-			    local->dot11RetryCount++;
-		    if (retry_count > 1)
-			    local->dot11MultipleRetryCount++;
+		I802_DEBUG_INC(local->dot11TransmittedFrameCount);
+		if (!pubsta)
+			I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount);
+		if (retry_count > 0)
+			I802_DEBUG_INC(local->dot11RetryCount);
+		if (retry_count > 1)
+			I802_DEBUG_INC(local->dot11MultipleRetryCount);
 	} else {
-		local->dot11FailedCount++;
+		I802_DEBUG_INC(local->dot11FailedCount);
 	}
 }
 EXPORT_SYMBOL(ieee80211_tx_status_noskb);
@@ -703,7 +730,7 @@
 					ieee80211_get_qos_ctl(hdr),
 					sta, true, acked);
 
-		if ((local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) &&
+		if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) &&
 		    (ieee80211_is_data(hdr->frame_control)) &&
 		    (rates_idx != -1))
 			sta->last_tx_rate = info->status.rates[rates_idx];
@@ -770,11 +797,11 @@
 			ieee80211_frame_acked(sta, skb);
 
 		if ((sta->sdata->vif.type == NL80211_IFTYPE_STATION) &&
-		    (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS))
+		    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
 			ieee80211_sta_tx_notify(sta->sdata, (void *) skb->data,
 						acked, info->status.tx_time);
 
-		if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
+		if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
 			if (info->flags & IEEE80211_TX_STAT_ACK) {
 				if (sta->lost_packets)
 					sta->lost_packets = 0;
@@ -802,13 +829,13 @@
 	if ((info->flags & IEEE80211_TX_STAT_ACK) ||
 	    (info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED)) {
 		if (ieee80211_is_first_frag(hdr->seq_ctrl)) {
-			local->dot11TransmittedFrameCount++;
+			I802_DEBUG_INC(local->dot11TransmittedFrameCount);
 			if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
-				local->dot11MulticastTransmittedFrameCount++;
+				I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount);
 			if (retry_count > 0)
-				local->dot11RetryCount++;
+				I802_DEBUG_INC(local->dot11RetryCount);
 			if (retry_count > 1)
-				local->dot11MultipleRetryCount++;
+				I802_DEBUG_INC(local->dot11MultipleRetryCount);
 		}
 
 		/* This counter shall be incremented for an acknowledged MPDU
@@ -818,14 +845,14 @@
 		if (!is_multicast_ether_addr(hdr->addr1) ||
 		    ieee80211_is_data(fc) ||
 		    ieee80211_is_mgmt(fc))
-			local->dot11TransmittedFragmentCount++;
+			I802_DEBUG_INC(local->dot11TransmittedFragmentCount);
 	} else {
 		if (ieee80211_is_first_frag(hdr->seq_ctrl))
-			local->dot11FailedCount++;
+			I802_DEBUG_INC(local->dot11FailedCount);
 	}
 
 	if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
-	    (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
+	    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) &&
 	    !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
 	    local->ps_sdata && !(local->scanning)) {
 		if (info->flags & IEEE80211_TX_STAT_ACK) {
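
ieee80211_report_ack_skb() is the consumer half of the ack_status_frames IDR; the TX path is expected to stash the status skb and record the allocated ID in info->ack_frame_id. A sketch of that producer side (hypothetical helper; the ID range and GFP flags are assumptions, only the IDR-under-ack_status_lock pattern comes from the code above):

static int example_store_ack_skb(struct ieee80211_local *local,
				 struct sk_buff *ack_skb,
				 struct ieee80211_tx_info *info)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&local->ack_status_lock, flags);
	id = idr_alloc(&local->ack_status_frames, ack_skb,
		       1, 0x10000, GFP_ATOMIC);
	spin_unlock_irqrestore(&local->ack_status_lock, flags);

	if (id < 0)
		return id;	/* fall back to no TX-status reporting */

	info->ack_frame_id = id;
	return 0;
}
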
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index fff0d86..ad31b2d 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -167,23 +167,16 @@
 static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata,
 					u16 status_code)
 {
-	struct ieee80211_local *local = sdata->local;
-	u16 capab;
-
 	/* The capability will be 0 when sending a failure code */
 	if (status_code != 0)
 		return 0;
 
-	capab = 0;
-	if (ieee80211_get_sdata_band(sdata) != IEEE80211_BAND_2GHZ)
-		return capab;
+	if (ieee80211_get_sdata_band(sdata) == IEEE80211_BAND_2GHZ) {
+		return WLAN_CAPABILITY_SHORT_SLOT_TIME |
+		       WLAN_CAPABILITY_SHORT_PREAMBLE;
+	}
 
-	if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
-		capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
-	if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
-		capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
-
-	return capab;
+	return 0;
 }
 
 static void ieee80211_tdls_add_link_ie(struct ieee80211_sub_if_data *sdata,
@@ -527,30 +520,19 @@
 
 	/* if HT support is only added in TDLS, we need an HT-operation IE */
 	if (!ap_sta->sta.ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) {
-		struct ieee80211_chanctx_conf *chanctx_conf =
-				rcu_dereference(sdata->vif.chanctx_conf);
-		if (!WARN_ON(!chanctx_conf)) {
-			pos = skb_put(skb, 2 +
-				      sizeof(struct ieee80211_ht_operation));
-			/* send an empty HT operation IE */
-			ieee80211_ie_build_ht_oper(pos, &sta->sta.ht_cap,
-						   &chanctx_conf->def, 0);
-		}
+		pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
+		/* send an empty HT operation IE */
+		ieee80211_ie_build_ht_oper(pos, &sta->sta.ht_cap,
+					   &sdata->vif.bss_conf.chandef, 0);
 	}
 
 	ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
 
 	/* only include VHT-operation if not on the 2.4GHz band */
-	if (band != IEEE80211_BAND_2GHZ && !ap_sta->sta.vht_cap.vht_supported &&
-	    sta->sta.vht_cap.vht_supported) {
-		struct ieee80211_chanctx_conf *chanctx_conf =
-				rcu_dereference(sdata->vif.chanctx_conf);
-		if (!WARN_ON(!chanctx_conf)) {
-			pos = skb_put(skb, 2 +
-				      sizeof(struct ieee80211_vht_operation));
-			ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap,
-						    &chanctx_conf->def);
-		}
+	if (band != IEEE80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) {
+		pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
+		ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap,
+					    &sdata->vif.bss_conf.chandef);
 	}
 
 	rcu_read_unlock();
@@ -953,7 +935,7 @@
 	 * packet through the AP.
 	 */
 	if ((action_code == WLAN_TDLS_TEARDOWN) &&
-	    (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
+	    ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) {
 		bool try_resend; /* Should we keep skb for possible resend */
 
 		/* If not sending directly to peer - no point in keeping skb */
@@ -1194,6 +1176,12 @@
 
 	switch (oper) {
 	case NL80211_TDLS_ENABLE_LINK:
+		if (sdata->vif.csa_active) {
+			tdls_dbg(sdata, "TDLS: disallow link during CSA\n");
+			ret = -EBUSY;
+			break;
+		}
+
 		rcu_read_lock();
 		sta = sta_info_get(sdata, peer);
 		if (!sta) {
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 4c2e769..6f14591 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -69,6 +69,17 @@
 #define CHANCTX_PR_ARG	CHANDEF_PR_ARG,	MIN_CHANDEF_PR_ARG,				\
 			__entry->rx_chains_static, __entry->rx_chains_dynamic
 
+#define KEY_ENTRY	__field(u32, cipher)						\
+			__field(u8, hw_key_idx)						\
+			__field(u8, flags)						\
+			__field(s8, keyidx)
+#define KEY_ASSIGN(k)	__entry->cipher = (k)->cipher;					\
+			__entry->flags = (k)->flags;					\
+			__entry->keyidx = (k)->keyidx;					\
+			__entry->hw_key_idx = (k)->hw_key_idx;
+#define KEY_PR_FMT	" cipher:0x%x, flags=%#x, keyidx=%d, hw_key_idx=%d"
+#define KEY_PR_ARG	__entry->cipher, __entry->flags, __entry->keyidx, __entry->hw_key_idx
+
 
 
 /*
@@ -522,25 +533,19 @@
 		LOCAL_ENTRY
 		VIF_ENTRY
 		STA_ENTRY
-		__field(u32, cipher)
-		__field(u8, hw_key_idx)
-		__field(u8, flags)
-		__field(s8, keyidx)
+		KEY_ENTRY
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		VIF_ASSIGN;
 		STA_ASSIGN;
-		__entry->cipher = key->cipher;
-		__entry->flags = key->flags;
-		__entry->keyidx = key->keyidx;
-		__entry->hw_key_idx = key->hw_key_idx;
+		KEY_ASSIGN(key);
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT  VIF_PR_FMT  STA_PR_FMT,
-		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
+		LOCAL_PR_FMT  VIF_PR_FMT  STA_PR_FMT KEY_PR_FMT,
+		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, KEY_PR_ARG
 	)
 );
 
@@ -656,28 +661,25 @@
 	)
 );
 
-TRACE_EVENT(drv_get_tkip_seq,
+TRACE_EVENT(drv_get_key_seq,
 	TP_PROTO(struct ieee80211_local *local,
-		 u8 hw_key_idx, u32 *iv32, u16 *iv16),
+		 struct ieee80211_key_conf *key),
 
-	TP_ARGS(local, hw_key_idx, iv32, iv16),
+	TP_ARGS(local, key),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
-		__field(u8, hw_key_idx)
-		__field(u32, iv32)
-		__field(u16, iv16)
+		KEY_ENTRY
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
-		__entry->hw_key_idx = hw_key_idx;
-		__entry->iv32 = *iv32;
-		__entry->iv16 = *iv16;
+		KEY_ASSIGN(key);
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT, LOCAL_PR_ARG
+		LOCAL_PR_FMT KEY_PR_FMT,
+		LOCAL_PR_ARG, KEY_PR_ARG
 	)
 );
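
[Factoring the key fields into KEY_ENTRY/KEY_ASSIGN/KEY_PR_FMT/KEY_PR_ARG lets
any future key-related event share them instead of open-coding four fields.
A hypothetical example following the exact pattern of drv_get_key_seq above;
the event name is made up for illustration:

	TRACE_EVENT(drv_example_key_op,
		TP_PROTO(struct ieee80211_local *local,
			 struct ieee80211_key_conf *key),

		TP_ARGS(local, key),

		TP_STRUCT__entry(
			LOCAL_ENTRY
			KEY_ENTRY
		),

		TP_fast_assign(
			LOCAL_ASSIGN;
			KEY_ASSIGN(key);
		),

		TP_printk(
			LOCAL_PR_FMT KEY_PR_FMT,
			LOCAL_PR_ARG, KEY_PR_ARG
		)
	);
]
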
 
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 667111e..8410bb3 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -37,6 +37,16 @@
 
 /* misc utils */
 
+static inline void ieee80211_tx_stats(struct net_device *dev, u32 len)
+{
+	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+
+	u64_stats_update_begin(&tstats->syncp);
+	tstats->tx_packets++;
+	tstats->tx_bytes += len;
+	u64_stats_update_end(&tstats->syncp);
+}
+
 static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
 				 struct sk_buff *skb, int group_addr,
 				 int next_frag_len)
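
[ieee80211_tx_stats() moves TX accounting onto the per-CPU pcpu_sw_netstats
counters in dev->tstats, synchronized by a u64_stats seqcount, replacing the
dev->stats updates removed further below. Readers aggregate per-CPU snapshots
in the usual ndo_get_stats64 style; a sketch of the read side, illustrative
rather than copied from mac80211:

	/* sketch: sum the per-CPU counters that ieee80211_tx_stats()
	 * updates; retry a CPU's snapshot if a writer raced with us
	 */
	static void fetch_tx_stats(struct net_device *dev,
				   struct rtnl_link_stats64 *stats)
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			const struct pcpu_sw_netstats *tstats;
			u64 tx_packets, tx_bytes;
			unsigned int start;

			tstats = per_cpu_ptr(dev->tstats, cpu);
			do {
				start = u64_stats_fetch_begin_irq(&tstats->syncp);
				tx_packets = tstats->tx_packets;
				tx_bytes = tstats->tx_bytes;
			} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

			stats->tx_packets += tx_packets;
			stats->tx_bytes += tx_bytes;
		}
	}
]
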
@@ -201,11 +211,11 @@
 	struct ieee80211_if_managed *ifmgd;
 
 	/* driver doesn't support power save */
-	if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
+	if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
 		return TX_CONTINUE;
 
 	/* hardware does dynamic power save */
-	if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
+	if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
 		return TX_CONTINUE;
 
 	/* dynamic power save disabled */
@@ -421,7 +431,7 @@
 	if (ieee80211_is_probe_req(hdr->frame_control))
 		return TX_CONTINUE;
 
-	if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
+	if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL))
 		info->hw_queue = tx->sdata->vif.cab_queue;
 
 	/* no stations in PS mode */
@@ -431,7 +441,7 @@
 	info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
 
 	/* device releases frame after DTIM beacon */
-	if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING))
+	if (!ieee80211_hw_check(&tx->local->hw, HOST_BROADCAST_PS_BUFFERING))
 		return TX_CONTINUE;
 
 	/* buffered in mac80211 */
@@ -987,7 +997,6 @@
 
 	skb_queue_walk(&tx->skbs, skb) {
 		ac = skb_get_queue_mapping(skb);
-		tx->sta->tx_fragments++;
 		tx->sta->tx_bytes[ac] += skb->len;
 	}
 	if (ac >= 0)
@@ -1176,8 +1185,8 @@
 
 	if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
 	    !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
-	    (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) &&
-	    !(local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)) {
+	    ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
+	    !ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW)) {
 		struct tid_ampdu_tx *tid_tx;
 
 		qc = ieee80211_get_qos_ctl(hdr);
@@ -1420,7 +1429,7 @@
 			vif = &sdata->vif;
 			info->hw_queue =
 				vif->hw_queue[skb_get_queue_mapping(skb)];
-		} else if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) {
+		} else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
 			dev_kfree_skb(skb);
 			return true;
 		} else
@@ -1466,7 +1475,7 @@
 	CALL_TXH(ieee80211_tx_h_ps_buf);
 	CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
 	CALL_TXH(ieee80211_tx_h_select_key);
-	if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
+	if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
 		CALL_TXH(ieee80211_tx_h_rate_ctrl);
 
 	if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
@@ -1481,7 +1490,7 @@
 	/* handlers after fragment must be aware of tx info fragmentation! */
 	CALL_TXH(ieee80211_tx_h_stats);
 	CALL_TXH(ieee80211_tx_h_encrypt);
-	if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
+	if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
 		CALL_TXH(ieee80211_tx_h_calculate_duration);
 #undef CALL_TXH
 
@@ -1571,7 +1580,7 @@
 
 	/* set up hw_queue value early */
 	if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
-	    !(local->hw.flags & IEEE80211_HW_QUEUE_CONTROL))
+	    !ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
 		info->hw_queue =
 			sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
 
@@ -1598,9 +1607,9 @@
 	}
 
 	if (skb_cloned(skb) &&
-	    (!(local->hw.flags & IEEE80211_HW_SUPPORTS_CLONED_SKBS) ||
+	    (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
 	     !skb_clone_writable(skb, ETH_HLEN) ||
-	     sdata->crypto_tx_tailroom_needed_cnt))
+	     (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)))
 		I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
 	else if (head_need || tail_need)
 		I802_DEBUG_INC(local->tx_expand_skb_head);
@@ -2387,12 +2396,455 @@
 	return ERR_PTR(ret);
 }
 
+/*
+ * fast-xmit overview
+ *
+ * The core idea of this fast-xmit is to remove per-packet checks by checking
+ * them out of band. ieee80211_check_fast_xmit() implements the out-of-band
+ * checks that are needed to get the sta->fast_tx pointer assigned, after which
+ * much less work can be done per packet. For example, fragmentation must be
+ * disabled or the fast_tx pointer will not be set. All the conditions are seen
+ * in the code here.
+ *
+ * Once assigned, the fast_tx data structure also caches the per-packet 802.11
+ * header and other data to aid packet processing in ieee80211_xmit_fast().
+ *
+ * The most difficult part of this is that when any of these assumptions
+ * change, an external trigger (i.e. a call to ieee80211_clear_fast_xmit(),
+ * ieee80211_check_fast_xmit() or friends) is required to reset the data,
+ * since the per-packet code no longer checks the conditions. This is reflected
+ * by the calls to these functions throughout the rest of the code, and must be
+ * maintained if any of the TX path checks change.
+ */
+
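
[The cache object the overview refers to is struct ieee80211_fast_tx, defined
in net/mac80211/sta_info.h and not part of this hunk. Reconstructed from how
the code below fills and consumes it, it roughly looks like the following;
field order and sizes are inferred from usage, not authoritative:

	struct ieee80211_fast_tx {
		struct ieee80211_key *key;	/* key for hw crypto, if any */
		u8 hdr_len;			/* length of the cached header */
		u8 sa_offs, da_offs;		/* where to patch SA/DA per packet */
		u8 pn_offs;			/* where to write the PN, 0 if unused */
		u8 band;
		u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV +
		       sizeof(rfc1042_header)];	/* 802.11 hdr + QoS + IV + SNAP */
		struct rcu_head rcu_head;	/* for kfree_rcu() on replacement */
	};
]
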
+void ieee80211_check_fast_xmit(struct sta_info *sta)
+{
+	struct ieee80211_fast_tx build = {}, *fast_tx = NULL, *old;
+	struct ieee80211_local *local = sta->local;
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	struct ieee80211_hdr *hdr = (void *)build.hdr;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	__le16 fc;
+
+	if (!ieee80211_hw_check(&local->hw, SUPPORT_FAST_XMIT))
+		return;
+
+	/* Locking here protects both the pointer itself, and against concurrent
+	 * invocations winning data access races to, e.g., the key pointer that
+	 * is used.
+	 * Without it, the invocation of this function right after the key
+	 * pointer changes wouldn't be sufficient, as another CPU could access
+	 * the pointer, then stall, and then do the cache update after the CPU
+	 * that invalidated the key.
+	 * With the locking, such scenarios cannot happen as the check for the
+	 * key and the fast-tx assignment are done atomically, so the CPU that
+	 * modifies the key will either wait or the other one will see the key
+	 * cleared/changed already.
+	 */
+	spin_lock_bh(&sta->lock);
+	if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
+	    !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
+	    sdata->vif.type == NL80211_IFTYPE_STATION)
+		goto out;
+
+	if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+		goto out;
+
+	if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
+	    test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
+	    test_sta_flag(sta, WLAN_STA_PS_DELIVER))
+		goto out;
+
+	if (sdata->noack_map)
+		goto out;
+
+	/* fast-xmit doesn't handle fragmentation at all */
+	if (local->hw.wiphy->frag_threshold != (u32)-1 &&
+	    !local->ops->set_frag_threshold)
+		goto out;
+
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (!chanctx_conf) {
+		rcu_read_unlock();
+		goto out;
+	}
+	build.band = chanctx_conf->def.chan->band;
+	rcu_read_unlock();
+
+	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
+
+	switch (sdata->vif.type) {
+	case NL80211_IFTYPE_ADHOC:
+		/* DA SA BSSID */
+		build.da_offs = offsetof(struct ieee80211_hdr, addr1);
+		build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
+		memcpy(hdr->addr3, sdata->u.ibss.bssid, ETH_ALEN);
+		build.hdr_len = 24;
+		break;
+	case NL80211_IFTYPE_STATION:
+		if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+			/* DA SA BSSID */
+			build.da_offs = offsetof(struct ieee80211_hdr, addr1);
+			build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
+			memcpy(hdr->addr3, sdata->u.mgd.bssid, ETH_ALEN);
+			build.hdr_len = 24;
+			break;
+		}
+
+		if (sdata->u.mgd.use_4addr) {
+			/* non-regular ethertype cannot use the fastpath */
+			fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
+					  IEEE80211_FCTL_TODS);
+			/* RA TA DA SA */
+			memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN);
+			memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+			build.da_offs = offsetof(struct ieee80211_hdr, addr3);
+			build.sa_offs = offsetof(struct ieee80211_hdr, addr4);
+			build.hdr_len = 30;
+			break;
+		}
+		fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
+		/* BSSID SA DA */
+		memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN);
+		build.da_offs = offsetof(struct ieee80211_hdr, addr3);
+		build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
+		build.hdr_len = 24;
+		break;
+	case NL80211_IFTYPE_AP_VLAN:
+		if (sdata->wdev.use_4addr) {
+			fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
+					  IEEE80211_FCTL_TODS);
+			/* RA TA DA SA */
+			memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
+			memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+			build.da_offs = offsetof(struct ieee80211_hdr, addr3);
+			build.sa_offs = offsetof(struct ieee80211_hdr, addr4);
+			build.hdr_len = 30;
+			break;
+		}
+		/* fall through */
+	case NL80211_IFTYPE_AP:
+		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
+		/* DA BSSID SA */
+		build.da_offs = offsetof(struct ieee80211_hdr, addr1);
+		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+		build.sa_offs = offsetof(struct ieee80211_hdr, addr3);
+		build.hdr_len = 24;
+		break;
+	default:
+		/* not handled on fast-xmit */
+		goto out;
+	}
+
+	if (sta->sta.wme) {
+		build.hdr_len += 2;
+		fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+	}
+
+	/* We only cache the key pointer here, so there's no point in using
+	 * rcu_dereference(); that's fine because the code that changes the
+	 * pointers will call this function again after doing so. For a single
+	 * CPU that would be enough, for multiple CPUs see the comment above.
+	 */
+	build.key = rcu_access_pointer(sta->ptk[sta->ptk_idx]);
+	if (!build.key)
+		build.key = rcu_access_pointer(sdata->default_unicast_key);
+	if (build.key) {
+		bool gen_iv, iv_spc, mmic;
+
+		gen_iv = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
+		iv_spc = build.key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+		mmic = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC;
+
+		/* don't handle software crypto */
+		if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
+			goto out;
+
+		switch (build.key->conf.cipher) {
+		case WLAN_CIPHER_SUITE_CCMP:
+		case WLAN_CIPHER_SUITE_CCMP_256:
+			/* add fixed key ID */
+			if (gen_iv) {
+				(build.hdr + build.hdr_len)[3] =
+					0x20 | (build.key->conf.keyidx << 6);
+				build.pn_offs = build.hdr_len;
+			}
+			if (gen_iv || iv_spc)
+				build.hdr_len += IEEE80211_CCMP_HDR_LEN;
+			break;
+		case WLAN_CIPHER_SUITE_GCMP:
+		case WLAN_CIPHER_SUITE_GCMP_256:
+			/* add fixed key ID */
+			if (gen_iv) {
+				(build.hdr + build.hdr_len)[3] =
+					0x20 | (build.key->conf.keyidx << 6);
+				build.pn_offs = build.hdr_len;
+			}
+			if (gen_iv || iv_spc)
+				build.hdr_len += IEEE80211_GCMP_HDR_LEN;
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			/* cannot handle MMIC or IV generation in xmit-fast */
+			if (mmic || gen_iv)
+				goto out;
+			if (iv_spc)
+				build.hdr_len += IEEE80211_TKIP_IV_LEN;
+			break;
+		case WLAN_CIPHER_SUITE_WEP40:
+		case WLAN_CIPHER_SUITE_WEP104:
+			/* cannot handle IV generation in fast-xmit */
+			if (gen_iv)
+				goto out;
+			if (iv_spc)
+				build.hdr_len += IEEE80211_WEP_IV_LEN;
+			break;
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+		case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+			WARN(1,
+			     "management cipher suite 0x%x enabled for data\n",
+			     build.key->conf.cipher);
+			goto out;
+		default:
+			/* we don't know how to generate IVs for this at all */
+			if (WARN_ON(gen_iv))
+				goto out;
+			/* pure hardware keys are OK, of course */
+			if (!(build.key->flags & KEY_FLAG_CIPHER_SCHEME))
+				break;
+			/* cipher scheme might require space allocation */
+			if (iv_spc &&
+			    build.key->conf.iv_len > IEEE80211_FAST_XMIT_MAX_IV)
+				goto out;
+			if (iv_spc)
+				build.hdr_len += build.key->conf.iv_len;
+		}
+
+		fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+	}
+
+	hdr->frame_control = fc;
+
+	memcpy(build.hdr + build.hdr_len,
+	       rfc1042_header,  sizeof(rfc1042_header));
+	build.hdr_len += sizeof(rfc1042_header);
+
+	fast_tx = kmemdup(&build, sizeof(build), GFP_ATOMIC);
+	/* if the kmemdup fails, continue w/o fast_tx */
+	if (!fast_tx)
+		goto out;
+
+ out:
+	/* we might have raced against another call to this function */
+	old = rcu_dereference_protected(sta->fast_tx,
+					lockdep_is_held(&sta->lock));
+	rcu_assign_pointer(sta->fast_tx, fast_tx);
+	if (old)
+		kfree_rcu(old, rcu_head);
+	spin_unlock_bh(&sta->lock);
+}
+
+void ieee80211_check_fast_xmit_all(struct ieee80211_local *local)
+{
+	struct sta_info *sta;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sta, &local->sta_list, list)
+		ieee80211_check_fast_xmit(sta);
+	rcu_read_unlock();
+}
+
+void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct sta_info *sta;
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(sta, &local->sta_list, list) {
+		if (sdata != sta->sdata &&
+		    (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
+			continue;
+		ieee80211_check_fast_xmit(sta);
+	}
+
+	rcu_read_unlock();
+}
+
+void ieee80211_clear_fast_xmit(struct sta_info *sta)
+{
+	struct ieee80211_fast_tx *fast_tx;
+
+	spin_lock_bh(&sta->lock);
+	fast_tx = rcu_dereference_protected(sta->fast_tx,
+					    lockdep_is_held(&sta->lock));
+	RCU_INIT_POINTER(sta->fast_tx, NULL);
+	spin_unlock_bh(&sta->lock);
+
+	if (fast_tx)
+		kfree_rcu(fast_tx, rcu_head);
+}
+
+static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
+				struct net_device *dev, struct sta_info *sta,
+				struct ieee80211_fast_tx *fast_tx,
+				struct sk_buff *skb)
+{
+	struct ieee80211_local *local = sdata->local;
+	u16 ethertype = (skb->data[12] << 8) | skb->data[13];
+	int extra_head = fast_tx->hdr_len - (ETH_HLEN - 2);
+	int hw_headroom = sdata->local->hw.extra_tx_headroom;
+	struct ethhdr eth;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr = (void *)fast_tx->hdr;
+	struct ieee80211_tx_data tx;
+	ieee80211_tx_result r;
+	struct tid_ampdu_tx *tid_tx = NULL;
+	u8 tid = IEEE80211_NUM_TIDS;
+
+	/* control port protocol needs a lot of special handling */
+	if (cpu_to_be16(ethertype) == sdata->control_port_protocol)
+		return false;
+
+	/* only RFC 1042 SNAP */
+	if (ethertype < ETH_P_802_3_MIN)
+		return false;
+
+	/* don't handle TX status request here either */
+	if (skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)
+		return false;
+
+	if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
+		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+		tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
+		if (tid_tx) {
+			if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state))
+				return false;
+			if (tid_tx->timeout)
+				tid_tx->last_tx = jiffies;
+		}
+	}
+
+	/* after this point (skb is modified) we cannot return false */
+
+	if (skb_shared(skb)) {
+		struct sk_buff *tmp_skb = skb;
+
+		skb = skb_clone(skb, GFP_ATOMIC);
+		kfree_skb(tmp_skb);
+
+		if (!skb)
+			return true;
+	}
+
+	ieee80211_tx_stats(dev, skb->len + extra_head);
+
+	/* will not be crypto-handled beyond what we do here, so use false
+	 * as the may-encrypt argument for the resize to not account for
+	 * more room than we already have in 'extra_head'
+	 */
+	if (unlikely(ieee80211_skb_resize(sdata, skb,
+					  max_t(int, extra_head + hw_headroom -
+						     skb_headroom(skb), 0),
+					  false))) {
+		kfree_skb(skb);
+		return true;
+	}
+
+	memcpy(&eth, skb->data, ETH_HLEN - 2);
+	hdr = (void *)skb_push(skb, extra_head);
+	memcpy(skb->data, fast_tx->hdr, fast_tx->hdr_len);
+	memcpy(skb->data + fast_tx->da_offs, eth.h_dest, ETH_ALEN);
+	memcpy(skb->data + fast_tx->sa_offs, eth.h_source, ETH_ALEN);
+
+	memset(info, 0, sizeof(*info));
+	info->band = fast_tx->band;
+	info->control.vif = &sdata->vif;
+	info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT |
+		      IEEE80211_TX_CTL_DONTFRAG |
+		      (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
+
+	if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
+		*ieee80211_get_qos_ctl(hdr) = tid;
+		hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
+	} else {
+		info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+		hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
+		sdata->sequence_number += 0x10;
+	}
+
+	sta->tx_msdu[tid]++;
+
+	info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
+
+	__skb_queue_head_init(&tx.skbs);
+
+	tx.flags = IEEE80211_TX_UNICAST;
+	tx.local = local;
+	tx.sdata = sdata;
+	tx.sta = sta;
+	tx.key = fast_tx->key;
+
+	if (fast_tx->key)
+		info->control.hw_key = &fast_tx->key->conf;
+
+	if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
+		tx.skb = skb;
+		r = ieee80211_tx_h_rate_ctrl(&tx);
+		skb = tx.skb;
+		tx.skb = NULL;
+
+		if (r != TX_CONTINUE) {
+			if (r != TX_QUEUED)
+				kfree_skb(skb);
+			return true;
+		}
+	}
+
+	/* statistics normally done by ieee80211_tx_h_stats (but that
+	 * has to consider fragmentation, so is more complex)
+	 */
+	sta->tx_bytes[skb_get_queue_mapping(skb)] += skb->len;
+	sta->tx_packets[skb_get_queue_mapping(skb)]++;
+
+	if (fast_tx->pn_offs) {
+		u64 pn;
+		u8 *crypto_hdr = skb->data + fast_tx->pn_offs;
+
+		switch (fast_tx->key->conf.cipher) {
+		case WLAN_CIPHER_SUITE_CCMP:
+		case WLAN_CIPHER_SUITE_CCMP_256:
+		case WLAN_CIPHER_SUITE_GCMP:
+		case WLAN_CIPHER_SUITE_GCMP_256:
+			pn = atomic64_inc_return(&fast_tx->key->conf.tx_pn);
+			crypto_hdr[0] = pn;
+			crypto_hdr[1] = pn >> 8;
+			crypto_hdr[4] = pn >> 16;
+			crypto_hdr[5] = pn >> 24;
+			crypto_hdr[6] = pn >> 32;
+			crypto_hdr[7] = pn >> 40;
+			break;
+		}
+	}
+
+	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+		sdata = container_of(sdata->bss,
+				     struct ieee80211_sub_if_data, u.ap);
+
+	__skb_queue_tail(&tx.skbs, skb);
+	ieee80211_tx_frags(local, &sdata->vif, &sta->sta, &tx.skbs, false);
+	return true;
+}
+
 void __ieee80211_subif_start_xmit(struct sk_buff *skb,
 				  struct net_device *dev,
 				  u32 info_flags)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct sta_info *sta;
+	struct sk_buff *next;
 
 	if (unlikely(skb->len < ETH_HLEN)) {
 		kfree_skb(skb);
@@ -2401,20 +2853,67 @@
 
 	rcu_read_lock();
 
-	if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
-		kfree_skb(skb);
-		goto out;
+	if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
+		goto out_free;
+
+	if (!IS_ERR_OR_NULL(sta)) {
+		struct ieee80211_fast_tx *fast_tx;
+
+		fast_tx = rcu_dereference(sta->fast_tx);
+
+		if (fast_tx &&
+		    ieee80211_xmit_fast(sdata, dev, sta, fast_tx, skb))
+			goto out;
 	}
 
-	skb = ieee80211_build_hdr(sdata, skb, info_flags, sta);
-	if (IS_ERR(skb))
-		goto out;
+	if (skb_is_gso(skb)) {
+		struct sk_buff *segs;
 
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
-	dev->trans_start = jiffies;
+		segs = skb_gso_segment(skb, 0);
+		if (IS_ERR(segs)) {
+			goto out_free;
+		} else if (segs) {
+			consume_skb(skb);
+			skb = segs;
+		}
+	} else {
+		/* we cannot process non-linear frames on this path */
+		if (skb_linearize(skb)) {
+			kfree_skb(skb);
+			goto out;
+		}
 
-	ieee80211_xmit(sdata, sta, skb);
+		/* the frame could be fragmented, software-encrypted, and other
+		 * things so we cannot really handle checksum offload with it -
+		 * fix it up in software before we handle anything else.
+		 */
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			skb_set_transport_header(skb,
+						 skb_checksum_start_offset(skb));
+			if (skb_checksum_help(skb))
+				goto out_free;
+		}
+	}
+
+	next = skb;
+	while (next) {
+		skb = next;
+		next = skb->next;
+
+		skb->prev = NULL;
+		skb->next = NULL;
+
+		skb = ieee80211_build_hdr(sdata, skb, info_flags, sta);
+		if (IS_ERR(skb))
+			goto out;
+
+		ieee80211_tx_stats(dev, skb->len);
+
+		ieee80211_xmit(sdata, sta, skb);
+	}
+	goto out;
+ out_free:
+	kfree_skb(skb);
  out:
 	rcu_read_unlock();
 }
@@ -3308,7 +3807,7 @@
 	synchronize_net();
 
 	/* Tear down BA sessions so we stop aggregating on this TID */
-	if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) {
+	if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) {
 		set_sta_flag(sta, WLAN_STA_BLOCK_BA);
 		__ieee80211_stop_tx_ba_session(sta, tid,
 					       AGG_STOP_LOCAL_REQUEST);
@@ -3322,7 +3821,7 @@
 	ieee80211_wake_vif_queues(local, sdata,
 				  IEEE80211_QUEUE_STOP_REASON_RESERVE_TID);
 
-	if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)
+	if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION))
 		clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
 
 	ret = 0;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index b864ebc..43e5aad 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -564,7 +564,7 @@
 {
 	unsigned int queues;
 
-	if (sdata && local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) {
+	if (sdata && ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
 		int ac;
 
 		queues = 0;
@@ -592,7 +592,7 @@
 	 * If no queue was set, or if the HW doesn't support
 	 * IEEE80211_HW_QUEUE_CONTROL - flush all queues
 	 */
-	if (!queues || !(local->hw.flags & IEEE80211_HW_QUEUE_CONTROL))
+	if (!queues || !ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
 		queues = ieee80211_get_vif_queues(local, sdata);
 
 	ieee80211_stop_queues_by_reason(&local->hw, queues,
@@ -2046,7 +2046,7 @@
 	 * about the sessions, but we and the AP still think they
 	 * are active. This is really a workaround though.
 	 */
-	if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
+	if (ieee80211_hw_check(hw, AMPDU_AGGREGATION)) {
 		mutex_lock(&local->sta_mtx);
 
 		list_for_each_entry(sta, &local->sta_list, list) {
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 9d63d93..943f760 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -444,7 +444,7 @@
 	hdr = (struct ieee80211_hdr *) pos;
 	pos += hdrlen;
 
-	pn64 = atomic64_inc_return(&key->u.ccmp.tx_pn);
+	pn64 = atomic64_inc_return(&key->conf.tx_pn);
 
 	pn[5] = pn64;
 	pn[4] = pn64 >> 8;
@@ -670,7 +670,7 @@
 	hdr = (struct ieee80211_hdr *)pos;
 	pos += hdrlen;
 
-	pn64 = atomic64_inc_return(&key->u.gcmp.tx_pn);
+	pn64 = atomic64_inc_return(&key->conf.tx_pn);
 
 	pn[5] = pn64;
 	pn[4] = pn64 >> 8;
@@ -940,7 +940,7 @@
 	mmie->key_id = cpu_to_le16(key->conf.keyidx);
 
 	/* PN = PN + 1 */
-	pn64 = atomic64_inc_return(&key->u.aes_cmac.tx_pn);
+	pn64 = atomic64_inc_return(&key->conf.tx_pn);
 
 	bip_ipn_set64(mmie->sequence_number, pn64);
 
@@ -984,7 +984,7 @@
 	mmie->key_id = cpu_to_le16(key->conf.keyidx);
 
 	/* PN = PN + 1 */
-	pn64 = atomic64_inc_return(&key->u.aes_cmac.tx_pn);
+	pn64 = atomic64_inc_return(&key->conf.tx_pn);
 
 	bip_ipn_set64(mmie->sequence_number, pn64);
 
@@ -1129,7 +1129,7 @@
 	mmie->key_id = cpu_to_le16(key->conf.keyidx);
 
 	/* PN = PN + 1 */
-	pn64 = atomic64_inc_return(&key->u.aes_gmac.tx_pn);
+	pn64 = atomic64_inc_return(&key->conf.tx_pn);
 
 	bip_ipn_set64(mmie->sequence_number, pn64);
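
[The five wpa.c hunks above collapse the per-cipher TX packet-number counters
(key->u.ccmp.tx_pn, u.gcmp.tx_pn, u.aes_cmac.tx_pn, u.aes_gmac.tx_pn) into a
single atomic64 at key->conf.tx_pn, the same counter the new fast path in
tx.c increments. The CCMP/GCMP header byte layout both paths produce can be
verified with a small self-contained program; plain C, for illustration only:

	#include <stdint.h>
	#include <stdio.h>

	/* build the 8-byte CCMP/GCMP header: PN0..PN1, reserved,
	 * ExtIV + key ID, PN2..PN5 (least significant byte first)
	 */
	static void ccmp_hdr_from_pn(uint8_t hdr[8], uint64_t pn, int keyidx)
	{
		hdr[0] = pn;			/* PN0 */
		hdr[1] = pn >> 8;		/* PN1 */
		hdr[2] = 0;			/* reserved */
		hdr[3] = 0x20 | (keyidx << 6);	/* ExtIV | key ID */
		hdr[4] = pn >> 16;		/* PN2 */
		hdr[5] = pn >> 24;		/* PN3 */
		hdr[6] = pn >> 32;		/* PN4 */
		hdr[7] = pn >> 40;		/* PN5 */
	}

	int main(void)
	{
		uint8_t hdr[8];
		int i;

		ccmp_hdr_from_pn(hdr, 0x010203040506ULL, 0);
		for (i = 0; i < 8; i++)
			printf("%02x ", hdr[i]);	/* 06 05 00 20 04 03 02 01 */
		printf("\n");
		return 0;
	}
]
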
 
diff --git a/net/mac802154/Kconfig b/net/mac802154/Kconfig
index aa462b4..fb45287 100644
--- a/net/mac802154/Kconfig
+++ b/net/mac802154/Kconfig
@@ -2,6 +2,7 @@
 	tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)"
 	depends on IEEE802154
 	select CRC_CCITT
+	select CRYPTO
 	select CRYPTO_AUTHENC
 	select CRYPTO_CCM
 	select CRYPTO_CTR
diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile
index 702d8b4..17a51e8 100644
--- a/net/mac802154/Makefile
+++ b/net/mac802154/Makefile
@@ -1,5 +1,7 @@
 obj-$(CONFIG_MAC802154)	+= mac802154.o
 mac802154-objs		:= main.o rx.o tx.o mac_cmd.o mib.o \
-			   iface.o llsec.o util.o cfg.o
+			   iface.o llsec.o util.o cfg.o trace.o
+
+CFLAGS_trace.o := -I$(src)
 
 ccflags-y += -D__CHECK_ENDIAN__
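
[trace.o is the tracepoint definition unit added later in this series as
net/mac802154/trace.{c,h}. The header ends by redefining TRACE_INCLUDE_PATH
to ".", so <trace/define_trace.h> re-includes "trace.h" relative to the
include search path instead of include/trace/events/; the per-object
CFLAGS_trace.o := -I$(src) line above puts the mac802154 source directory on
that path for this one object. The matching footer, shown in full in the new
trace.h below:

	#undef TRACE_INCLUDE_PATH
	#define TRACE_INCLUDE_PATH .
	#undef TRACE_INCLUDE_FILE
	#define TRACE_INCLUDE_FILE trace
	#include <trace/define_trace.h>
]
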
diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c
index 70be9c7..317c466 100644
--- a/net/mac802154/cfg.c
+++ b/net/mac802154/cfg.c
@@ -73,9 +73,9 @@
 
 	ASSERT_RTNL();
 
-	/* check if phy support this setting */
-	if (!(wpan_phy->channels_supported[page] & BIT(channel)))
-		return -EINVAL;
+	if (wpan_phy->current_page == page &&
+	    wpan_phy->current_channel == channel)
+		return 0;
 
 	ret = drv_set_channel(local, page, channel);
 	if (!ret) {
@@ -95,9 +95,8 @@
 
 	ASSERT_RTNL();
 
-	/* check if phy support this setting */
-	if (!(local->hw.flags & IEEE802154_HW_CCA_MODE))
-		return -EOPNOTSUPP;
+	if (wpan_phy_cca_cmp(&wpan_phy->cca, cca))
+		return 0;
 
 	ret = drv_set_cca_mode(local, cca);
 	if (!ret)
@@ -107,20 +106,49 @@
 }
 
 static int
+ieee802154_set_cca_ed_level(struct wpan_phy *wpan_phy, s32 ed_level)
+{
+	struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+	int ret;
+
+	ASSERT_RTNL();
+
+	if (wpan_phy->cca_ed_level == ed_level)
+		return 0;
+
+	ret = drv_set_cca_ed_level(local, ed_level);
+	if (!ret)
+		wpan_phy->cca_ed_level = ed_level;
+
+	return ret;
+}
+
+static int
+ieee802154_set_tx_power(struct wpan_phy *wpan_phy, s32 power)
+{
+	struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+	int ret;
+
+	ASSERT_RTNL();
+
+	if (wpan_phy->transmit_power == power)
+		return 0;
+
+	ret = drv_set_tx_power(local, power);
+	if (!ret)
+		wpan_phy->transmit_power = power;
+
+	return ret;
+}
+
+static int
 ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
 		      __le16 pan_id)
 {
 	ASSERT_RTNL();
 
-	/* TODO
-	 * I am not sure about to check here on broadcast pan_id.
-	 * Broadcast is a valid setting, comment from 802.15.4:
-	 * If this value is 0xffff, the device is not associated.
-	 *
-	 * This could useful to simple deassociate an device.
-	 */
-	if (pan_id == cpu_to_le16(IEEE802154_PAN_ID_BROADCAST))
-		return -EINVAL;
+	if (wpan_dev->pan_id == pan_id)
+		return 0;
 
 	wpan_dev->pan_id = pan_id;
 	return 0;
@@ -131,12 +159,11 @@
 				struct wpan_dev *wpan_dev,
 				u8 min_be, u8 max_be)
 {
-	struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
-
 	ASSERT_RTNL();
 
-	if (!(local->hw.flags & IEEE802154_HW_CSMA_PARAMS))
-		return -EOPNOTSUPP;
+	if (wpan_dev->min_be == min_be &&
+	    wpan_dev->max_be == max_be)
+		return 0;
 
 	wpan_dev->min_be = min_be;
 	wpan_dev->max_be = max_be;
@@ -149,20 +176,8 @@
 {
 	ASSERT_RTNL();
 
-	/* TODO
-	 * I am not sure about to check here on broadcast short_addr.
-	 * Broadcast is a valid setting, comment from 802.15.4:
-	 * A value of 0xfffe indicates that the device has
-	 * associated but has not been allocated an address. A
-	 * value of 0xffff indicates that the device does not
-	 * have a short address.
-	 *
-	 * I think we should allow to set these settings but
-	 * don't allow to allow socket communication with it.
-	 */
-	if (short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC) ||
-	    short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST))
-		return -EINVAL;
+	if (wpan_dev->short_addr == short_addr)
+		return 0;
 
 	wpan_dev->short_addr = short_addr;
 	return 0;
@@ -173,12 +188,10 @@
 				 struct wpan_dev *wpan_dev,
 				 u8 max_csma_backoffs)
 {
-	struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
-
 	ASSERT_RTNL();
 
-	if (!(local->hw.flags & IEEE802154_HW_CSMA_PARAMS))
-		return -EOPNOTSUPP;
+	if (wpan_dev->csma_retries == max_csma_backoffs)
+		return 0;
 
 	wpan_dev->csma_retries = max_csma_backoffs;
 	return 0;
@@ -189,12 +202,10 @@
 				 struct wpan_dev *wpan_dev,
 				 s8 max_frame_retries)
 {
-	struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
-
 	ASSERT_RTNL();
 
-	if (!(local->hw.flags & IEEE802154_HW_FRAME_RETRIES))
-		return -EOPNOTSUPP;
+	if (wpan_dev->frame_retries == max_frame_retries)
+		return 0;
 
 	wpan_dev->frame_retries = max_frame_retries;
 	return 0;
@@ -204,12 +215,10 @@
 ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
 			bool mode)
 {
-	struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
-
 	ASSERT_RTNL();
 
-	if (!(local->hw.flags & IEEE802154_HW_LBT))
-		return -EOPNOTSUPP;
+	if (wpan_dev->lbt == mode)
+		return 0;
 
 	wpan_dev->lbt = mode;
 	return 0;
@@ -222,6 +231,8 @@
 	.del_virtual_intf = ieee802154_del_iface,
 	.set_channel = ieee802154_set_channel,
 	.set_cca_mode = ieee802154_set_cca_mode,
+	.set_cca_ed_level = ieee802154_set_cca_ed_level,
+	.set_tx_power = ieee802154_set_tx_power,
 	.set_pan_id = ieee802154_set_pan_id,
 	.set_short_addr = ieee802154_set_short_addr,
 	.set_backoff_exponent = ieee802154_set_backoff_exponent,
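
[A side effect of the cfg.c simplification above is that the 802.15.4
sentinel values described in the removed TODO comments are now accepted by
the setters instead of rejected with -EINVAL; the new checks only short-cut
when the requested value equals the current one. For reference, the relevant
constants, with the values defined in linux/ieee802154.h:

	#define IEEE802154_PAN_ID_BROADCAST	0xffff	/* device not associated */
	#define IEEE802154_ADDR_SHORT_BROADCAST	0xffff	/* device has no short address */
	#define IEEE802154_ADDR_SHORT_UNSPEC	0xfffe	/* associated, no short address allocated */
]
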
diff --git a/net/mac802154/driver-ops.h b/net/mac802154/driver-ops.h
index a053335..0550f33 100644
--- a/net/mac802154/driver-ops.h
+++ b/net/mac802154/driver-ops.h
@@ -7,6 +7,7 @@
 #include <net/mac802154.h>
 
 #include "ieee802154_i.h"
+#include "trace.h"
 
 static inline int
 drv_xmit_async(struct ieee802154_local *local, struct sk_buff *skb)
@@ -27,19 +28,25 @@
 
 static inline int drv_start(struct ieee802154_local *local)
 {
+	int ret;
+
 	might_sleep();
 
+	trace_802154_drv_start(local);
 	local->started = true;
 	smp_mb();
-
-	return local->ops->start(&local->hw);
+	ret = local->ops->start(&local->hw);
+	trace_802154_drv_return_int(local, ret);
+	return ret;
 }
 
 static inline void drv_stop(struct ieee802154_local *local)
 {
 	might_sleep();
 
+	trace_802154_drv_stop(local);
 	local->ops->stop(&local->hw);
+	trace_802154_drv_return_void(local);
 
 	/* sync away all work on the tasklet before clearing started */
 	tasklet_disable(&local->tasklet);
@@ -53,13 +60,20 @@
 static inline int
 drv_set_channel(struct ieee802154_local *local, u8 page, u8 channel)
 {
+	int ret;
+
 	might_sleep();
 
-	return local->ops->set_channel(&local->hw, page, channel);
+	trace_802154_drv_set_channel(local, page, channel);
+	ret = local->ops->set_channel(&local->hw, page, channel);
+	trace_802154_drv_return_int(local, ret);
+	return ret;
 }
 
-static inline int drv_set_tx_power(struct ieee802154_local *local, s8 dbm)
+static inline int drv_set_tx_power(struct ieee802154_local *local, s32 mbm)
 {
+	int ret;
+
 	might_sleep();
 
 	if (!local->ops->set_txpower) {
@@ -67,12 +81,17 @@
 		return -EOPNOTSUPP;
 	}
 
-	return local->ops->set_txpower(&local->hw, dbm);
+	trace_802154_drv_set_tx_power(local, mbm);
+	ret = local->ops->set_txpower(&local->hw, mbm);
+	trace_802154_drv_return_int(local, ret);
+	return ret;
 }
 
 static inline int drv_set_cca_mode(struct ieee802154_local *local,
 				   const struct wpan_phy_cca *cca)
 {
+	int ret;
+
 	might_sleep();
 
 	if (!local->ops->set_cca_mode) {
@@ -80,11 +99,16 @@
 		return -EOPNOTSUPP;
 	}
 
-	return local->ops->set_cca_mode(&local->hw, cca);
+	trace_802154_drv_set_cca_mode(local, cca);
+	ret = local->ops->set_cca_mode(&local->hw, cca);
+	trace_802154_drv_return_int(local, ret);
+	return ret;
 }
 
 static inline int drv_set_lbt_mode(struct ieee802154_local *local, bool mode)
 {
+	int ret;
+
 	might_sleep();
 
 	if (!local->ops->set_lbt) {
@@ -92,12 +116,17 @@
 		return -EOPNOTSUPP;
 	}
 
-	return local->ops->set_lbt(&local->hw, mode);
+	trace_802154_drv_set_lbt_mode(local, mode);
+	ret = local->ops->set_lbt(&local->hw, mode);
+	trace_802154_drv_return_int(local, ret);
+	return ret;
 }
 
 static inline int
-drv_set_cca_ed_level(struct ieee802154_local *local, s32 ed_level)
+drv_set_cca_ed_level(struct ieee802154_local *local, s32 mbm)
 {
+	int ret;
+
 	might_sleep();
 
 	if (!local->ops->set_cca_ed_level) {
@@ -105,12 +134,16 @@
 		return -EOPNOTSUPP;
 	}
 
-	return local->ops->set_cca_ed_level(&local->hw, ed_level);
+	trace_802154_drv_set_cca_ed_level(local, mbm);
+	ret = local->ops->set_cca_ed_level(&local->hw, mbm);
+	trace_802154_drv_return_int(local, ret);
+	return ret;
 }
 
 static inline int drv_set_pan_id(struct ieee802154_local *local, __le16 pan_id)
 {
 	struct ieee802154_hw_addr_filt filt;
+	int ret;
 
 	might_sleep();
 
@@ -121,14 +154,18 @@
 
 	filt.pan_id = pan_id;
 
-	return local->ops->set_hw_addr_filt(&local->hw, &filt,
+	trace_802154_drv_set_pan_id(local, pan_id);
+	ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
 					    IEEE802154_AFILT_PANID_CHANGED);
+	trace_802154_drv_return_int(local, ret);
+	return ret;
 }
 
 static inline int
 drv_set_extended_addr(struct ieee802154_local *local, __le64 extended_addr)
 {
 	struct ieee802154_hw_addr_filt filt;
+	int ret;
 
 	might_sleep();
 
@@ -139,14 +176,18 @@
 
 	filt.ieee_addr = extended_addr;
 
-	return local->ops->set_hw_addr_filt(&local->hw, &filt,
+	trace_802154_drv_set_extended_addr(local, extended_addr);
+	ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
 					    IEEE802154_AFILT_IEEEADDR_CHANGED);
+	trace_802154_drv_return_int(local, ret);
+	return ret;
 }
 
 static inline int
 drv_set_short_addr(struct ieee802154_local *local, __le16 short_addr)
 {
 	struct ieee802154_hw_addr_filt filt;
+	int ret;
 
 	might_sleep();
 
@@ -157,14 +198,18 @@
 
 	filt.short_addr = short_addr;
 
-	return local->ops->set_hw_addr_filt(&local->hw, &filt,
+	trace_802154_drv_set_short_addr(local, short_addr);
+	ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
 					    IEEE802154_AFILT_SADDR_CHANGED);
+	trace_802154_drv_return_int(local, ret);
+	return ret;
 }
 
 static inline int
 drv_set_pan_coord(struct ieee802154_local *local, bool is_coord)
 {
 	struct ieee802154_hw_addr_filt filt;
+	int ret;
 
 	might_sleep();
 
@@ -175,14 +220,19 @@
 
 	filt.pan_coord = is_coord;
 
-	return local->ops->set_hw_addr_filt(&local->hw, &filt,
+	trace_802154_drv_set_pan_coord(local, is_coord);
+	ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
 					    IEEE802154_AFILT_PANC_CHANGED);
+	trace_802154_drv_return_int(local, ret);
+	return ret;
 }
 
 static inline int
 drv_set_csma_params(struct ieee802154_local *local, u8 min_be, u8 max_be,
 		    u8 max_csma_backoffs)
 {
+	int ret;
+
 	might_sleep();
 
 	if (!local->ops->set_csma_params) {
@@ -190,13 +240,19 @@
 		return -EOPNOTSUPP;
 	}
 
-	return local->ops->set_csma_params(&local->hw, min_be, max_be,
+	trace_802154_drv_set_csma_params(local, min_be, max_be,
+					 max_csma_backoffs);
+	ret = local->ops->set_csma_params(&local->hw, min_be, max_be,
 					   max_csma_backoffs);
+	trace_802154_drv_return_int(local, ret);
+	return ret;
 }
 
 static inline int
 drv_set_max_frame_retries(struct ieee802154_local *local, s8 max_frame_retries)
 {
+	int ret;
+
 	might_sleep();
 
 	if (!local->ops->set_frame_retries) {
@@ -204,12 +260,17 @@
 		return -EOPNOTSUPP;
 	}
 
-	return local->ops->set_frame_retries(&local->hw, max_frame_retries);
+	trace_802154_drv_set_max_frame_retries(local, max_frame_retries);
+	ret = local->ops->set_frame_retries(&local->hw, max_frame_retries);
+	trace_802154_drv_return_int(local, ret);
+	return ret;
 }
 
 static inline int
 drv_set_promiscuous_mode(struct ieee802154_local *local, bool on)
 {
+	int ret;
+
 	might_sleep();
 
 	if (!local->ops->set_promiscuous_mode) {
@@ -217,7 +278,10 @@
 		return -EOPNOTSUPP;
 	}
 
-	return local->ops->set_promiscuous_mode(&local->hw, on);
+	trace_802154_drv_set_promiscuous_mode(local, on);
+	ret = local->ops->set_promiscuous_mode(&local->hw, on);
+	trace_802154_drv_return_int(local, ret);
+	return ret;
 }
 
 #endif /* __MAC802154_DRIVER_OPS */
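
[Every wrapper in this file now follows the same shape: entry tracepoint,
driver op, return tracepoint, so a trace buffer always pairs a call with its
result. A hypothetical template for a future op; "foo" and set_foo are
placeholders, not a real mac802154 interface, and the matching TRACE_EVENT
would be added to the new trace.h below:

	static inline int drv_set_foo(struct ieee802154_local *local, u8 foo)
	{
		int ret;

		might_sleep();

		if (!local->ops->set_foo) {
			WARN_ON(1);
			return -EOPNOTSUPP;
		}

		trace_802154_drv_set_foo(local, foo);
		ret = local->ops->set_foo(&local->hw, foo);
		trace_802154_drv_return_int(local, ret);
		return ret;
	}
]
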
diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h
index 127ba18..34755d5 100644
--- a/net/mac802154/ieee802154_i.h
+++ b/net/mac802154/ieee802154_i.h
@@ -86,16 +86,12 @@
 	unsigned long state;
 	char name[IFNAMSIZ];
 
-	spinlock_t mib_lock;
-
 	/* protects sec from concurrent access by netlink. access by
 	 * encrypt/decrypt/header_create safe without additional protection.
 	 */
 	struct mutex sec_mtx;
 
 	struct mac802154_llsec sec;
-	/* must be last, dynamically sized area in this! */
-	struct ieee802154_vif vif;
 };
 
 #define MAC802154_CHAN_NONE		0xff /* No channel is assigned */
@@ -136,12 +132,7 @@
 enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer);
 
 /* MIB callbacks */
-void mac802154_dev_set_short_addr(struct net_device *dev, __le16 val);
-__le16 mac802154_dev_get_short_addr(const struct net_device *dev);
-__le16 mac802154_dev_get_pan_id(const struct net_device *dev);
-void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val);
 void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan);
-u8 mac802154_dev_get_dsn(const struct net_device *dev);
 
 int mac802154_get_params(struct net_device *dev,
 			 struct ieee802154_llsec_params *params);
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 91b75ab..8b69824 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -62,9 +62,10 @@
 		(struct sockaddr_ieee802154 *)&ifr->ifr_addr;
 	int err = -ENOIOCTLCMD;
 
-	ASSERT_RTNL();
+	if (cmd != SIOCGIFADDR && cmd != SIOCSIFADDR)
+		return err;
 
-	spin_lock_bh(&sdata->mib_lock);
+	rtnl_lock();
 
 	switch (cmd) {
 	case SIOCGIFADDR:
@@ -89,7 +90,7 @@
 	}
 	case SIOCSIFADDR:
 		if (netif_running(dev)) {
-			spin_unlock_bh(&sdata->mib_lock);
+			rtnl_unlock();
 			return -EBUSY;
 		}
 
@@ -111,7 +112,7 @@
 		break;
 	}
 
-	spin_unlock_bh(&sdata->mib_lock);
+	rtnl_unlock();
 	return err;
 }
 
@@ -125,7 +126,7 @@
 		return -EBUSY;
 
 	ieee802154_be64_to_le64(&extended_addr, addr->sa_data);
-	if (!ieee802154_is_valid_extended_addr(extended_addr))
+	if (!ieee802154_is_valid_extended_unicast_addr(extended_addr))
 		return -EINVAL;
 
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
@@ -134,19 +135,72 @@
 	return mac802154_wpan_update_llsec(dev);
 }
 
+static int ieee802154_setup_hw(struct ieee802154_sub_if_data *sdata)
+{
+	struct ieee802154_local *local = sdata->local;
+	struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+	int ret;
+
+	if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) {
+		ret = drv_set_promiscuous_mode(local,
+					       wpan_dev->promiscuous_mode);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (local->hw.flags & IEEE802154_HW_AFILT) {
+		ret = drv_set_pan_id(local, wpan_dev->pan_id);
+		if (ret < 0)
+			return ret;
+
+		ret = drv_set_extended_addr(local, wpan_dev->extended_addr);
+		if (ret < 0)
+			return ret;
+
+		ret = drv_set_short_addr(local, wpan_dev->short_addr);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (local->hw.flags & IEEE802154_HW_LBT) {
+		ret = drv_set_lbt_mode(local, wpan_dev->lbt);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (local->hw.flags & IEEE802154_HW_CSMA_PARAMS) {
+		ret = drv_set_csma_params(local, wpan_dev->min_be,
+					  wpan_dev->max_be,
+					  wpan_dev->csma_retries);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (local->hw.flags & IEEE802154_HW_FRAME_RETRIES) {
+		ret = drv_set_max_frame_retries(local, wpan_dev->frame_retries);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int mac802154_slave_open(struct net_device *dev)
 {
 	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
 	struct ieee802154_local *local = sdata->local;
-	int res = 0;
+	int res;
 
 	ASSERT_RTNL();
 
 	set_bit(SDATA_STATE_RUNNING, &sdata->state);
 
 	if (!local->open_count) {
+		res = ieee802154_setup_hw(sdata);
+		if (res)
+			goto err;
+
 		res = drv_start(local);
-		WARN_ON(res);
 		if (res)
 			goto err;
 	}
@@ -218,8 +272,8 @@
 			 * exist really an use case if we need to support
 			 * multiple node types at the same time.
 			 */
-			if (sdata->vif.type == NL802154_IFTYPE_NODE &&
-			    nsdata->vif.type == NL802154_IFTYPE_NODE)
+			if (wpan_dev->iftype == NL802154_IFTYPE_NODE &&
+			    nsdata->wpan_dev.iftype == NL802154_IFTYPE_NODE)
 				return -EBUSY;
 
 			/* check all phy mac sublayer settings are the same.
@@ -239,67 +293,13 @@
 {
 	int rc;
 	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-	struct ieee802154_local *local = sdata->local;
 	struct wpan_dev *wpan_dev = &sdata->wpan_dev;
-	struct wpan_phy *phy = sdata->local->phy;
 
-	rc = ieee802154_check_concurrent_iface(sdata, sdata->vif.type);
+	rc = ieee802154_check_concurrent_iface(sdata, wpan_dev->iftype);
 	if (rc < 0)
 		return rc;
 
-	rc = mac802154_slave_open(dev);
-	if (rc < 0)
-		return rc;
-
-	mutex_lock(&phy->pib_lock);
-
-	if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) {
-		rc = drv_set_promiscuous_mode(local,
-					      wpan_dev->promiscuous_mode);
-		if (rc < 0)
-			goto out;
-	}
-
-	if (local->hw.flags & IEEE802154_HW_AFILT) {
-		rc = drv_set_pan_id(local, wpan_dev->pan_id);
-		if (rc < 0)
-			goto out;
-
-		rc = drv_set_extended_addr(local, wpan_dev->extended_addr);
-		if (rc < 0)
-			goto out;
-
-		rc = drv_set_short_addr(local, wpan_dev->short_addr);
-		if (rc < 0)
-			goto out;
-	}
-
-	if (local->hw.flags & IEEE802154_HW_LBT) {
-		rc = drv_set_lbt_mode(local, wpan_dev->lbt);
-		if (rc < 0)
-			goto out;
-	}
-
-	if (local->hw.flags & IEEE802154_HW_CSMA_PARAMS) {
-		rc = drv_set_csma_params(local, wpan_dev->min_be,
-					 wpan_dev->max_be,
-					 wpan_dev->csma_retries);
-		if (rc < 0)
-			goto out;
-	}
-
-	if (local->hw.flags & IEEE802154_HW_FRAME_RETRIES) {
-		rc = drv_set_max_frame_retries(local, wpan_dev->frame_retries);
-		if (rc < 0)
-			goto out;
-	}
-
-	mutex_unlock(&phy->pib_lock);
-	return 0;
-
-out:
-	mutex_unlock(&phy->pib_lock);
-	return rc;
+	return mac802154_slave_open(dev);
 }
 
 static int mac802154_slave_close(struct net_device *dev)
@@ -309,15 +309,16 @@
 
 	ASSERT_RTNL();
 
-	hrtimer_cancel(&local->ifs_timer);
-
 	netif_stop_queue(dev);
 	local->open_count--;
 
 	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
 
-	if (!local->open_count)
+	if (!local->open_count) {
+		flush_workqueue(local->workqueue);
+		hrtimer_cancel(&local->ifs_timer);
 		drv_stop(local);
+	}
 
 	return 0;
 }
@@ -374,14 +375,12 @@
 	hdr.fc.type = cb->type;
 	hdr.fc.security_enabled = cb->secen;
 	hdr.fc.ack_request = cb->ackreq;
-	hdr.seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
+	hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF;
 
 	if (mac802154_set_header_security(sdata, &hdr, cb) < 0)
 		return -EINVAL;
 
 	if (!saddr) {
-		spin_lock_bh(&sdata->mib_lock);
-
 		if (wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST) ||
 		    wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) ||
 		    wpan_dev->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST)) {
@@ -393,8 +392,6 @@
 		}
 
 		hdr.source.pan_id = wpan_dev->pan_id;
-
-		spin_unlock_bh(&sdata->mib_lock);
 	} else {
 		hdr.source = *(const struct ieee802154_addr *)saddr;
 	}
@@ -474,13 +471,15 @@
 		       enum nl802154_iftype type)
 {
 	struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+	u8 tmp;
 
 	/* set some type-dependent values */
-	sdata->vif.type = type;
 	sdata->wpan_dev.iftype = type;
 
-	get_random_bytes(&wpan_dev->bsn, 1);
-	get_random_bytes(&wpan_dev->dsn, 1);
+	get_random_bytes(&tmp, sizeof(tmp));
+	atomic_set(&wpan_dev->bsn, tmp);
+	get_random_bytes(&tmp, sizeof(tmp));
+	atomic_set(&wpan_dev->dsn, tmp);
 
 	/* defaults per 802.15.4-2011 */
 	wpan_dev->min_be = 3;
@@ -503,7 +502,6 @@
 		sdata->dev->ml_priv = &mac802154_mlme_wpan;
 		wpan_dev->promiscuous_mode = false;
 
-		spin_lock_init(&sdata->mib_lock);
 		mutex_init(&sdata->sec_mtx);
 
 		mac802154_llsec_init(&sdata->sec);
@@ -531,7 +529,7 @@
 
 	ASSERT_RTNL();
 
-	ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size, name,
+	ndev = alloc_netdev(sizeof(*sdata), name,
 			    name_assign_type, ieee802154_if_setup);
 	if (!ndev)
 		return ERR_PTR(-ENOMEM);
@@ -547,7 +545,7 @@
 	switch (type) {
 	case NL802154_IFTYPE_NODE:
 		ndev->type = ARPHRD_IEEE802154;
-		if (ieee802154_is_valid_extended_addr(extended_addr))
+		if (ieee802154_is_valid_extended_unicast_addr(extended_addr))
 			ieee802154_le64_to_be64(ndev->dev_addr, &extended_addr);
 		else
 			memcpy(ndev->dev_addr, ndev->perm_addr,
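
[With the per-device MIB spinlock gone, the data sequence number becomes an
atomic counter in wpan_dev, seeded randomly at interface setup (the
get_random_bytes()/atomic_set() hunk above) and consumed via
atomic_inc_return(...) & 0xFF in the header-create path. The 8-bit wraparound
is easy to check with plain C11 atomics; illustration only, the kernel uses
atomic_t:

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	static atomic_uint dsn;

	static uint8_t next_dsn(void)
	{
		/* same effect as atomic_inc_return(&wpan_dev->dsn) & 0xFF:
		 * fetch_add returns the old value, so add 1 for the new one
		 */
		return (uint8_t)(atomic_fetch_add(&dsn, 1) + 1);
	}

	int main(void)
	{
		int i;

		atomic_init(&dsn, 0xfe);	/* the kernel seeds this randomly */
		for (i = 0; i < 4; i++)
			printf("%u\n", next_dsn());	/* 255 0 1 2 */
		return 0;
	}
]
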
diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c
index bdccb4e..8606da4 100644
--- a/net/mac802154/mac_cmd.c
+++ b/net/mac802154/mac_cmd.c
@@ -36,37 +36,30 @@
 				    u8 pan_coord, u8 blx,
 				    u8 coord_realign)
 {
-	struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
-	int rc = 0;
+	struct ieee802154_llsec_params params;
+	int changed = 0;
 
 	ASSERT_RTNL();
 
 	BUG_ON(addr->mode != IEEE802154_ADDR_SHORT);
 
-	mac802154_dev_set_pan_id(dev, addr->pan_id);
-	mac802154_dev_set_short_addr(dev, addr->short_addr);
+	dev->ieee802154_ptr->pan_id = addr->pan_id;
+	dev->ieee802154_ptr->short_addr = addr->short_addr;
 	mac802154_dev_set_page_channel(dev, page, channel);
 
-	if (ops->llsec) {
-		struct ieee802154_llsec_params params;
-		int changed = 0;
+	params.pan_id = addr->pan_id;
+	changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
 
-		params.coord_shortaddr = addr->short_addr;
-		changed |= IEEE802154_LLSEC_PARAM_COORD_SHORTADDR;
+	params.hwaddr = ieee802154_devaddr_from_raw(dev->dev_addr);
+	changed |= IEEE802154_LLSEC_PARAM_HWADDR;
 
-		params.pan_id = addr->pan_id;
-		changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
+	params.coord_hwaddr = params.hwaddr;
+	changed |= IEEE802154_LLSEC_PARAM_COORD_HWADDR;
 
-		params.hwaddr = ieee802154_devaddr_from_raw(dev->dev_addr);
-		changed |= IEEE802154_LLSEC_PARAM_HWADDR;
+	params.coord_shortaddr = addr->short_addr;
+	changed |= IEEE802154_LLSEC_PARAM_COORD_SHORTADDR;
 
-		params.coord_hwaddr = params.hwaddr;
-		changed |= IEEE802154_LLSEC_PARAM_COORD_HWADDR;
-
-		rc = ops->llsec->set_params(dev, &params, changed);
-	}
-
-	return rc;
+	return mac802154_set_params(dev, &params, changed);
 }
 
 static int mac802154_set_mac_params(struct net_device *dev,
@@ -91,19 +84,19 @@
 	wpan_dev->frame_retries = params->frame_retries;
 	wpan_dev->lbt = params->lbt;
 
-	if (local->hw.flags & IEEE802154_HW_TXPOWER) {
+	if (local->hw.phy->flags & WPAN_PHY_FLAG_TXPOWER) {
 		ret = drv_set_tx_power(local, params->transmit_power);
 		if (ret < 0)
 			return ret;
 	}
 
-	if (local->hw.flags & IEEE802154_HW_CCA_MODE) {
+	if (local->hw.phy->flags & WPAN_PHY_FLAG_CCA_MODE) {
 		ret = drv_set_cca_mode(local, &params->cca);
 		if (ret < 0)
 			return ret;
 	}
 
-	if (local->hw.flags & IEEE802154_HW_CCA_ED_LEVEL) {
+	if (local->hw.phy->flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) {
 		ret = drv_set_cca_ed_level(local, params->cca_ed_level);
 		if (ret < 0)
 			return ret;
@@ -151,9 +144,6 @@
 
 struct ieee802154_mlme_ops mac802154_mlme_wpan = {
 	.start_req = mac802154_mlme_start_req,
-	.get_pan_id = mac802154_dev_get_pan_id,
-	.get_short_addr = mac802154_dev_get_short_addr,
-	.get_dsn = mac802154_dev_get_dsn,
 
 	.llsec = &mac802154_llsec_ops,
 
diff --git a/net/mac802154/main.c b/net/mac802154/main.c
index 08cb32d..356b346 100644
--- a/net/mac802154/main.c
+++ b/net/mac802154/main.c
@@ -107,6 +107,18 @@
 
 	skb_queue_head_init(&local->skb_queue);
 
+	/* init supported flags with 802.15.4 default ranges */
+	phy->supported.max_minbe = 8;
+	phy->supported.min_maxbe = 3;
+	phy->supported.max_maxbe = 8;
+	phy->supported.min_frame_retries = -1;
+	phy->supported.max_frame_retries = 7;
+	phy->supported.max_csma_backoffs = 5;
+	phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE;
+
+	/* always supported */
+	phy->supported.iftypes = BIT(NL802154_IFTYPE_NODE);
+
 	return &local->hw;
 }
 EXPORT_SYMBOL(ieee802154_alloc_hw);
@@ -155,6 +167,26 @@
 
 	ieee802154_setup_wpan_phy_pib(local->phy);
 
+	if (!(hw->flags & IEEE802154_HW_CSMA_PARAMS)) {
+		local->phy->supported.min_csma_backoffs = 4;
+		local->phy->supported.max_csma_backoffs = 4;
+		local->phy->supported.min_maxbe = 5;
+		local->phy->supported.max_maxbe = 5;
+		local->phy->supported.min_minbe = 3;
+		local->phy->supported.max_minbe = 3;
+	}
+
+	if (!(hw->flags & IEEE802154_HW_FRAME_RETRIES)) {
+		/* TODO should be 3, but our default value is -1 which means
+		 * no ARET handling.
+		 */
+		local->phy->supported.min_frame_retries = -1;
+		local->phy->supported.max_frame_retries = -1;
+	}
+
+	if (hw->flags & IEEE802154_HW_PROMISCUOUS)
+		local->phy->supported.iftypes |= BIT(NL802154_IFTYPE_MONITOR);
+
 	rc = wpan_phy_register(local->phy);
 	if (rc < 0)
 		goto out_wq;
diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c
index 5cf019a..73f94fb 100644
--- a/net/mac802154/mib.c
+++ b/net/mac802154/mib.c
@@ -26,81 +26,22 @@
 #include "ieee802154_i.h"
 #include "driver-ops.h"
 
-void mac802154_dev_set_short_addr(struct net_device *dev, __le16 val)
-{
-	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-
-	BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-	spin_lock_bh(&sdata->mib_lock);
-	sdata->wpan_dev.short_addr = val;
-	spin_unlock_bh(&sdata->mib_lock);
-}
-
-__le16 mac802154_dev_get_short_addr(const struct net_device *dev)
-{
-	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-	__le16 ret;
-
-	BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-	spin_lock_bh(&sdata->mib_lock);
-	ret = sdata->wpan_dev.short_addr;
-	spin_unlock_bh(&sdata->mib_lock);
-
-	return ret;
-}
-
-__le16 mac802154_dev_get_pan_id(const struct net_device *dev)
-{
-	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-	__le16 ret;
-
-	BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-	spin_lock_bh(&sdata->mib_lock);
-	ret = sdata->wpan_dev.pan_id;
-	spin_unlock_bh(&sdata->mib_lock);
-
-	return ret;
-}
-
-void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val)
-{
-	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-
-	BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-	spin_lock_bh(&sdata->mib_lock);
-	sdata->wpan_dev.pan_id = val;
-	spin_unlock_bh(&sdata->mib_lock);
-}
-
-u8 mac802154_dev_get_dsn(const struct net_device *dev)
-{
-	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-
-	BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-	return sdata->wpan_dev.dsn++;
-}
-
 void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)
 {
 	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
 	struct ieee802154_local *local = sdata->local;
 	int res;
 
+	ASSERT_RTNL();
+
 	BUG_ON(dev->type != ARPHRD_IEEE802154);
 
 	res = drv_set_channel(local, page, chan);
 	if (res) {
 		pr_debug("set_channel failed\n");
 	} else {
-		mutex_lock(&local->phy->pib_lock);
 		local->phy->current_channel = chan;
 		local->phy->current_page = page;
-		mutex_unlock(&local->phy->pib_lock);
 	}
 }
 
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index c0d67b2..d93ad2d 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -47,8 +47,6 @@
 
 	pr_debug("getting packet via slave interface %s\n", sdata->dev->name);
 
-	spin_lock_bh(&sdata->mib_lock);
-
 	span = wpan_dev->pan_id;
 	sshort = wpan_dev->short_addr;
 
@@ -83,13 +81,10 @@
 			skb->pkt_type = PACKET_OTHERHOST;
 		break;
 	default:
-		spin_unlock_bh(&sdata->mib_lock);
 		pr_debug("invalid dest mode\n");
 		goto fail;
 	}
 
-	spin_unlock_bh(&sdata->mib_lock);
-
 	skb->dev = sdata->dev;
 
 	rc = mac802154_llsec_decrypt(&sdata->sec, skb);
@@ -207,8 +202,10 @@
 	}
 
 	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
-		if (sdata->vif.type != NL802154_IFTYPE_NODE ||
-		    !netif_running(sdata->dev))
+		if (sdata->wpan_dev.iftype != NL802154_IFTYPE_NODE)
+			continue;
+
+		if (!ieee802154_sdata_running(sdata))
 			continue;
 
 		ieee802154_subif_frame(sdata, skb, &hdr);
@@ -232,7 +229,7 @@
 	skb->protocol = htons(ETH_P_IEEE802154);
 
 	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
-		if (sdata->vif.type != NL802154_IFTYPE_MONITOR)
+		if (sdata->wpan_dev.iftype != NL802154_IFTYPE_MONITOR)
 			continue;
 
 		if (!ieee802154_sdata_running(sdata))
diff --git a/net/mac802154/trace.c b/net/mac802154/trace.c
new file mode 100644
index 0000000..863e5e6
--- /dev/null
+++ b/net/mac802154/trace.c
@@ -0,0 +1,9 @@
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#include <net/cfg802154.h>
+#include "driver-ops.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/net/mac802154/trace.h b/net/mac802154/trace.h
new file mode 100644
index 0000000..6f30e0c
--- /dev/null
+++ b/net/mac802154/trace.h
@@ -0,0 +1,272 @@
+/* Based on net/mac80211/trace.h */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mac802154
+
+#if !defined(__MAC802154_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define __MAC802154_DRIVER_TRACE
+
+#include <linux/tracepoint.h>
+
+#include <net/mac802154.h>
+#include "ieee802154_i.h"
+
+#define MAXNAME		32
+#define LOCAL_ENTRY	__array(char, wpan_phy_name, MAXNAME)
+#define LOCAL_ASSIGN	strlcpy(__entry->wpan_phy_name, \
+				wpan_phy_name(local->hw.phy), MAXNAME)
+#define LOCAL_PR_FMT	"%s"
+#define LOCAL_PR_ARG	__entry->wpan_phy_name
+
+#define CCA_ENTRY __field(enum nl802154_cca_modes, cca_mode) \
+		  __field(enum nl802154_cca_opts, cca_opt)
+#define CCA_ASSIGN \
+	do {                                     \
+		(__entry->cca_mode) = cca->mode; \
+		(__entry->cca_opt) = cca->opt;   \
+	} while (0)
+#define CCA_PR_FMT "cca_mode: %d, cca_opt: %d"
+#define CCA_PR_ARG __entry->cca_mode, __entry->cca_opt
+
+#define BOOL_TO_STR(bo) (bo) ? "true" : "false"
+
+/* Tracing for driver callbacks */
+
+DECLARE_EVENT_CLASS(local_only_evt,
+	TP_PROTO(struct ieee802154_local *local),
+	TP_ARGS(local),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+	),
+	TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG)
+);
+
+DEFINE_EVENT(local_only_evt, 802154_drv_return_void,
+	TP_PROTO(struct ieee802154_local *local),
+	TP_ARGS(local)
+);
+
+TRACE_EVENT(802154_drv_return_int,
+	TP_PROTO(struct ieee802154_local *local, int ret),
+	TP_ARGS(local, ret),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(int, ret)
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->ret = ret;
+	),
+	TP_printk(LOCAL_PR_FMT ", returned: %d", LOCAL_PR_ARG,
+		  __entry->ret)
+);
+
+DEFINE_EVENT(local_only_evt, 802154_drv_start,
+	TP_PROTO(struct ieee802154_local *local),
+	TP_ARGS(local)
+);
+
+DEFINE_EVENT(local_only_evt, 802154_drv_stop,
+	TP_PROTO(struct ieee802154_local *local),
+	TP_ARGS(local)
+);
+
+TRACE_EVENT(802154_drv_set_channel,
+	TP_PROTO(struct ieee802154_local *local, u8 page, u8 channel),
+	TP_ARGS(local, page, channel),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(u8, page)
+		__field(u8, channel)
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->page = page;
+		__entry->channel = channel;
+	),
+	TP_printk(LOCAL_PR_FMT ", page: %d, channel: %d", LOCAL_PR_ARG,
+		  __entry->page, __entry->channel)
+);
+
+TRACE_EVENT(802154_drv_set_cca_mode,
+	TP_PROTO(struct ieee802154_local *local,
+		 const struct wpan_phy_cca *cca),
+	TP_ARGS(local, cca),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		CCA_ENTRY
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		CCA_ASSIGN;
+	),
+	TP_printk(LOCAL_PR_FMT ", " CCA_PR_FMT, LOCAL_PR_ARG,
+		  CCA_PR_ARG)
+);
+
+TRACE_EVENT(802154_drv_set_cca_ed_level,
+	TP_PROTO(struct ieee802154_local *local, s32 mbm),
+	TP_ARGS(local, mbm),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(s32, mbm)
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->mbm = mbm;
+	),
+	TP_printk(LOCAL_PR_FMT ", ed level: %d", LOCAL_PR_ARG,
+		  __entry->mbm)
+);
+
+TRACE_EVENT(802154_drv_set_tx_power,
+	TP_PROTO(struct ieee802154_local *local, s32 power),
+	TP_ARGS(local, power),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(s32, power)
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->power = power;
+	),
+	TP_printk(LOCAL_PR_FMT ", mbm: %d", LOCAL_PR_ARG,
+		 __entry->power)
+);
+
+TRACE_EVENT(802154_drv_set_lbt_mode,
+	TP_PROTO(struct ieee802154_local *local, bool mode),
+	TP_ARGS(local, mode),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(bool, mode)
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->mode = mode;
+	),
+	TP_printk(LOCAL_PR_FMT ", lbt mode: %s", LOCAL_PR_ARG,
+		  BOOL_TO_STR(__entry->mode))
+);
+
+TRACE_EVENT(802154_drv_set_short_addr,
+	TP_PROTO(struct ieee802154_local *local, __le16 short_addr),
+	TP_ARGS(local, short_addr),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(__le16, short_addr)
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->short_addr = short_addr;
+	),
+	TP_printk(LOCAL_PR_FMT ", short addr: 0x%04x", LOCAL_PR_ARG,
+		  le16_to_cpu(__entry->short_addr))
+);
+
+TRACE_EVENT(802154_drv_set_pan_id,
+	TP_PROTO(struct ieee802154_local *local, __le16 pan_id),
+	TP_ARGS(local, pan_id),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(__le16, pan_id)
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->pan_id = pan_id;
+	),
+	TP_printk(LOCAL_PR_FMT ", pan id: 0x%04x", LOCAL_PR_ARG,
+		  le16_to_cpu(__entry->pan_id))
+);
+
+TRACE_EVENT(802154_drv_set_extended_addr,
+	TP_PROTO(struct ieee802154_local *local, __le64 extended_addr),
+	TP_ARGS(local, extended_addr),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(__le64, extended_addr)
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->extended_addr = extended_addr;
+	),
+	TP_printk(LOCAL_PR_FMT ", extended addr: 0x%llx", LOCAL_PR_ARG,
+		  le64_to_cpu(__entry->extended_addr))
+);
+
+TRACE_EVENT(802154_drv_set_pan_coord,
+	TP_PROTO(struct ieee802154_local *local, bool is_coord),
+	TP_ARGS(local, is_coord),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(bool, is_coord)
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->is_coord = is_coord;
+	),
+	TP_printk(LOCAL_PR_FMT ", is_coord: %s", LOCAL_PR_ARG,
+		  BOOL_TO_STR(__entry->is_coord))
+);
+
+TRACE_EVENT(802154_drv_set_csma_params,
+	TP_PROTO(struct ieee802154_local *local, u8 min_be, u8 max_be,
+		 u8 max_csma_backoffs),
+	TP_ARGS(local, min_be, max_be, max_csma_backoffs),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(u8, min_be)
+		__field(u8, max_be)
+		__field(u8, max_csma_backoffs)
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->min_be = min_be;
+		__entry->max_be = max_be;
+		__entry->max_csma_backoffs = max_csma_backoffs;
+	),
+	TP_printk(LOCAL_PR_FMT ", min be: %d, max be: %d, max csma backoffs: %d",
+		  LOCAL_PR_ARG, __entry->min_be, __entry->max_be,
+		  __entry->max_csma_backoffs)
+);
+
+TRACE_EVENT(802154_drv_set_max_frame_retries,
+	TP_PROTO(struct ieee802154_local *local, s8 max_frame_retries),
+	TP_ARGS(local, max_frame_retries),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(s8, max_frame_retries)
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->max_frame_retries = max_frame_retries;
+	),
+	TP_printk(LOCAL_PR_FMT ", max frame retries: %d", LOCAL_PR_ARG,
+		  __entry->max_frame_retries)
+);
+
+TRACE_EVENT(802154_drv_set_promiscuous_mode,
+	TP_PROTO(struct ieee802154_local *local, bool on),
+	TP_ARGS(local, on),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(bool, on)
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->on = on;
+	),
+	TP_printk(LOCAL_PR_FMT ", promiscuous mode: %s", LOCAL_PR_ARG,
+		  BOOL_TO_STR(__entry->on))
+);
+
+#endif /* !__MAC802154_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
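For context, these tracepoints are meant to be called from the driver-ops
wrappers in net/mac802154/driver-ops.h, bracketing each call into the
driver. A condensed sketch of that call pattern (checks and might_sleep()
elided; not the verbatim wrapper):

	static inline int
	drv_set_channel(struct ieee802154_local *local, u8 page, u8 channel)
	{
		int ret;

		trace_802154_drv_set_channel(local, page, channel);
		ret = local->ops->set_channel(&local->hw, page, channel);
		trace_802154_drv_return_int(local, ret);
		return ret;
	}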
diff --git a/net/mac802154/util.c b/net/mac802154/util.c
index 150bf80..583435f 100644
--- a/net/mac802154/util.c
+++ b/net/mac802154/util.c
@@ -85,11 +85,10 @@
 			hrtimer_start(&local->ifs_timer,
 				      ktime_set(0, hw->phy->sifs_period * NSEC_PER_USEC),
 				      HRTIMER_MODE_REL);
-
-		consume_skb(skb);
 	} else {
 		ieee802154_wake_queue(hw);
-		consume_skb(skb);
 	}
+
+	dev_consume_skb_any(skb);
 }
 EXPORT_SYMBOL(ieee802154_xmit_complete);
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index 809df53..0183b32 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -62,6 +62,7 @@
 
 static struct packet_offload mpls_mc_offload __read_mostly = {
 	.type = cpu_to_be16(ETH_P_MPLS_MC),
+	.priority = 15,
 	.callbacks = {
 		.gso_segment    =	mpls_gso_segment,
 	},
@@ -69,6 +70,7 @@
 
 static struct packet_offload mpls_uc_offload __read_mostly = {
 	.type = cpu_to_be16(ETH_P_MPLS_UC),
+	.priority = 15,
 	.callbacks = {
 		.gso_segment    =	mpls_gso_segment,
 	},
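The new .priority field only matters because dev_add_offload() keeps the
offload list sorted by it, smallest value first, so the common IPv4/IPv6
offloads stay at the head of the list while MPLS at 15 is consulted last.
A rough sketch of that core-side counterpart in net/core/dev.c:

	void dev_add_offload(struct packet_offload *po)
	{
		struct packet_offload *elem;

		spin_lock(&offload_lock);
		list_for_each_entry(elem, &offload_base, list) {
			if (po->priority < elem->priority)
				break;
		}
		/* insert before the first entry with a higher priority */
		list_add_rcu(&po->list, elem->list.prev);
		spin_unlock(&offload_lock);
	}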
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index a0f3e6a3..6eae69a 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -1,6 +1,14 @@
 menu "Core Netfilter Configuration"
 	depends on NET && INET && NETFILTER
 
+config NETFILTER_INGRESS
+	bool "Netfilter ingress support"
+	default y
+	select NET_INGRESS
+	help
+	  This allows you to classify packets on the ingress path using the
+	  Netfilter infrastructure.
+
 config NETFILTER_NETLINK
 	tristate
 
@@ -198,7 +206,7 @@
 
 config NF_CONNTRACK_H323
 	tristate "H.323 protocol support"
-	depends on (IPV6 || IPV6=n)
+	depends on IPV6 || IPV6=n
 	depends on NETFILTER_ADVANCED
 	help
 	  H.323 is a VoIP signalling protocol from ITU-T. As one of the most
@@ -448,6 +456,11 @@
 	help
 	  This option enables support for a mixed IPv4/IPv6 "inet" table.
 
+config NF_TABLES_NETDEV
+	tristate "Netfilter nf_tables netdev tables support"
+	help
+	  This option enables support for the "netdev" table.
+
 config NFT_EXTHDR
 	tristate "Netfilter nf_tables IPv6 exthdr module"
 	help
@@ -710,7 +723,7 @@
 
 config NETFILTER_XT_TARGET_HMARK
 	tristate '"HMARK" target support'
-	depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
+	depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
 	depends on NETFILTER_ADVANCED
 	---help---
 	This option adds the "HMARK" target.
@@ -852,7 +865,7 @@
 config NETFILTER_XT_TARGET_TEE
 	tristate '"TEE" - packet cloning to alternate destination'
 	depends on NETFILTER_ADVANCED
-	depends on (IPV6 || IPV6=n)
+	depends on IPV6 || IPV6=n
 	depends on !NF_CONNTRACK || NF_CONNTRACK
 	---help---
 	This option adds a "TEE" target with which a packet can be cloned and
@@ -862,8 +875,8 @@
 	tristate '"TPROXY" target transparent proxying support'
 	depends on NETFILTER_XTABLES
 	depends on NETFILTER_ADVANCED
-	depends on (IPV6 || IPV6=n)
-	depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
+	depends on IPV6 || IPV6=n
+	depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
 	depends on IP_NF_MANGLE
 	select NF_DEFRAG_IPV4
 	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
@@ -902,7 +915,7 @@
 
 config NETFILTER_XT_TARGET_TCPMSS
 	tristate '"TCPMSS" target support'
-	depends on (IPV6 || IPV6=n)
+	depends on IPV6 || IPV6=n
 	default m if NETFILTER_ADVANCED=n
 	---help---
 	  This option adds a `TCPMSS' target, which allows you to alter the
@@ -1114,7 +1127,7 @@
 
 config NETFILTER_XT_MATCH_HASHLIMIT
 	tristate '"hashlimit" match support'
-	depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
+	depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
 	depends on NETFILTER_ADVANCED
 	help
 	  This option adds a `hashlimit' match.
@@ -1356,8 +1369,8 @@
 	depends on NETFILTER_XTABLES
 	depends on NETFILTER_ADVANCED
 	depends on !NF_CONNTRACK || NF_CONNTRACK
-	depends on (IPV6 || IPV6=n)
-	depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
+	depends on IPV6 || IPV6=n
+	depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
 	select NF_DEFRAG_IPV4
 	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
 	help
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index a87d8b8..70d026d 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -75,6 +75,7 @@
 
 obj-$(CONFIG_NF_TABLES)		+= nf_tables.o
 obj-$(CONFIG_NF_TABLES_INET)	+= nf_tables_inet.o
+obj-$(CONFIG_NF_TABLES_NETDEV)	+= nf_tables_netdev.o
 obj-$(CONFIG_NFT_COMPAT)	+= nft_compat.o
 obj-$(CONFIG_NFT_EXTHDR)	+= nft_exthdr.o
 obj-$(CONFIG_NFT_META)		+= nft_meta.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index e616301..a0e5497 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -64,10 +64,27 @@
 
 int nf_register_hook(struct nf_hook_ops *reg)
 {
+	struct list_head *nf_hook_list;
 	struct nf_hook_ops *elem;
 
 	mutex_lock(&nf_hook_mutex);
-	list_for_each_entry(elem, &nf_hooks[reg->pf][reg->hooknum], list) {
+	switch (reg->pf) {
+	case NFPROTO_NETDEV:
+#ifdef CONFIG_NETFILTER_INGRESS
+		if (reg->hooknum == NF_NETDEV_INGRESS) {
+			BUG_ON(reg->dev == NULL);
+			nf_hook_list = &reg->dev->nf_hooks_ingress;
+			net_inc_ingress_queue();
+			break;
+		}
+#endif
+		/* Fall through. */
+	default:
+		nf_hook_list = &nf_hooks[reg->pf][reg->hooknum];
+		break;
+	}
+
+	list_for_each_entry(elem, nf_hook_list, list) {
 		if (reg->priority < elem->priority)
 			break;
 	}
@@ -85,10 +102,23 @@
 	mutex_lock(&nf_hook_mutex);
 	list_del_rcu(&reg->list);
 	mutex_unlock(&nf_hook_mutex);
+	switch (reg->pf) {
+	case NFPROTO_NETDEV:
+#ifdef CONFIG_NETFILTER_INGRESS
+		if (reg->hooknum == NF_NETDEV_INGRESS) {
+			net_dec_ingress_queue();
+			break;
+		}
+		break;
+#endif
+	default:
+		break;
+	}
 #ifdef HAVE_JUMP_LABEL
 	static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
 	synchronize_net();
+	nf_queue_nf_hook_drop(reg);
 }
 EXPORT_SYMBOL(nf_unregister_hook);
 
@@ -166,11 +196,9 @@
 	/* We may already have this, but read-locks nest anyway */
 	rcu_read_lock();
 
-	elem = list_entry_rcu(&nf_hooks[state->pf][state->hook],
-			      struct nf_hook_ops, list);
+	elem = list_entry_rcu(state->hook_list, struct nf_hook_ops, list);
 next_hook:
-	verdict = nf_iterate(&nf_hooks[state->pf][state->hook], skb, state,
-			     &elem);
+	verdict = nf_iterate(state->hook_list, skb, state, &elem);
 	if (verdict == NF_ACCEPT || verdict == NF_STOP) {
 		ret = 1;
 	} else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
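To make the new NFPROTO_NETDEV branch concrete, a hypothetical module
would register an ingress hook roughly as follows; sample_ingress and
sample_ops are illustrative names, and .dev must be set before
registration (see the BUG_ON() above):

	static unsigned int sample_ingress(const struct nf_hook_ops *ops,
					   struct sk_buff *skb,
					   const struct nf_hook_state *state)
	{
		return NF_ACCEPT;	/* let everything pass */
	}

	static struct nf_hook_ops sample_ops = {
		.hook		= sample_ingress,
		.pf		= NFPROTO_NETDEV,
		.hooknum	= NF_NETDEV_INGRESS,
		.priority	= 0,
		/* .dev is filled in with the target net_device at
		 * registration time, then nf_register_hook(&sample_ops).
		 */
	};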
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index 6f024a8..d05e759 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -41,7 +41,7 @@
 	struct mtype *map = set->data;
 
 	init_timer(&map->gc);
-	map->gc.data = (unsigned long) set;
+	map->gc.data = (unsigned long)set;
 	map->gc.function = gc;
 	map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
 	add_timer(&map->gc);
@@ -144,10 +144,12 @@
 
 	if (ret == IPSET_ADD_FAILED) {
 		if (SET_WITH_TIMEOUT(set) &&
-		    ip_set_timeout_expired(ext_timeout(x, set)))
+		    ip_set_timeout_expired(ext_timeout(x, set))) {
 			ret = 0;
-		else if (!(flags & IPSET_FLAG_EXIST))
+		} else if (!(flags & IPSET_FLAG_EXIST)) {
+			set_bit(e->id, map->members);
 			return -IPSET_ERR_EXIST;
+		}
 		/* Element is re-added, cleanup extensions */
 		ip_set_ext_destroy(set, x);
 	}
@@ -165,6 +167,10 @@
 		ip_set_init_comment(ext_comment(x, set), ext);
 	if (SET_WITH_SKBINFO(set))
 		ip_set_init_skbinfo(ext_skbinfo(x, set), ext);
+
+	/* Activate element */
+	set_bit(e->id, map->members);
+
 	return 0;
 }
 
@@ -203,10 +209,13 @@
 	struct nlattr *adt, *nested;
 	void *x;
 	u32 id, first = cb->args[IPSET_CB_ARG0];
+	int ret = 0;
 
 	adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
 	if (!adt)
 		return -EMSGSIZE;
+	/* Extensions may be replaced */
+	rcu_read_lock();
 	for (; cb->args[IPSET_CB_ARG0] < map->elements;
 	     cb->args[IPSET_CB_ARG0]++) {
 		id = cb->args[IPSET_CB_ARG0];
@@ -214,7 +223,7 @@
 		if (!test_bit(id, map->members) ||
 		    (SET_WITH_TIMEOUT(set) &&
 #ifdef IP_SET_BITMAP_STORED_TIMEOUT
-		     mtype_is_filled((const struct mtype_elem *) x) &&
+		     mtype_is_filled((const struct mtype_elem *)x) &&
 #endif
 		     ip_set_timeout_expired(ext_timeout(x, set))))
 			continue;
@@ -222,14 +231,16 @@
 		if (!nested) {
 			if (id == first) {
 				nla_nest_cancel(skb, adt);
-				return -EMSGSIZE;
-			} else
-				goto nla_put_failure;
+				ret = -EMSGSIZE;
+				goto out;
+			}
+
+			goto nla_put_failure;
 		}
 		if (mtype_do_list(skb, map, id, set->dsize))
 			goto nla_put_failure;
 		if (ip_set_put_extensions(skb, set, x,
-		    mtype_is_filled((const struct mtype_elem *) x)))
+		    mtype_is_filled((const struct mtype_elem *)x)))
 			goto nla_put_failure;
 		ipset_nest_end(skb, nested);
 	}
@@ -238,29 +249,32 @@
 	/* Set listing finished */
 	cb->args[IPSET_CB_ARG0] = 0;
 
-	return 0;
+	goto out;
 
 nla_put_failure:
 	nla_nest_cancel(skb, nested);
 	if (unlikely(id == first)) {
 		cb->args[IPSET_CB_ARG0] = 0;
-		return -EMSGSIZE;
+		ret = -EMSGSIZE;
 	}
 	ipset_nest_end(skb, adt);
-	return 0;
+out:
+	rcu_read_unlock();
+	return ret;
 }
 
 static void
 mtype_gc(unsigned long ul_set)
 {
-	struct ip_set *set = (struct ip_set *) ul_set;
+	struct ip_set *set = (struct ip_set *)ul_set;
 	struct mtype *map = set->data;
 	void *x;
 	u32 id;
 
 	/* We run in parallel with other readers (test element)
-	 * but adding/deleting new entries is locked out */
-	read_lock_bh(&set->lock);
+	 * but adding/deleting new entries is locked out
+	 */
+	spin_lock_bh(&set->lock);
 	for (id = 0; id < map->elements; id++)
 		if (mtype_gc_test(id, map, set->dsize)) {
 			x = get_ext(set, map, id);
@@ -269,7 +283,7 @@
 				ip_set_ext_destroy(set, x);
 			}
 		}
-	read_unlock_bh(&set->lock);
+	spin_unlock_bh(&set->lock);
 
 	map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
 	add_timer(&map->gc);
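The idea behind the member-bit changes in this file: readers now run under
RCU instead of a read lock, so an element may only become visible once its
extensions are fully initialized. The type-specific do_add helpers below
therefore test the bit without setting it, and the generic add path above
sets it last. In outline (init_extensions() is a stand-in for the
timeout/counter/comment/skbinfo setup in mtype_add()):

	if (test_bit(e->id, map->members))	/* do_add: report existence only */
		return IPSET_ADD_FAILED;
	init_extensions(set, x, ext);		/* fill extensions first ... */
	set_bit(e->id, map->members);		/* ... then publish to readers */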
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index 55b083e..64a5643 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -36,6 +36,7 @@
 MODULE_ALIAS("ip_set_bitmap:ip");
 
 #define MTYPE		bitmap_ip
+#define HOST_MASK	32
 
 /* Type structure */
 struct bitmap_ip {
@@ -58,7 +59,7 @@
 static inline u32
 ip_to_id(const struct bitmap_ip *m, u32 ip)
 {
-	return ((ip & ip_set_hostmask(m->netmask)) - m->first_ip)/m->hosts;
+	return ((ip & ip_set_hostmask(m->netmask)) - m->first_ip) / m->hosts;
 }
 
 /* Common functions */
@@ -80,7 +81,7 @@
 bitmap_ip_do_add(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map,
 		 u32 flags, size_t dsize)
 {
-	return !!test_and_set_bit(e->id, map->members);
+	return !!test_bit(e->id, map->members);
 }
 
 static inline int
@@ -137,20 +138,17 @@
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	int ret = 0;
 
-	if (unlikely(!tb[IPSET_ATTR_IP] ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)   ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP]))
+		return -IPSET_ERR_PROTOCOL;
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
 
@@ -174,11 +172,12 @@
 	} else if (tb[IPSET_ATTR_CIDR]) {
 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-		if (!cidr || cidr > 32)
+		if (!cidr || cidr > HOST_MASK)
 			return -IPSET_ERR_INVALID_CIDR;
 		ip_set_mask_from_to(ip, ip_to, cidr);
-	} else
+	} else {
 		ip_to = ip;
+	}
 
 	if (ip_to > map->last_ip)
 		return -IPSET_ERR_BITMAP_RANGE;
@@ -189,8 +188,8 @@
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
-		else
-			ret = 0;
+
+		ret = 0;
 	}
 	return ret;
 }
@@ -277,16 +276,17 @@
 	} else if (tb[IPSET_ATTR_CIDR]) {
 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-		if (cidr >= 32)
+		if (cidr >= HOST_MASK)
 			return -IPSET_ERR_INVALID_CIDR;
 		ip_set_mask_from_to(first_ip, last_ip, cidr);
-	} else
+	} else {
 		return -IPSET_ERR_PROTOCOL;
+	}
 
 	if (tb[IPSET_ATTR_NETMASK]) {
 		netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
 
-		if (netmask > 32)
+		if (netmask > HOST_MASK)
 			return -IPSET_ERR_INVALID_NETMASK;
 
 		first_ip &= ip_set_hostmask(netmask);
@@ -360,7 +360,8 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
-		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING,
+					    .len  = IPSET_MAX_COMMENT_SIZE },
 		[IPSET_ATTR_SKBMARK]	= { .type = NLA_U64 },
 		[IPSET_ATTR_SKBPRIO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_SKBQUEUE]	= { .type = NLA_U16 },
@@ -377,6 +378,7 @@
 static void __exit
 bitmap_ip_fini(void)
 {
+	rcu_barrier();
 	ip_set_type_unregister(&bitmap_ip_type);
 }
 
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 8610474..1430535 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -36,6 +36,7 @@
 MODULE_ALIAS("ip_set_bitmap:ip,mac");
 
 #define MTYPE		bitmap_ipmac
+#define HOST_MASK	32
 #define IP_SET_BITMAP_STORED_TIMEOUT
 
 enum {
@@ -89,7 +90,7 @@
 		return 0;
 	elem = get_elem(map->extensions, e->id, dsize);
 	if (elem->filled == MAC_FILLED)
-		return e->ether == NULL ||
+		return !e->ether ||
 		       ether_addr_equal(e->ether, elem->ether);
 	/* Trigger kernel to fill out the ethernet address */
 	return -EAGAIN;
@@ -130,7 +131,8 @@
 		/* If MAC is unset yet, we store plain timeout value
 		 * because the timer is not activated yet
 		 * and we can reuse it later when MAC is filled out,
-		 * possibly by the kernel */
+		 * possibly by the kernel
+		 */
 		if (e->ether)
 			ip_set_timeout_set(timeout, t);
 		else
@@ -146,28 +148,35 @@
 	struct bitmap_ipmac_elem *elem;
 
 	elem = get_elem(map->extensions, e->id, dsize);
-	if (test_and_set_bit(e->id, map->members)) {
+	if (test_bit(e->id, map->members)) {
 		if (elem->filled == MAC_FILLED) {
-			if (e->ether && (flags & IPSET_FLAG_EXIST))
-				memcpy(elem->ether, e->ether, ETH_ALEN);
+			if (e->ether &&
+			    (flags & IPSET_FLAG_EXIST) &&
+			    !ether_addr_equal(e->ether, elem->ether)) {
+				/* memcpy isn't atomic */
+				clear_bit(e->id, map->members);
+				smp_mb__after_atomic();
+				ether_addr_copy(elem->ether, e->ether);
+			}
 			return IPSET_ADD_FAILED;
 		} else if (!e->ether)
 			/* Already added without ethernet address */
 			return IPSET_ADD_FAILED;
 		/* Fill the MAC address and trigger the timer activation */
-		memcpy(elem->ether, e->ether, ETH_ALEN);
+		clear_bit(e->id, map->members);
+		smp_mb__after_atomic();
+		ether_addr_copy(elem->ether, e->ether);
 		elem->filled = MAC_FILLED;
 		return IPSET_ADD_START_STORED_TIMEOUT;
 	} else if (e->ether) {
 		/* We can store MAC too */
-		memcpy(elem->ether, e->ether, ETH_ALEN);
+		ether_addr_copy(elem->ether, e->ether);
 		elem->filled = MAC_FILLED;
 		return 0;
-	} else {
-		elem->filled = MAC_UNSET;
-		/* MAC is not stored yet, don't start timer */
-		return IPSET_ADD_STORE_PLAIN_TIMEOUT;
 	}
+	elem->filled = MAC_UNSET;
+	/* MAC is not stored yet, don't start timer */
+	return IPSET_ADD_STORE_PLAIN_TIMEOUT;
 }
 
 static inline int
@@ -238,20 +247,17 @@
 	u32 ip = 0;
 	int ret = 0;
 
-	if (unlikely(!tb[IPSET_ATTR_IP] ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)   ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP]))
+		return -IPSET_ERR_PROTOCOL;
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
 
@@ -343,11 +349,12 @@
 	} else if (tb[IPSET_ATTR_CIDR]) {
 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-		if (cidr >= 32)
+		if (cidr >= HOST_MASK)
 			return -IPSET_ERR_INVALID_CIDR;
 		ip_set_mask_from_to(first_ip, last_ip, cidr);
-	} else
+	} else {
 		return -IPSET_ERR_PROTOCOL;
+	}
 
 	elements = (u64)last_ip - first_ip + 1;
 
@@ -397,7 +404,8 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
-		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING,
+					    .len  = IPSET_MAX_COMMENT_SIZE },
 		[IPSET_ATTR_SKBMARK]	= { .type = NLA_U64 },
 		[IPSET_ATTR_SKBPRIO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_SKBQUEUE]	= { .type = NLA_U16 },
@@ -414,6 +422,7 @@
 static void __exit
 bitmap_ipmac_fini(void)
 {
+	rcu_barrier();
 	ip_set_type_unregister(&bitmap_ipmac_type);
 }
 
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 005dd36..5338ccd 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -73,7 +73,7 @@
 bitmap_port_do_add(const struct bitmap_port_adt_elem *e,
 		   struct bitmap_port *map, u32 flags, size_t dsize)
 {
-	return !!test_and_set_bit(e->id, map->members);
+	return !!test_bit(e->id, map->members);
 }
 
 static inline int
@@ -136,19 +136,13 @@
 	u16 port_to;
 	int ret = 0;
 
-	if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)   ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
+	if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
+		return -IPSET_ERR_PROTOCOL;
+
 	port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
 	if (port < map->first_port || port > map->last_port)
 		return -IPSET_ERR_BITMAP_RANGE;
@@ -168,8 +162,9 @@
 			if (port < map->first_port)
 				return -IPSET_ERR_BITMAP_RANGE;
 		}
-	} else
+	} else {
 		port_to = port;
+	}
 
 	if (port_to > map->last_port)
 		return -IPSET_ERR_BITMAP_RANGE;
@@ -180,8 +175,8 @@
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
-		else
-			ret = 0;
+
+		ret = 0;
 	}
 	return ret;
 }
@@ -294,7 +289,8 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
-		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING,
+					    .len  = IPSET_MAX_COMMENT_SIZE },
 		[IPSET_ATTR_SKBMARK]	= { .type = NLA_U64 },
 		[IPSET_ATTR_SKBPRIO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_SKBQUEUE]	= { .type = NLA_U16 },
@@ -311,6 +307,7 @@
 static void __exit
 bitmap_port_fini(void)
 {
+	rcu_barrier();
 	ip_set_type_unregister(&bitmap_port_type);
 }
 
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index d259da3..338b404 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -32,8 +32,10 @@
 struct ip_set_net {
 	struct ip_set * __rcu *ip_set_list;	/* all individual sets */
 	ip_set_id_t	ip_set_max;	/* max number of sets */
-	int		is_deleted;	/* deleted by ip_set_net_exit */
+	bool		is_deleted;	/* deleted by ip_set_net_exit */
+	bool		is_destroyed;	/* all sets are destroyed */
 };
+
 static int ip_set_net_id __read_mostly;
 
 static inline struct ip_set_net *ip_set_pernet(struct net *net)
@@ -42,7 +44,7 @@
 }
 
 #define IP_SET_INC	64
-#define STREQ(a, b)	(strncmp(a, b, IPSET_MAXNAMELEN) == 0)
+#define STRNCMP(a, b)	(strncmp(a, b, IPSET_MAXNAMELEN) == 0)
 
 static unsigned int max_sets;
 
@@ -59,8 +61,7 @@
 #define ip_set(inst, id)		\
 	ip_set_dereference((inst)->ip_set_list)[id]
 
-/*
- * The set types are implemented in modules and registered set types
+/* The set types are implemented in modules and registered set types
  * can be found in ip_set_type_list. Adding/deleting types is
  * serialized by ip_set_type_mutex.
  */
@@ -85,7 +86,7 @@
 	struct ip_set_type *type;
 
 	list_for_each_entry_rcu(type, &ip_set_type_list, list)
-		if (STREQ(type->name, name) &&
+		if (STRNCMP(type->name, name) &&
 		    (type->family == family ||
 		     type->family == NFPROTO_UNSPEC) &&
 		    revision >= type->revision_min &&
@@ -130,9 +131,10 @@
 		goto unlock;
 	}
 	/* Make sure the type is already loaded
-	 * but we don't support the revision */
+	 * but we don't support the revision
+	 */
 	list_for_each_entry_rcu(type, &ip_set_type_list, list)
-		if (STREQ(type->name, name)) {
+		if (STRNCMP(type->name, name)) {
 			err = -IPSET_ERR_FIND_TYPE;
 			goto unlock;
 		}
@@ -166,7 +168,7 @@
 	*min = 255; *max = 0;
 	rcu_read_lock();
 	list_for_each_entry_rcu(type, &ip_set_type_list, list)
-		if (STREQ(type->name, name) &&
+		if (STRNCMP(type->name, name) &&
 		    (type->family == family ||
 		     type->family == NFPROTO_UNSPEC)) {
 			found = true;
@@ -208,15 +210,15 @@
 		pr_warn("ip_set type %s, family %s with revision min %u already registered!\n",
 			type->name, family_name(type->family),
 			type->revision_min);
-		ret = -EINVAL;
-		goto unlock;
+		ip_set_type_unlock();
+		return -EINVAL;
 	}
 	list_add_rcu(&type->list, &ip_set_type_list);
 	pr_debug("type %s, family %s, revision %u:%u registered.\n",
 		 type->name, family_name(type->family),
 		 type->revision_min, type->revision_max);
-unlock:
 	ip_set_type_unlock();
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(ip_set_type_register);
@@ -230,12 +232,12 @@
 		pr_warn("ip_set type %s, family %s with revision min %u not registered\n",
 			type->name, family_name(type->family),
 			type->revision_min);
-		goto unlock;
+		ip_set_type_unlock();
+		return;
 	}
 	list_del_rcu(&type->list);
 	pr_debug("type %s, family %s with revision min %u unregistered.\n",
 		 type->name, family_name(type->family), type->revision_min);
-unlock:
 	ip_set_type_unlock();
 
 	synchronize_rcu();
@@ -289,7 +291,7 @@
 int
 ip_set_get_ipaddr4(struct nlattr *nla,  __be32 *ipaddr)
 {
-	struct nlattr *tb[IPSET_ATTR_IPADDR_MAX+1];
+	struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1];
 
 	if (unlikely(!flag_nested(nla)))
 		return -IPSET_ERR_PROTOCOL;
@@ -306,7 +308,7 @@
 int
 ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
 {
-	struct nlattr *tb[IPSET_ATTR_IPADDR_MAX+1];
+	struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1];
 
 	if (unlikely(!flag_nested(nla)))
 		return -IPSET_ERR_PROTOCOL;
@@ -317,7 +319,7 @@
 		return -IPSET_ERR_PROTOCOL;
 
 	memcpy(ipaddr, nla_data(tb[IPSET_ATTR_IPADDR_IPV6]),
-		sizeof(struct in6_addr));
+	       sizeof(struct in6_addr));
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
@@ -365,7 +367,7 @@
 ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len)
 {
 	enum ip_set_ext_id id;
-	size_t offset = 0;
+	size_t offset = len;
 	u32 cadt_flags = 0;
 
 	if (tb[IPSET_ATTR_CADT_FLAGS])
@@ -375,12 +377,12 @@
 	for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
 		if (!add_extension(id, cadt_flags, tb))
 			continue;
-		offset += ALIGN(len + offset, ip_set_extensions[id].align);
+		offset = ALIGN(offset, ip_set_extensions[id].align);
 		set->offset[id] = offset;
 		set->extensions |= ip_set_extensions[id].type;
 		offset += ip_set_extensions[id].len;
 	}
-	return len + offset;
+	return offset;
 }
 EXPORT_SYMBOL_GPL(ip_set_elem_len);
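A worked example shows what the rewritten arithmetic fixes: for a 20-byte
element with one extension of 16 bytes aligned to 8, the old code computed
offset += ALIGN(20 + 0, 8) = 24, advanced to 40 after the extension, then
returned len + offset = 60 bytes for data that actually ends at byte 40,
and every additional extension compounded the error. The new version
starts at offset = len = 20, aligns up to 24, places the extension at
bytes 24-39 and returns exactly 40.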
 
@@ -389,13 +391,22 @@
 		      struct ip_set_ext *ext)
 {
 	u64 fullmark;
+
+	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
+		return -IPSET_ERR_PROTOCOL;
+
 	if (tb[IPSET_ATTR_TIMEOUT]) {
-		if (!(set->extensions & IPSET_EXT_TIMEOUT))
+		if (!SET_WITH_TIMEOUT(set))
 			return -IPSET_ERR_TIMEOUT;
 		ext->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
 	}
 	if (tb[IPSET_ATTR_BYTES] || tb[IPSET_ATTR_PACKETS]) {
-		if (!(set->extensions & IPSET_EXT_COUNTER))
+		if (!SET_WITH_COUNTER(set))
 			return -IPSET_ERR_COUNTER;
 		if (tb[IPSET_ATTR_BYTES])
 			ext->bytes = be64_to_cpu(nla_get_be64(
@@ -405,25 +416,25 @@
 						   tb[IPSET_ATTR_PACKETS]));
 	}
 	if (tb[IPSET_ATTR_COMMENT]) {
-		if (!(set->extensions & IPSET_EXT_COMMENT))
+		if (!SET_WITH_COMMENT(set))
 			return -IPSET_ERR_COMMENT;
 		ext->comment = ip_set_comment_uget(tb[IPSET_ATTR_COMMENT]);
 	}
 	if (tb[IPSET_ATTR_SKBMARK]) {
-		if (!(set->extensions & IPSET_EXT_SKBINFO))
+		if (!SET_WITH_SKBINFO(set))
 			return -IPSET_ERR_SKBINFO;
 		fullmark = be64_to_cpu(nla_get_be64(tb[IPSET_ATTR_SKBMARK]));
 		ext->skbmark = fullmark >> 32;
 		ext->skbmarkmask = fullmark & 0xffffffff;
 	}
 	if (tb[IPSET_ATTR_SKBPRIO]) {
-		if (!(set->extensions & IPSET_EXT_SKBINFO))
+		if (!SET_WITH_SKBINFO(set))
 			return -IPSET_ERR_SKBINFO;
 		ext->skbprio = be32_to_cpu(nla_get_be32(
 					    tb[IPSET_ATTR_SKBPRIO]));
 	}
 	if (tb[IPSET_ATTR_SKBQUEUE]) {
-		if (!(set->extensions & IPSET_EXT_SKBINFO))
+		if (!SET_WITH_SKBINFO(set))
 			return -IPSET_ERR_SKBINFO;
 		ext->skbqueue = be16_to_cpu(nla_get_be16(
 					    tb[IPSET_ATTR_SKBQUEUE]));
@@ -432,8 +443,32 @@
 }
 EXPORT_SYMBOL_GPL(ip_set_get_extensions);
 
-/*
- * Creating/destroying/renaming/swapping affect the existence and
+int
+ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
+		      const void *e, bool active)
+{
+	if (SET_WITH_TIMEOUT(set)) {
+		unsigned long *timeout = ext_timeout(e, set);
+
+		if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			htonl(active ? ip_set_timeout_get(timeout)
+				: *timeout)))
+			return -EMSGSIZE;
+	}
+	if (SET_WITH_COUNTER(set) &&
+	    ip_set_put_counter(skb, ext_counter(e, set)))
+		return -EMSGSIZE;
+	if (SET_WITH_COMMENT(set) &&
+	    ip_set_put_comment(skb, ext_comment(e, set)))
+		return -EMSGSIZE;
+	if (SET_WITH_SKBINFO(set) &&
+	    ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
+		return -EMSGSIZE;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ip_set_put_extensions);
+
+/* Creating/destroying/renaming/swapping affect the existence and
  * the properties of a set. All of these can be executed from userspace
  * only and serialized by the nfnl mutex indirectly from nfnetlink.
  *
@@ -460,8 +495,7 @@
 	write_unlock_bh(&ip_set_ref_lock);
 }
 
-/*
- * Add, del and test set entries from kernel.
+/* Add, del and test set entries from kernel.
  *
  * The set behind the index must exist and must be referenced
  * so it can't be destroyed (or changed) under our foot.
@@ -489,23 +523,23 @@
 			dev_net(par->in ? par->in : par->out), index);
 	int ret = 0;
 
-	BUG_ON(set == NULL);
+	BUG_ON(!set);
 	pr_debug("set %s, index %u\n", set->name, index);
 
 	if (opt->dim < set->type->dimension ||
 	    !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
 		return 0;
 
-	read_lock_bh(&set->lock);
+	rcu_read_lock_bh();
 	ret = set->variant->kadt(set, skb, par, IPSET_TEST, opt);
-	read_unlock_bh(&set->lock);
+	rcu_read_unlock_bh();
 
 	if (ret == -EAGAIN) {
 		/* Type requests element to be completed */
 		pr_debug("element must be completed, ADD is triggered\n");
-		write_lock_bh(&set->lock);
+		spin_lock_bh(&set->lock);
 		set->variant->kadt(set, skb, par, IPSET_ADD, opt);
-		write_unlock_bh(&set->lock);
+		spin_unlock_bh(&set->lock);
 		ret = 1;
 	} else {
 		/* --return-nomatch: invert matched element */
@@ -528,16 +562,16 @@
 			dev_net(par->in ? par->in : par->out), index);
 	int ret;
 
-	BUG_ON(set == NULL);
+	BUG_ON(!set);
 	pr_debug("set %s, index %u\n", set->name, index);
 
 	if (opt->dim < set->type->dimension ||
 	    !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
 		return -IPSET_ERR_TYPE_MISMATCH;
 
-	write_lock_bh(&set->lock);
+	spin_lock_bh(&set->lock);
 	ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt);
-	write_unlock_bh(&set->lock);
+	spin_unlock_bh(&set->lock);
 
 	return ret;
 }
@@ -551,23 +585,22 @@
 			dev_net(par->in ? par->in : par->out), index);
 	int ret = 0;
 
-	BUG_ON(set == NULL);
+	BUG_ON(!set);
 	pr_debug("set %s, index %u\n", set->name, index);
 
 	if (opt->dim < set->type->dimension ||
 	    !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
 		return -IPSET_ERR_TYPE_MISMATCH;
 
-	write_lock_bh(&set->lock);
+	spin_lock_bh(&set->lock);
 	ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt);
-	write_unlock_bh(&set->lock);
+	spin_unlock_bh(&set->lock);
 
 	return ret;
 }
 EXPORT_SYMBOL_GPL(ip_set_del);
 
-/*
- * Find set by name, reference it once. The reference makes sure the
+/* Find set by name, reference it once. The reference makes sure the
  * thing pointed to, does not go away under our feet.
  *
  */
@@ -581,7 +614,7 @@
 	rcu_read_lock();
 	for (i = 0; i < inst->ip_set_max; i++) {
 		s = rcu_dereference(inst->ip_set_list)[i];
-		if (s != NULL && STREQ(s->name, name)) {
+		if (s && STRNCMP(s->name, name)) {
 			__ip_set_get(s);
 			index = i;
 			*set = s;
@@ -594,8 +627,7 @@
 }
 EXPORT_SYMBOL_GPL(ip_set_get_byname);
 
-/*
- * If the given set pointer points to a valid set, decrement
+/* If the given set pointer points to a valid set, decrement
  * reference count by 1. The caller shall not assume the index
  * to be valid, after calling this function.
  *
@@ -608,7 +640,7 @@
 
 	rcu_read_lock();
 	set = rcu_dereference(inst->ip_set_list)[index];
-	if (set != NULL)
+	if (set)
 		__ip_set_put(set);
 	rcu_read_unlock();
 }
@@ -622,8 +654,7 @@
 }
 EXPORT_SYMBOL_GPL(ip_set_put_byindex);
 
-/*
- * Get the name of a set behind a set index.
+/* Get the name of a set behind a set index.
  * We assume the set is referenced, so it does exist and
  * can't be destroyed. The set cannot be renamed due to
  * the referencing either.
@@ -634,7 +665,7 @@
 {
 	const struct ip_set *set = ip_set_rcu_get(net, index);
 
-	BUG_ON(set == NULL);
+	BUG_ON(!set);
 	BUG_ON(set->ref == 0);
 
 	/* Referenced, so it's safe */
@@ -642,13 +673,11 @@
 }
 EXPORT_SYMBOL_GPL(ip_set_name_byindex);
 
-/*
- * Routines to call by external subsystems, which do not
+/* Routines to call by external subsystems, which do not
  * call nfnl_lock for us.
  */
 
-/*
- * Find set by index, reference it once. The reference makes sure the
+/* Find set by index, reference it once. The reference makes sure the
  * thing pointed to, does not go away under our feet.
  *
  * The nfnl mutex is used in the function.
@@ -674,8 +703,7 @@
 }
 EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex);
 
-/*
- * If the given set pointer points to a valid set, decrement
+/* If the given set pointer points to a valid set, decrement
  * reference count by 1. The caller shall not assume the index
  * to be valid, after calling this function.
  *
@@ -690,15 +718,14 @@
 	nfnl_lock(NFNL_SUBSYS_IPSET);
 	if (!inst->is_deleted) { /* already deleted from ip_set_net_exit() */
 		set = ip_set(inst, index);
-		if (set != NULL)
+		if (set)
 			__ip_set_put(set);
 	}
 	nfnl_unlock(NFNL_SUBSYS_IPSET);
 }
 EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
 
-/*
- * Communication protocol with userspace over netlink.
+/* Communication protocol with userspace over netlink.
  *
  * The commands are serialized by the nfnl mutex.
  */
@@ -725,7 +752,7 @@
 
 	nlh = nlmsg_put(skb, portid, seq, cmd | (NFNL_SUBSYS_IPSET << 8),
 			sizeof(*nfmsg), flags);
-	if (nlh == NULL)
+	if (!nlh)
 		return NULL;
 
 	nfmsg = nlmsg_data(nlh);
@@ -758,7 +785,7 @@
 	*id = IPSET_INVALID_ID;
 	for (i = 0; i < inst->ip_set_max; i++) {
 		set = ip_set(inst, i);
-		if (set != NULL && STREQ(set->name, name)) {
+		if (set && STRNCMP(set->name, name)) {
 			*id = i;
 			break;
 		}
@@ -784,10 +811,10 @@
 	*index = IPSET_INVALID_ID;
 	for (i = 0;  i < inst->ip_set_max; i++) {
 		s = ip_set(inst, i);
-		if (s == NULL) {
+		if (!s) {
 			if (*index == IPSET_INVALID_ID)
 				*index = i;
-		} else if (STREQ(name, s->name)) {
+		} else if (STRNCMP(name, s->name)) {
 			/* Name clash */
 			*set = s;
 			return -EEXIST;
@@ -816,18 +843,18 @@
 	struct ip_set_net *inst = ip_set_pernet(net);
 	struct ip_set *set, *clash = NULL;
 	ip_set_id_t index = IPSET_INVALID_ID;
-	struct nlattr *tb[IPSET_ATTR_CREATE_MAX+1] = {};
+	struct nlattr *tb[IPSET_ATTR_CREATE_MAX + 1] = {};
 	const char *name, *typename;
 	u8 family, revision;
 	u32 flags = flag_exist(nlh);
 	int ret = 0;
 
 	if (unlikely(protocol_failed(attr) ||
-		     attr[IPSET_ATTR_SETNAME] == NULL ||
-		     attr[IPSET_ATTR_TYPENAME] == NULL ||
-		     attr[IPSET_ATTR_REVISION] == NULL ||
-		     attr[IPSET_ATTR_FAMILY] == NULL ||
-		     (attr[IPSET_ATTR_DATA] != NULL &&
+		     !attr[IPSET_ATTR_SETNAME] ||
+		     !attr[IPSET_ATTR_TYPENAME] ||
+		     !attr[IPSET_ATTR_REVISION] ||
+		     !attr[IPSET_ATTR_FAMILY] ||
+		     (attr[IPSET_ATTR_DATA] &&
 		      !flag_nested(attr[IPSET_ATTR_DATA]))))
 		return -IPSET_ERR_PROTOCOL;
 
@@ -838,33 +865,29 @@
 	pr_debug("setname: %s, typename: %s, family: %s, revision: %u\n",
 		 name, typename, family_name(family), revision);
 
-	/*
-	 * First, and without any locks, allocate and initialize
+	/* First, and without any locks, allocate and initialize
 	 * a normal base set structure.
 	 */
-	set = kzalloc(sizeof(struct ip_set), GFP_KERNEL);
+	set = kzalloc(sizeof(*set), GFP_KERNEL);
 	if (!set)
 		return -ENOMEM;
-	rwlock_init(&set->lock);
+	spin_lock_init(&set->lock);
 	strlcpy(set->name, name, IPSET_MAXNAMELEN);
 	set->family = family;
 	set->revision = revision;
 
-	/*
-	 * Next, check that we know the type, and take
+	/* Next, check that we know the type, and take
 	 * a reference on the type, to make sure it stays available
 	 * while constructing our new set.
 	 *
 	 * After referencing the type, we try to create the type
 	 * specific part of the set without holding any locks.
 	 */
-	ret = find_set_type_get(typename, family, revision, &(set->type));
+	ret = find_set_type_get(typename, family, revision, &set->type);
 	if (ret)
 		goto out;
 
-	/*
-	 * Without holding any locks, create private part.
-	 */
+	/* Without holding any locks, create private part. */
 	if (attr[IPSET_ATTR_DATA] &&
 	    nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
 			     set->type->create_policy)) {
@@ -878,8 +901,7 @@
 
 	/* BTW, ret==0 here. */
 
-	/*
-	 * Here, we have a valid, constructed set and we are protected
+	/* Here, we have a valid, constructed set and we are protected
 	 * by the nfnl mutex. Find the first free index in ip_set_list
 	 * and check clashing.
 	 */
@@ -887,7 +909,7 @@
 	if (ret == -EEXIST) {
 		/* If this is the same set and requested, ignore error */
 		if ((flags & IPSET_FLAG_EXIST) &&
-		    STREQ(set->type->name, clash->type->name) &&
+		    STRNCMP(set->type->name, clash->type->name) &&
 		    set->type->family == clash->type->family &&
 		    set->type->revision_min == clash->type->revision_min &&
 		    set->type->revision_max == clash->type->revision_max &&
@@ -902,7 +924,7 @@
 			/* Wraparound */
 			goto cleanup;
 
-		list = kzalloc(sizeof(struct ip_set *) * i, GFP_KERNEL);
+		list = kcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
 		if (!list)
 			goto cleanup;
 		/* nfnl mutex is held, both lists are valid */
@@ -916,12 +938,11 @@
 		inst->ip_set_max = i;
 		kfree(tmp);
 		ret = 0;
-	} else if (ret)
+	} else if (ret) {
 		goto cleanup;
+	}
 
-	/*
-	 * Finally! Add our shiny new set to the list, and be done.
-	 */
+	/* Finally! Add our shiny new set to the list, and be done. */
 	pr_debug("create: '%s' created with index %u!\n", set->name, index);
 	ip_set(inst, index) = set;
 
@@ -946,12 +967,9 @@
 };
 
 static void
-ip_set_destroy_set(struct ip_set_net *inst, ip_set_id_t index)
+ip_set_destroy_set(struct ip_set *set)
 {
-	struct ip_set *set = ip_set(inst, index);
-
 	pr_debug("set: %s\n",  set->name);
-	ip_set(inst, index) = NULL;
 
 	/* Must call it without holding any lock */
 	set->variant->destroy(set);
@@ -986,30 +1004,36 @@
 	if (!attr[IPSET_ATTR_SETNAME]) {
 		for (i = 0; i < inst->ip_set_max; i++) {
 			s = ip_set(inst, i);
-			if (s != NULL && s->ref) {
+			if (s && s->ref) {
 				ret = -IPSET_ERR_BUSY;
 				goto out;
 			}
 		}
+		inst->is_destroyed = true;
 		read_unlock_bh(&ip_set_ref_lock);
 		for (i = 0; i < inst->ip_set_max; i++) {
 			s = ip_set(inst, i);
-			if (s != NULL)
-				ip_set_destroy_set(inst, i);
+			if (s) {
+				ip_set(inst, i) = NULL;
+				ip_set_destroy_set(s);
+			}
 		}
+		/* Modified by ip_set_destroy() only, which is serialized */
+		inst->is_destroyed = false;
 	} else {
 		s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
 				    &i);
-		if (s == NULL) {
+		if (!s) {
 			ret = -ENOENT;
 			goto out;
 		} else if (s->ref) {
 			ret = -IPSET_ERR_BUSY;
 			goto out;
 		}
+		ip_set(inst, i) = NULL;
 		read_unlock_bh(&ip_set_ref_lock);
 
-		ip_set_destroy_set(inst, i);
+		ip_set_destroy_set(s);
 	}
 	return 0;
 out:
@@ -1024,9 +1048,9 @@
 {
 	pr_debug("set: %s\n",  set->name);
 
-	write_lock_bh(&set->lock);
+	spin_lock_bh(&set->lock);
 	set->variant->flush(set);
-	write_unlock_bh(&set->lock);
+	spin_unlock_bh(&set->lock);
 }
 
 static int
@@ -1044,12 +1068,12 @@
 	if (!attr[IPSET_ATTR_SETNAME]) {
 		for (i = 0; i < inst->ip_set_max; i++) {
 			s = ip_set(inst, i);
-			if (s != NULL)
+			if (s)
 				ip_set_flush_set(s);
 		}
 	} else {
 		s = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
-		if (s == NULL)
+		if (!s)
 			return -ENOENT;
 
 		ip_set_flush_set(s);
@@ -1081,12 +1105,12 @@
 	int ret = 0;
 
 	if (unlikely(protocol_failed(attr) ||
-		     attr[IPSET_ATTR_SETNAME] == NULL ||
-		     attr[IPSET_ATTR_SETNAME2] == NULL))
+		     !attr[IPSET_ATTR_SETNAME] ||
+		     !attr[IPSET_ATTR_SETNAME2]))
 		return -IPSET_ERR_PROTOCOL;
 
 	set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
-	if (set == NULL)
+	if (!set)
 		return -ENOENT;
 
 	read_lock_bh(&ip_set_ref_lock);
@@ -1098,7 +1122,7 @@
 	name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
 	for (i = 0; i < inst->ip_set_max; i++) {
 		s = ip_set(inst, i);
-		if (s != NULL && STREQ(s->name, name2)) {
+		if (s && STRNCMP(s->name, name2)) {
 			ret = -IPSET_ERR_EXIST_SETNAME2;
 			goto out;
 		}
@@ -1130,23 +1154,24 @@
 	char from_name[IPSET_MAXNAMELEN];
 
 	if (unlikely(protocol_failed(attr) ||
-		     attr[IPSET_ATTR_SETNAME] == NULL ||
-		     attr[IPSET_ATTR_SETNAME2] == NULL))
+		     !attr[IPSET_ATTR_SETNAME] ||
+		     !attr[IPSET_ATTR_SETNAME2]))
 		return -IPSET_ERR_PROTOCOL;
 
 	from = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
 			       &from_id);
-	if (from == NULL)
+	if (!from)
 		return -ENOENT;
 
 	to = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME2]),
 			     &to_id);
-	if (to == NULL)
+	if (!to)
 		return -IPSET_ERR_EXIST_SETNAME2;
 
 	/* Features must not change.
-	 * Not an artificial restriction anymore, as we must prevent
-	 * possible loops created by swapping in setlist type of sets. */
+	 * Not an artificial restriction anymore, as we must prevent
+	 * possible loops created by swapping in setlist type of sets.
+	 */
 	if (!(from->type->features == to->type->features &&
 	      from->family == to->family))
 		return -IPSET_ERR_TYPE_MISMATCH;
@@ -1177,12 +1202,16 @@
 static int
 ip_set_dump_done(struct netlink_callback *cb)
 {
-	struct ip_set_net *inst = (struct ip_set_net *)cb->args[IPSET_CB_NET];
 	if (cb->args[IPSET_CB_ARG0]) {
-		pr_debug("release set %s\n",
-			 ip_set(inst, cb->args[IPSET_CB_INDEX])->name);
-		__ip_set_put_byindex(inst,
-			(ip_set_id_t) cb->args[IPSET_CB_INDEX]);
+		struct ip_set_net *inst =
+			(struct ip_set_net *)cb->args[IPSET_CB_NET];
+		ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
+		struct ip_set *set = ip_set(inst, index);
+
+		if (set->variant->uref)
+			set->variant->uref(set, cb, false);
+		pr_debug("release set %s\n", set->name);
+		__ip_set_put_byindex(inst, index);
 	}
 	return 0;
 }
@@ -1204,7 +1233,7 @@
 {
 	struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
 	int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
-	struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
+	struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1];
 	struct nlattr *attr = (void *)nlh + min_len;
 	u32 dump_type;
 	ip_set_id_t index;
@@ -1213,27 +1242,23 @@
 	nla_parse(cda, IPSET_ATTR_CMD_MAX,
 		  attr, nlh->nlmsg_len - min_len, ip_set_setname_policy);
 
-	/* cb->args[IPSET_CB_NET]:	net namespace
-	 *         [IPSET_CB_DUMP]:	dump single set/all sets
-	 *         [IPSET_CB_INDEX]: 	set index
-	 *         [IPSET_CB_ARG0]:	type specific
-	 */
-
 	if (cda[IPSET_ATTR_SETNAME]) {
 		struct ip_set *set;
 
 		set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]),
 				      &index);
-		if (set == NULL)
+		if (!set)
 			return -ENOENT;
 
 		dump_type = DUMP_ONE;
 		cb->args[IPSET_CB_INDEX] = index;
-	} else
+	} else {
 		dump_type = DUMP_ALL;
+	}
 
 	if (cda[IPSET_ATTR_FLAGS]) {
 		u32 f = ip_set_get_h32(cda[IPSET_ATTR_FLAGS]);
+
 		dump_type |= (f << 16);
 	}
 	cb->args[IPSET_CB_NET] = (unsigned long)inst;
@@ -1251,6 +1276,7 @@
 	unsigned int flags = NETLINK_CB(cb->skb).portid ? NLM_F_MULTI : 0;
 	struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
 	u32 dump_type, dump_flags;
+	bool is_destroyed;
 	int ret = 0;
 
 	if (!cb->args[IPSET_CB_DUMP]) {
@@ -1258,7 +1284,8 @@
 		if (ret < 0) {
 			nlh = nlmsg_hdr(cb->skb);
 			/* We have to create and send the error message
-			 * manually :-( */
+			 * manually :-(
+			 */
 			if (nlh->nlmsg_flags & NLM_F_ACK)
 				netlink_ack(cb->skb, nlh, ret);
 			return ret;
@@ -1276,13 +1303,21 @@
 	pr_debug("dump type, flag: %u %u index: %ld\n",
 		 dump_type, dump_flags, cb->args[IPSET_CB_INDEX]);
 	for (; cb->args[IPSET_CB_INDEX] < max; cb->args[IPSET_CB_INDEX]++) {
-		index = (ip_set_id_t) cb->args[IPSET_CB_INDEX];
+		index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
+		write_lock_bh(&ip_set_ref_lock);
 		set = ip_set(inst, index);
-		if (set == NULL) {
+		is_destroyed = inst->is_destroyed;
+		if (!set || is_destroyed) {
+			write_unlock_bh(&ip_set_ref_lock);
 			if (dump_type == DUMP_ONE) {
 				ret = -ENOENT;
 				goto out;
 			}
+			if (is_destroyed) {
+				/* All sets are just being destroyed */
+				ret = 0;
+				goto out;
+			}
 			continue;
 		}
 		/* When dumping all sets, we must dump "sorted"
@@ -1290,14 +1325,17 @@
 		 */
 		if (dump_type != DUMP_ONE &&
 		    ((dump_type == DUMP_ALL) ==
-		     !!(set->type->features & IPSET_DUMP_LAST)))
+		     !!(set->type->features & IPSET_DUMP_LAST))) {
+			write_unlock_bh(&ip_set_ref_lock);
 			continue;
+		}
 		pr_debug("List set: %s\n", set->name);
 		if (!cb->args[IPSET_CB_ARG0]) {
 			/* Start listing: make sure set won't be destroyed */
 			pr_debug("reference set\n");
-			__ip_set_get(set);
+			set->ref++;
 		}
+		write_unlock_bh(&ip_set_ref_lock);
 		nlh = start_msg(skb, NETLINK_CB(cb->skb).portid,
 				cb->nlh->nlmsg_seq, flags,
 				IPSET_CMD_LIST);
@@ -1325,11 +1363,13 @@
 				goto release_refcount;
 			if (dump_flags & IPSET_FLAG_LIST_HEADER)
 				goto next_set;
+			if (set->variant->uref)
+				set->variant->uref(set, cb, true);
 			/* Fall through and add elements */
 		default:
-			read_lock_bh(&set->lock);
+			rcu_read_lock_bh();
 			ret = set->variant->list(set, skb, cb);
-			read_unlock_bh(&set->lock);
+			rcu_read_unlock_bh();
 			if (!cb->args[IPSET_CB_ARG0])
 				/* Set is done, proceed with next one */
 				goto next_set;
@@ -1341,6 +1381,8 @@
 		dump_type = DUMP_LAST;
 		cb->args[IPSET_CB_DUMP] = dump_type | (dump_flags << 16);
 		cb->args[IPSET_CB_INDEX] = 0;
+		if (set && set->variant->uref)
+			set->variant->uref(set, cb, false);
 		goto dump_last;
 	}
 	goto out;
@@ -1355,7 +1397,10 @@
 release_refcount:
 	/* If there was an error or set is done, release set */
 	if (ret || !cb->args[IPSET_CB_ARG0]) {
-		pr_debug("release set %s\n", ip_set(inst, index)->name);
+		set = ip_set(inst, index);
+		if (set->variant->uref)
+			set->variant->uref(set, cb, false);
+		pr_debug("release set %s\n", set->name);
 		__ip_set_put_byindex(inst, index);
 		cb->args[IPSET_CB_ARG0] = 0;
 	}
@@ -1407,9 +1452,9 @@
 	bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
 
 	do {
-		write_lock_bh(&set->lock);
+		spin_lock_bh(&set->lock);
 		ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
-		write_unlock_bh(&set->lock);
+		spin_unlock_bh(&set->lock);
 		retried = true;
 	} while (ret == -EAGAIN &&
 		 set->variant->resize &&
@@ -1425,12 +1470,12 @@
 		size_t payload = min(SIZE_MAX,
 				     sizeof(*errmsg) + nlmsg_len(nlh));
 		int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
-		struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
+		struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1];
 		struct nlattr *cmdattr;
 		u32 *errline;
 
 		skb2 = nlmsg_new(payload, GFP_KERNEL);
-		if (skb2 == NULL)
+		if (!skb2)
 			return -ENOMEM;
 		rep = __nlmsg_put(skb2, NETLINK_CB(skb).portid,
 				  nlh->nlmsg_seq, NLMSG_ERROR, payload, 0);
@@ -1447,7 +1492,8 @@
 
 		*errline = lineno;
 
-		netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
+		netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid,
+				MSG_DONTWAIT);
 		/* Signal netlink not to send its ACK/errmsg.  */
 		return -EINTR;
 	}
@@ -1462,25 +1508,25 @@
 {
 	struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
 	struct ip_set *set;
-	struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+	struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
 	const struct nlattr *nla;
 	u32 flags = flag_exist(nlh);
 	bool use_lineno;
 	int ret = 0;
 
 	if (unlikely(protocol_failed(attr) ||
-		     attr[IPSET_ATTR_SETNAME] == NULL ||
+		     !attr[IPSET_ATTR_SETNAME] ||
 		     !((attr[IPSET_ATTR_DATA] != NULL) ^
 		       (attr[IPSET_ATTR_ADT] != NULL)) ||
-		     (attr[IPSET_ATTR_DATA] != NULL &&
+		     (attr[IPSET_ATTR_DATA] &&
 		      !flag_nested(attr[IPSET_ATTR_DATA])) ||
-		     (attr[IPSET_ATTR_ADT] != NULL &&
+		     (attr[IPSET_ATTR_ADT] &&
 		      (!flag_nested(attr[IPSET_ATTR_ADT]) ||
-		       attr[IPSET_ATTR_LINENO] == NULL))))
+		       !attr[IPSET_ATTR_LINENO]))))
 		return -IPSET_ERR_PROTOCOL;
 
 	set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
-	if (set == NULL)
+	if (!set)
 		return -ENOENT;
 
 	use_lineno = !!attr[IPSET_ATTR_LINENO];
@@ -1517,25 +1563,25 @@
 {
 	struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
 	struct ip_set *set;
-	struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+	struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
 	const struct nlattr *nla;
 	u32 flags = flag_exist(nlh);
 	bool use_lineno;
 	int ret = 0;
 
 	if (unlikely(protocol_failed(attr) ||
-		     attr[IPSET_ATTR_SETNAME] == NULL ||
+		     !attr[IPSET_ATTR_SETNAME] ||
 		     !((attr[IPSET_ATTR_DATA] != NULL) ^
 		       (attr[IPSET_ATTR_ADT] != NULL)) ||
-		     (attr[IPSET_ATTR_DATA] != NULL &&
+		     (attr[IPSET_ATTR_DATA] &&
 		      !flag_nested(attr[IPSET_ATTR_DATA])) ||
-		     (attr[IPSET_ATTR_ADT] != NULL &&
+		     (attr[IPSET_ATTR_ADT] &&
 		      (!flag_nested(attr[IPSET_ATTR_ADT]) ||
-		       attr[IPSET_ATTR_LINENO] == NULL))))
+		       !attr[IPSET_ATTR_LINENO]))))
 		return -IPSET_ERR_PROTOCOL;
 
 	set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
-	if (set == NULL)
+	if (!set)
 		return -ENOENT;
 
 	use_lineno = !!attr[IPSET_ATTR_LINENO];
@@ -1572,26 +1618,26 @@
 {
 	struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
 	struct ip_set *set;
-	struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+	struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
 	int ret = 0;
 
 	if (unlikely(protocol_failed(attr) ||
-		     attr[IPSET_ATTR_SETNAME] == NULL ||
-		     attr[IPSET_ATTR_DATA] == NULL ||
+		     !attr[IPSET_ATTR_SETNAME] ||
+		     !attr[IPSET_ATTR_DATA] ||
 		     !flag_nested(attr[IPSET_ATTR_DATA])))
 		return -IPSET_ERR_PROTOCOL;
 
 	set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
-	if (set == NULL)
+	if (!set)
 		return -ENOENT;
 
 	if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA],
 			     set->type->adt_policy))
 		return -IPSET_ERR_PROTOCOL;
 
-	read_lock_bh(&set->lock);
+	rcu_read_lock_bh();
 	ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0, 0);
-	read_unlock_bh(&set->lock);
+	rcu_read_unlock_bh();
 	/* Userspace can't trigger element to be re-added */
 	if (ret == -EAGAIN)
 		ret = 1;
@@ -1613,15 +1659,15 @@
 	int ret = 0;
 
 	if (unlikely(protocol_failed(attr) ||
-		     attr[IPSET_ATTR_SETNAME] == NULL))
+		     !attr[IPSET_ATTR_SETNAME]))
 		return -IPSET_ERR_PROTOCOL;
 
 	set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
-	if (set == NULL)
+	if (!set)
 		return -ENOENT;
 
 	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-	if (skb2 == NULL)
+	if (!skb2)
 		return -ENOMEM;
 
 	nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
@@ -1670,8 +1716,8 @@
 	int ret = 0;
 
 	if (unlikely(protocol_failed(attr) ||
-		     attr[IPSET_ATTR_TYPENAME] == NULL ||
-		     attr[IPSET_ATTR_FAMILY] == NULL))
+		     !attr[IPSET_ATTR_TYPENAME] ||
+		     !attr[IPSET_ATTR_FAMILY]))
 		return -IPSET_ERR_PROTOCOL;
 
 	family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
@@ -1681,7 +1727,7 @@
 		return ret;
 
 	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-	if (skb2 == NULL)
+	if (!skb2)
 		return -ENOMEM;
 
 	nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
@@ -1726,11 +1772,11 @@
 	struct nlmsghdr *nlh2;
 	int ret = 0;
 
-	if (unlikely(attr[IPSET_ATTR_PROTOCOL] == NULL))
+	if (unlikely(!attr[IPSET_ATTR_PROTOCOL]))
 		return -IPSET_ERR_PROTOCOL;
 
 	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-	if (skb2 == NULL)
+	if (!skb2)
 		return -ENOMEM;
 
 	nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
@@ -1858,7 +1904,7 @@
 		ret = -EFAULT;
 		goto done;
 	}
-	op = (unsigned int *) data;
+	op = (unsigned int *)data;
 
 	if (*op < IP_SET_OP_VERSION) {
 		/* Check the version at the beginning of operations */
@@ -1970,10 +2016,11 @@
 	if (inst->ip_set_max >= IPSET_INVALID_ID)
 		inst->ip_set_max = IPSET_INVALID_ID - 1;
 
-	list = kzalloc(sizeof(struct ip_set *) * inst->ip_set_max, GFP_KERNEL);
+	list = kcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
 	if (!list)
 		return -ENOMEM;
-	inst->is_deleted = 0;
+	inst->is_deleted = false;
+	inst->is_destroyed = false;
 	rcu_assign_pointer(inst->ip_set_list, list);
 	return 0;
 }
@@ -1986,12 +2033,14 @@
 	struct ip_set *set = NULL;
 	ip_set_id_t i;
 
-	inst->is_deleted = 1; /* flag for ip_set_nfnl_put */
+	inst->is_deleted = true; /* flag for ip_set_nfnl_put */
 
 	for (i = 0; i < inst->ip_set_max; i++) {
 		set = ip_set(inst, i);
-		if (set != NULL)
-			ip_set_destroy_set(inst, i);
+		if (set) {
+			ip_set(inst, i) = NULL;
+			ip_set_destroy_set(set);
+		}
 	}
 	kfree(rcu_dereference_protected(inst->ip_set_list, 1));
 }
@@ -2003,11 +2052,11 @@
 	.size	= sizeof(struct ip_set_net)
 };
 
-
 static int __init
 ip_set_init(void)
 {
 	int ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+
 	if (ret != 0) {
 		pr_err("ip_set: cannot register with nfnetlink.\n");
 		return ret;
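Taken together, the locking conversion in this file follows a single
pattern: the per-set rwlock becomes a spinlock that only writers take,
while kernel-side readers (test, list) run under RCU. Condensed from the
ip_set_test()/ip_set_add() hunks above:

	rcu_read_lock_bh();			/* lockless reader */
	ret = set->variant->kadt(set, skb, par, IPSET_TEST, opt);
	rcu_read_unlock_bh();

	spin_lock_bh(&set->lock);		/* writers still serialize */
	ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt);
	spin_unlock_bh(&set->lock);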
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index 29fb01d..42c3e3b 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -30,7 +30,7 @@
 		const struct tcphdr *th;
 
 		th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph);
-		if (th == NULL)
+		if (!th)
 			/* No choice either */
 			return false;
 
@@ -42,7 +42,7 @@
 		const sctp_sctphdr_t *sh;
 
 		sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh);
-		if (sh == NULL)
+		if (!sh)
 			/* No choice either */
 			return false;
 
@@ -55,7 +55,7 @@
 		const struct udphdr *uh;
 
 		uh = skb_header_pointer(skb, protooff, sizeof(_udph), &_udph);
-		if (uh == NULL)
+		if (!uh)
 			/* No choice either */
 			return false;
 
@@ -67,7 +67,7 @@
 		const struct icmphdr *ic;
 
 		ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
-		if (ic == NULL)
+		if (!ic)
 			return false;
 
 		*port = (__force __be16)htons((ic->type << 8) | ic->code);
@@ -78,7 +78,7 @@
 		const struct icmp6hdr *ic;
 
 		ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
-		if (ic == NULL)
+		if (!ic)
 			return false;
 
 		*port = (__force __be16)
@@ -98,7 +98,7 @@
 		    __be16 *port, u8 *proto)
 {
 	const struct iphdr *iph = ip_hdr(skb);
-	unsigned int protooff = ip_hdrlen(skb);
+	unsigned int protooff = skb_network_offset(skb) + ip_hdrlen(skb);
 	int protocol = iph->protocol;
 
 	/* See comments at tcp_match in ip_tables.c */
@@ -116,7 +116,8 @@
 			return false;
 		default:
 			/* Other protocols don't have ports,
-			   so we can match fragments */
+			 * so we can match fragments.
+			 */
 			*proto = protocol;
 			return true;
 		}
@@ -135,7 +136,9 @@
 	__be16 frag_off = 0;
 
 	nexthdr = ipv6_hdr(skb)->nexthdr;
-	protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
+	protoff = ipv6_skip_exthdr(skb,
+				   skb_network_offset(skb) +
+					sizeof(struct ipv6hdr), &nexthdr,
 				   &frag_off);
 	if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
 		return false;
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 974ff38..afe905c 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -10,19 +10,19 @@
 
 #include <linux/rcupdate.h>
 #include <linux/jhash.h>
+#include <linux/types.h>
 #include <linux/netfilter/ipset/ip_set_timeout.h>
-#ifndef rcu_dereference_bh
-#define rcu_dereference_bh(p)	rcu_dereference(p)
-#endif
+
+#define __ipset_dereference_protected(p, c)	rcu_dereference_protected(p, c)
+#define ipset_dereference_protected(p, set) \
+	__ipset_dereference_protected(p, spin_is_locked(&(set)->lock))
 
 #define rcu_dereference_bh_nfnl(p)	rcu_dereference_bh_check(p, 1)
 
 /* Hashing which uses arrays to resolve clashing. The hash table is resized
  * (doubled) when searching becomes too long.
  * Internally jhash is used with the assumption that the size of the
- * stored data is a multiple of sizeof(u32). If storage supports timeout,
- * the timeout field must be the last one in the data structure - that field
- * is ignored when computing the hash key.
+ * stored data is a multiple of sizeof(u32).
  *
  * Readers and resizing
  *
@@ -35,7 +35,9 @@
 /* Number of elements to store in an initial array block */
 #define AHASH_INIT_SIZE			4
 /* Max number of elements to store in an array block */
-#define AHASH_MAX_SIZE			(3*AHASH_INIT_SIZE)
+#define AHASH_MAX_SIZE			(3 * AHASH_INIT_SIZE)
+/* Max number of elements in the array block when tuned */
+#define AHASH_MAX_TUNED			64
 
 /* Max number of elements can be tuned */
 #ifdef IP_SET_HASH_WITH_MULTI
@@ -53,8 +55,9 @@
 	/* Currently, at listing one hash bucket must fit into a message.
 	 * Therefore we have a hard limit here.
 	 */
-	return n > curr && n <= 64 ? n : curr;
+	return n > curr && n <= AHASH_MAX_TUNED ? n : curr;
 }
+
 #define TUNE_AHASH_MAX(h, multi)	\
 	((h)->ahash_max = tune_ahash_max((h)->ahash_max, multi))
 #else
@@ -64,18 +67,23 @@
 
 /* A hash bucket */
 struct hbucket {
-	void *value;		/* the array of the values */
+	struct rcu_head rcu;	/* for kfree_rcu() */
+	/* Which positions are used in the array */
+	DECLARE_BITMAP(used, AHASH_MAX_TUNED);
 	u8 size;		/* size of the array */
 	u8 pos;			/* position of the first free entry */
-};
+	unsigned char value[0];	/* the array of the values */
+} __attribute__ ((aligned));
 
 /* The hash table: the table size stored here in order to make resizing easy */
 struct htable {
+	atomic_t ref;		/* References for resizing */
+	atomic_t uref;		/* References for dumping */
 	u8 htable_bits;		/* size of hash table == 2^htable_bits */
-	struct hbucket bucket[0]; /* hashtable buckets */
+	struct hbucket __rcu *bucket[0]; /* hashtable buckets */
 };
 
-#define hbucket(h, i)		(&((h)->bucket[i]))
+#define hbucket(h, i)		((h)->bucket[i])
 
 #ifndef IPSET_NET_COUNT
 #define IPSET_NET_COUNT		1
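
The hunk above is the heart of the rework: a bucket is no longer a
pointer to a separately allocated value array but one RCU-managed block
whose `used' bitmap records which slots hold live entries, so a delete
can clear a bit instead of shuffling elements under lockless readers.
A minimal userspace sketch of the slot bookkeeping (SLOTS, bucket and
bucket_add are invented names; the kernel stores dsize-byte blobs in a
flexible array instead of u32 values):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SLOTS 8

struct bucket {
	uint8_t used;		/* bitmap: which positions are live */
	uint8_t pos;		/* first never-used position */
	uint32_t elem[SLOTS];	/* the value array */
};

static bool bucket_add(struct bucket *b, uint32_t v)
{
	int i;

	/* Reuse the first deleted slot, as mtype_add() does */
	for (i = 0; i < b->pos; i++)
		if (!(b->used & (1 << i))) {
			b->elem[i] = v;
			b->used |= 1 << i;
			return true;
		}
	if (b->pos >= SLOTS)
		return false;	/* full: grow the bucket or rehash */
	b->elem[b->pos] = v;
	b->used |= 1 << b->pos++;
	return true;
}

int main(void)
{
	struct bucket b = { 0 };
	int i;

	bucket_add(&b, 10);
	bucket_add(&b, 20);
	b.used &= ~1u;		/* lazy delete: just clear bit 0 */
	bucket_add(&b, 30);	/* lands in the reused slot 0 */
	for (i = 0; i < b.pos; i++)
		if (b.used & (1 << i))
			printf("slot %d: %u\n", i, b.elem[i]);
	return 0;
}
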
@@ -83,8 +91,8 @@
 
 /* Book-keeping of the prefixes added to the set */
 struct net_prefixes {
-	u32 nets[IPSET_NET_COUNT]; /* number of elements per cidr */
-	u8 cidr[IPSET_NET_COUNT];  /* the different cidr values in the set */
+	u32 nets[IPSET_NET_COUNT]; /* number of elements for this cidr */
+	u8 cidr[IPSET_NET_COUNT];  /* the cidr value */
 };
 
 /* Compute the hash table size */
@@ -97,11 +105,11 @@
 	if (hbits > 31)
 		return 0;
 	hsize = jhash_size(hbits);
-	if ((((size_t)-1) - sizeof(struct htable))/sizeof(struct hbucket)
+	if ((((size_t)-1) - sizeof(struct htable)) / sizeof(struct hbucket *)
 	    < hsize)
 		return 0;
 
-	return hsize * sizeof(struct hbucket) + sizeof(struct htable);
+	return hsize * sizeof(struct hbucket *) + sizeof(struct htable);
 }
 
 /* Compute htable_bits from the user input parameter hashsize */
@@ -110,6 +118,7 @@
 {
 	/* Assume that hashsize == 2^htable_bits */
 	u8 bits = fls(hashsize - 1);
+
 	if (jhash_size(bits) != hashsize)
 		/* Round up to the first 2^n value */
 		bits = fls(hashsize);
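
htable_size() above refuses any table whose bucket-pointer array could
not be expressed in size_t without overflow, and htable_bits() rounds
the user-supplied hashsize up to a power of two. A userspace sketch of
the rounding (fls_u32() is a local stand-in for the kernel's fls()):

#include <stdio.h>

static int fls_u32(unsigned int x)	/* highest set bit, 1-based */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

#define jhash_size(n)	((unsigned int)1 << (n))

static unsigned char htable_bits(unsigned int hashsize)
{
	unsigned char bits = fls_u32(hashsize - 1);

	if (jhash_size(bits) != hashsize)
		/* Round up to the next power of two */
		bits = fls_u32(hashsize);
	return bits;
}

int main(void)
{
	/* 1024 is already 2^10; 1500 rounds up to 2^11 */
	printf("%u -> %u bits\n", 1024u, htable_bits(1024));
	printf("%u -> %u bits\n", 1500u, htable_bits(1500));
	return 0;
}
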
@@ -117,30 +126,6 @@
 	return bits;
 }
 
-static int
-hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
-{
-	if (n->pos >= n->size) {
-		void *tmp;
-
-		if (n->size >= ahash_max)
-			/* Trigger rehashing */
-			return -EAGAIN;
-
-		tmp = kzalloc((n->size + AHASH_INIT_SIZE) * dsize,
-			      GFP_ATOMIC);
-		if (!tmp)
-			return -ENOMEM;
-		if (n->size) {
-			memcpy(tmp, n->value, n->size * dsize);
-			kfree(n->value);
-		}
-		n->value = tmp;
-		n->size += AHASH_INIT_SIZE;
-	}
-	return 0;
-}
-
 #ifdef IP_SET_HASH_WITH_NETS
 #if IPSET_NET_COUNT > 1
 #define __CIDR(cidr, i)		(cidr[i])
@@ -149,17 +134,21 @@
 #endif
 
 /* cidr + 1 is stored in net_prefixes to support /0 */
-#define SCIDR(cidr, i)		(__CIDR(cidr, i) + 1)
+#define NCIDR_PUT(cidr)		((cidr) + 1)
+#define NCIDR_GET(cidr)		((cidr) - 1)
 
 #ifdef IP_SET_HASH_WITH_NETS_PACKED
 /* When cidr is packed with nomatch, cidr - 1 is stored in the data entry */
-#define GCIDR(cidr, i)		(__CIDR(cidr, i) + 1)
-#define NCIDR(cidr)		(cidr)
+#define DCIDR_PUT(cidr)		((cidr) - 1)
+#define DCIDR_GET(cidr, i)	(__CIDR(cidr, i) + 1)
 #else
-#define GCIDR(cidr, i)		(__CIDR(cidr, i))
-#define NCIDR(cidr)		(cidr - 1)
+#define DCIDR_PUT(cidr)		(cidr)
+#define DCIDR_GET(cidr, i)	__CIDR(cidr, i)
 #endif
 
+#define INIT_CIDR(cidr, host_mask)	\
+	DCIDR_PUT(((cidr) ? NCIDR_GET(cidr) : host_mask))
+
 #define SET_HOST_MASK(family)	(family == AF_INET ? 32 : 128)
 
 #ifdef IP_SET_HASH_WITH_NET0
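
The old SCIDR/GCIDR/NCIDR trio becomes two explicit PUT/GET pairs
above: the per-set bookkeeping always stores cidr + 1 (so /0 becomes
representable), while a data entry packed together with the nomatch
flag stores cidr - 1. A sketch of the two round-trips (plain C, with
the packed single-cidr variant chosen unconditionally for the demo):

#include <stdio.h>

#define NCIDR_PUT(cidr)	((cidr) + 1)	/* bookkeeping encoding */
#define NCIDR_GET(cidr)	((cidr) - 1)
#define DCIDR_PUT(cidr)	((cidr) - 1)	/* packed data-entry encoding */
#define DCIDR_GET(cidr)	((cidr) + 1)

int main(void)
{
	int net = NCIDR_PUT(0);		/* a /0 network, stored as 1 */
	int data = DCIDR_PUT(24);	/* a /24 packed as 23 */

	printf("net %d -> /%d, data %d -> /%d\n",
	       net, NCIDR_GET(net), data, DCIDR_GET(data));
	return 0;
}
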
@@ -180,6 +169,7 @@
 #undef mtype_data_equal
 #undef mtype_do_data_match
 #undef mtype_data_set_flags
+#undef mtype_data_reset_elem
 #undef mtype_data_reset_flags
 #undef mtype_data_netmask
 #undef mtype_data_list
@@ -193,7 +183,6 @@
 #undef mtype_ahash_memsize
 #undef mtype_flush
 #undef mtype_destroy
-#undef mtype_gc_init
 #undef mtype_same_set
 #undef mtype_kadt
 #undef mtype_uadt
@@ -203,6 +192,7 @@
 #undef mtype_del
 #undef mtype_test_cidrs
 #undef mtype_test
+#undef mtype_uref
 #undef mtype_expire
 #undef mtype_resize
 #undef mtype_head
@@ -227,6 +217,7 @@
 #define mtype_data_list		IPSET_TOKEN(MTYPE, _data_list)
 #define mtype_data_next		IPSET_TOKEN(MTYPE, _data_next)
 #define mtype_elem		IPSET_TOKEN(MTYPE, _elem)
+
 #define mtype_ahash_destroy	IPSET_TOKEN(MTYPE, _ahash_destroy)
 #define mtype_ext_cleanup	IPSET_TOKEN(MTYPE, _ext_cleanup)
 #define mtype_add_cidr		IPSET_TOKEN(MTYPE, _add_cidr)
@@ -234,7 +225,6 @@
 #define mtype_ahash_memsize	IPSET_TOKEN(MTYPE, _ahash_memsize)
 #define mtype_flush		IPSET_TOKEN(MTYPE, _flush)
 #define mtype_destroy		IPSET_TOKEN(MTYPE, _destroy)
-#define mtype_gc_init		IPSET_TOKEN(MTYPE, _gc_init)
 #define mtype_same_set		IPSET_TOKEN(MTYPE, _same_set)
 #define mtype_kadt		IPSET_TOKEN(MTYPE, _kadt)
 #define mtype_uadt		IPSET_TOKEN(MTYPE, _uadt)
@@ -244,23 +234,36 @@
 #define mtype_del		IPSET_TOKEN(MTYPE, _del)
 #define mtype_test_cidrs	IPSET_TOKEN(MTYPE, _test_cidrs)
 #define mtype_test		IPSET_TOKEN(MTYPE, _test)
+#define mtype_uref		IPSET_TOKEN(MTYPE, _uref)
 #define mtype_expire		IPSET_TOKEN(MTYPE, _expire)
 #define mtype_resize		IPSET_TOKEN(MTYPE, _resize)
 #define mtype_head		IPSET_TOKEN(MTYPE, _head)
 #define mtype_list		IPSET_TOKEN(MTYPE, _list)
 #define mtype_gc		IPSET_TOKEN(MTYPE, _gc)
+#define mtype_gc_init		IPSET_TOKEN(MTYPE, _gc_init)
 #define mtype_variant		IPSET_TOKEN(MTYPE, _variant)
 #define mtype_data_match	IPSET_TOKEN(MTYPE, _data_match)
 
+#ifndef MTYPE
+#error "MTYPE is not defined!"
+#endif
+
+#ifndef HOST_MASK
+#error "HOST_MASK is not defined!"
+#endif
+
 #ifndef HKEY_DATALEN
 #define HKEY_DATALEN		sizeof(struct mtype_elem)
 #endif
 
 #define HKEY(data, initval, htable_bits)			\
-(jhash2((u32 *)(data), HKEY_DATALEN/sizeof(u32), initval)	\
+(jhash2((u32 *)(data), HKEY_DATALEN / sizeof(u32), initval)	\
 	& jhash_mask(htable_bits))
 
 #ifndef htype
+#ifndef HTYPE
+#error "HTYPE is not defined!"
+#endif /* HTYPE */
 #define htype			HTYPE
 
 /* The generic hash structure */
@@ -280,18 +283,16 @@
 #ifdef IP_SET_HASH_WITH_NETMASK
 	u8 netmask;		/* netmask value for subnets to store */
 #endif
-#ifdef IP_SET_HASH_WITH_RBTREE
-	struct rb_root rbtree;
-#endif
 #ifdef IP_SET_HASH_WITH_NETS
 	struct net_prefixes nets[0]; /* book-keeping of prefixes */
 #endif
 };
-#endif
+#endif /* htype */
 
 #ifdef IP_SET_HASH_WITH_NETS
 /* Network cidr size book keeping when the hash stores different
- * sized networks */
+ * sized networks. cidr == real cidr + 1 to support /0.
+ */
 static void
 mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
 {
@@ -299,11 +300,11 @@
 
 	/* Add in increasing prefix order, so larger cidr first */
 	for (i = 0, j = -1; i < nets_length && h->nets[i].cidr[n]; i++) {
-		if (j != -1)
+		if (j != -1) {
 			continue;
-		else if (h->nets[i].cidr[n] < cidr)
+		} else if (h->nets[i].cidr[n] < cidr) {
 			j = i;
-		else if (h->nets[i].cidr[n] == cidr) {
+		} else if (h->nets[i].cidr[n] == cidr) {
 			h->nets[cidr - 1].nets[n]++;
 			return;
 		}
@@ -322,15 +323,15 @@
 	u8 i, j, net_end = nets_length - 1;
 
 	for (i = 0; i < nets_length; i++) {
-	        if (h->nets[i].cidr[n] != cidr)
-	                continue;
-		h->nets[cidr -1].nets[n]--;
-		if (h->nets[cidr -1].nets[n] > 0)
-                        return;
+		if (h->nets[i].cidr[n] != cidr)
+			continue;
+		h->nets[cidr - 1].nets[n]--;
+		if (h->nets[cidr - 1].nets[n] > 0)
+			return;
 		for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
-		        h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
+			h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
 		h->nets[j].cidr[n] = 0;
-                return;
+		return;
 	}
 }
 #endif
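
mtype_add_cidr()/mtype_del_cidr() above keep, per set, a count of
elements for every prefix length plus the list of distinct prefix
lengths sorted most-specific first, which is what lets the lookup side
probe networks in order. A compressed userspace sketch of the insert
side (the cidr+1 encoding for /0 follows the kernel; the flat arrays
and add_cidr() are illustrative only):

#include <stdio.h>
#include <string.h>

#define NLEN 33			/* /0../32, stored as cidr + 1 */

static unsigned int nets[NLEN];	  /* nets[c - 1]: elements with cidr c */
static unsigned char cidrs[NLEN]; /* distinct cidrs, descending, 0-ended */

static void add_cidr(unsigned char cidr)
{
	int i, j = -1;

	for (i = 0; i < NLEN && cidrs[i]; i++) {
		if (j != -1) {
			continue;
		} else if (cidrs[i] < cidr) {
			j = i;	/* insertion point found */
		} else if (cidrs[i] == cidr) {
			nets[cidr - 1]++;
			return;
		}
	}
	if (j != -1)
		memmove(&cidrs[j + 1], &cidrs[j], i - j);
	else
		j = i;
	cidrs[j] = cidr;
	nets[cidr - 1] = 1;
}

int main(void)
{
	int i;

	add_cidr(25);	/* a /24 is stored as 25 */
	add_cidr(17);	/* a /16 is stored as 17 */
	add_cidr(25);
	for (i = 0; cidrs[i]; i++)
		printf("/%d: %u element(s)\n",
		       cidrs[i] - 1, nets[cidrs[i] - 1]);
	return 0;
}
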
@@ -341,15 +342,18 @@
 		    u8 nets_length, size_t dsize)
 {
 	u32 i;
-	size_t memsize = sizeof(*h)
-			 + sizeof(*t)
-#ifdef IP_SET_HASH_WITH_NETS
-			 + sizeof(struct net_prefixes) * nets_length
-#endif
-			 + jhash_size(t->htable_bits) * sizeof(struct hbucket);
+	struct hbucket *n;
+	size_t memsize = sizeof(*h) + sizeof(*t);
 
-	for (i = 0; i < jhash_size(t->htable_bits); i++)
-		memsize += t->bucket[i].size * dsize;
+#ifdef IP_SET_HASH_WITH_NETS
+	memsize += sizeof(struct net_prefixes) * nets_length;
+#endif
+	for (i = 0; i < jhash_size(t->htable_bits); i++) {
+		n = rcu_dereference_bh(hbucket(t, i));
+		if (!n)
+			continue;
+		memsize += sizeof(struct hbucket) + n->size * dsize;
+	}
 
 	return memsize;
 }
@@ -364,7 +368,8 @@
 	int i;
 
 	for (i = 0; i < n->pos; i++)
-		ip_set_ext_destroy(set, ahash_data(n, i, set->dsize));
+		if (test_bit(i, n->used))
+			ip_set_ext_destroy(set, ahash_data(n, i, set->dsize));
 }
 
 /* Flush a hash type of set: destroy all elements */
@@ -376,16 +381,16 @@
 	struct hbucket *n;
 	u32 i;
 
-	t = rcu_dereference_bh_nfnl(h->table);
+	t = ipset_dereference_protected(h->table, set);
 	for (i = 0; i < jhash_size(t->htable_bits); i++) {
-		n = hbucket(t, i);
-		if (n->size) {
-			if (set->extensions & IPSET_EXT_DESTROY)
-				mtype_ext_cleanup(set, n);
-			n->size = n->pos = 0;
-			/* FIXME: use slab cache */
-			kfree(n->value);
-		}
+		n = __ipset_dereference_protected(hbucket(t, i), 1);
+		if (!n)
+			continue;
+		if (set->extensions & IPSET_EXT_DESTROY)
+			mtype_ext_cleanup(set, n);
+		/* FIXME: use slab cache */
+		rcu_assign_pointer(hbucket(t, i), NULL);
+		kfree_rcu(n, rcu);
 	}
 #ifdef IP_SET_HASH_WITH_NETS
 	memset(h->nets, 0, sizeof(struct net_prefixes) * NLEN(set->family));
@@ -401,13 +406,13 @@
 	u32 i;
 
 	for (i = 0; i < jhash_size(t->htable_bits); i++) {
-		n = hbucket(t, i);
-		if (n->size) {
-			if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
-				mtype_ext_cleanup(set, n);
-			/* FIXME: use slab cache */
-			kfree(n->value);
-		}
+		n = __ipset_dereference_protected(hbucket(t, i), 1);
+		if (!n)
+			continue;
+		if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
+			mtype_ext_cleanup(set, n);
+		/* FIXME: use slab cache */
+		kfree(n);
 	}
 
 	ip_set_free(t);
@@ -419,13 +424,11 @@
 {
 	struct htype *h = set->data;
 
-	if (set->extensions & IPSET_EXT_TIMEOUT)
+	if (SET_WITH_TIMEOUT(set))
 		del_timer_sync(&h->gc);
 
-	mtype_ahash_destroy(set, rcu_dereference_bh_nfnl(h->table), true);
-#ifdef IP_SET_HASH_WITH_RBTREE
-	rbtree_destroy(&h->rbtree);
-#endif
+	mtype_ahash_destroy(set,
+			    __ipset_dereference_protected(h->table, 1), true);
 	kfree(h);
 
 	set->data = NULL;
@@ -437,7 +440,7 @@
 	struct htype *h = set->data;
 
 	init_timer(&h->gc);
-	h->gc.data = (unsigned long) set;
+	h->gc.data = (unsigned long)set;
 	h->gc.function = gc;
 	h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
 	add_timer(&h->gc);
@@ -470,61 +473,71 @@
 	struct htable *t;
 	struct hbucket *n;
 	struct mtype_elem *data;
-	u32 i;
-	int j;
+	u32 i, j, d;
 #ifdef IP_SET_HASH_WITH_NETS
 	u8 k;
 #endif
 
-	rcu_read_lock_bh();
-	t = rcu_dereference_bh(h->table);
+	t = ipset_dereference_protected(h->table, set);
 	for (i = 0; i < jhash_size(t->htable_bits); i++) {
-		n = hbucket(t, i);
-		for (j = 0; j < n->pos; j++) {
+		n = __ipset_dereference_protected(hbucket(t, i), 1);
+		if (!n)
+			continue;
+		for (j = 0, d = 0; j < n->pos; j++) {
+			if (!test_bit(j, n->used)) {
+				d++;
+				continue;
+			}
 			data = ahash_data(n, j, dsize);
 			if (ip_set_timeout_expired(ext_timeout(data, set))) {
 				pr_debug("expired %u/%u\n", i, j);
+				clear_bit(j, n->used);
+				smp_mb__after_atomic();
 #ifdef IP_SET_HASH_WITH_NETS
 				for (k = 0; k < IPSET_NET_COUNT; k++)
-					mtype_del_cidr(h, SCIDR(data->cidr, k),
-						       nets_length, k);
+					mtype_del_cidr(h,
+						NCIDR_PUT(DCIDR_GET(data->cidr,
+								    k)),
+						nets_length, k);
 #endif
 				ip_set_ext_destroy(set, data);
-				if (j != n->pos - 1)
-					/* Not last one */
-					memcpy(data,
-					       ahash_data(n, n->pos - 1, dsize),
-					       dsize);
-				n->pos--;
 				h->elements--;
+				d++;
 			}
 		}
-		if (n->pos + AHASH_INIT_SIZE < n->size) {
-			void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
-					    * dsize,
-					    GFP_ATOMIC);
+		if (d >= AHASH_INIT_SIZE) {
+			struct hbucket *tmp = kzalloc(sizeof(*tmp) +
+					(n->size - AHASH_INIT_SIZE) * dsize,
+					GFP_ATOMIC);
 			if (!tmp)
 				/* Still try to delete expired elements */
 				continue;
-			n->size -= AHASH_INIT_SIZE;
-			memcpy(tmp, n->value, n->size * dsize);
-			kfree(n->value);
-			n->value = tmp;
+			tmp->size = n->size - AHASH_INIT_SIZE;
+			for (j = 0, d = 0; j < n->pos; j++) {
+				if (!test_bit(j, n->used))
+					continue;
+				data = ahash_data(n, j, dsize);
+				memcpy(tmp->value + d * dsize, data, dsize);
+				set_bit(d, tmp->used);
+				d++;
+			}
+			tmp->pos = d;
+			rcu_assign_pointer(hbucket(t, i), tmp);
+			kfree_rcu(n, rcu);
 		}
 	}
-	rcu_read_unlock_bh();
 }
 
 static void
 mtype_gc(unsigned long ul_set)
 {
-	struct ip_set *set = (struct ip_set *) ul_set;
+	struct ip_set *set = (struct ip_set *)ul_set;
 	struct htype *h = set->data;
 
 	pr_debug("called\n");
-	write_lock_bh(&set->lock);
+	spin_lock_bh(&set->lock);
 	mtype_expire(set, h, NLEN(set->family), set->dsize);
-	write_unlock_bh(&set->lock);
+	spin_unlock_bh(&set->lock);
 
 	h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
 	add_timer(&h->gc);
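
mtype_expire() now reaps entries by clearing their bit in the `used'
bitmap and, once a bucket has at least AHASH_INIT_SIZE holes, compacts
the survivors into a smaller replacement bucket that is published with
rcu_assign_pointer() while the old one is retired via kfree_rcu(). A
userspace sketch of the compaction step (bucket_shrink() is invented;
note the new bitmap is indexed by the destination slot):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define AHASH_INIT_SIZE 4

struct bucket {
	uint32_t used;		/* occupancy bitmap */
	uint8_t size, pos;
	uint32_t elem[];	/* flexible value array */
};

static struct bucket *bucket_shrink(const struct bucket *old)
{
	struct bucket *new;
	uint8_t i, d = 0;

	new = calloc(1, sizeof(*new) +
			(old->size - AHASH_INIT_SIZE) * sizeof(uint32_t));
	if (!new)
		return NULL;
	new->size = old->size - AHASH_INIT_SIZE;
	for (i = 0; i < old->pos; i++) {
		if (!(old->used & (1u << i)))
			continue;
		new->elem[d] = old->elem[i];
		new->used |= 1u << d;	/* destination index, not i */
		d++;
	}
	new->pos = d;
	return new;	/* kernel: rcu_assign_pointer() + kfree_rcu(old) */
}

int main(void)
{
	struct bucket *b = calloc(1, sizeof(*b) + 8 * sizeof(uint32_t));
	struct bucket *s;
	uint8_t i;

	if (!b)
		return 1;
	b->size = 8;
	b->pos = 3;
	b->elem[0] = 1;
	b->elem[2] = 3;		/* slot 1 expired */
	b->used = 0x5;		/* bits 0 and 2 live */
	s = bucket_shrink(b);
	for (i = 0; s && i < s->pos; i++)
		printf("slot %d: %u\n", i, s->elem[i]);
	free(b);
	free(s);
	return 0;
}
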
@@ -532,93 +545,152 @@
 
 /* Resize a hash: create a new hash table with doubling the hashsize
  * and inserting the elements to it. Repeat until we succeed or
- * fail due to memory pressures. */
+ * fail due to memory pressures.
+ */
 static int
 mtype_resize(struct ip_set *set, bool retried)
 {
 	struct htype *h = set->data;
-	struct htable *t, *orig = rcu_dereference_bh_nfnl(h->table);
-	u8 htable_bits = orig->htable_bits;
+	struct htable *t, *orig;
+	u8 htable_bits;
+	size_t dsize = set->dsize;
 #ifdef IP_SET_HASH_WITH_NETS
 	u8 flags;
+	struct mtype_elem *tmp;
 #endif
 	struct mtype_elem *data;
 	struct mtype_elem *d;
 	struct hbucket *n, *m;
-	u32 i, j;
+	u32 i, j, key;
 	int ret;
 
-	/* Try to cleanup once */
-	if (SET_WITH_TIMEOUT(set) && !retried) {
-		i = h->elements;
-		write_lock_bh(&set->lock);
-		mtype_expire(set, set->data, NLEN(set->family), set->dsize);
-		write_unlock_bh(&set->lock);
-		if (h->elements < i)
-			return 0;
-	}
+#ifdef IP_SET_HASH_WITH_NETS
+	tmp = kmalloc(dsize, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+#endif
+	rcu_read_lock_bh();
+	orig = rcu_dereference_bh_nfnl(h->table);
+	htable_bits = orig->htable_bits;
+	rcu_read_unlock_bh();
 
 retry:
 	ret = 0;
 	htable_bits++;
-	pr_debug("attempt to resize set %s from %u to %u, t %p\n",
-		 set->name, orig->htable_bits, htable_bits, orig);
 	if (!htable_bits) {
 		/* In case we have plenty of memory :-) */
 		pr_warn("Cannot increase the hashsize of set %s further\n",
 			set->name);
-		return -IPSET_ERR_HASH_FULL;
+		ret = -IPSET_ERR_HASH_FULL;
+		goto out;
 	}
-	t = ip_set_alloc(sizeof(*t)
-			 + jhash_size(htable_bits) * sizeof(struct hbucket));
-	if (!t)
-		return -ENOMEM;
+	t = ip_set_alloc(htable_size(htable_bits));
+	if (!t) {
+		ret = -ENOMEM;
+		goto out;
+	}
 	t->htable_bits = htable_bits;
 
-	read_lock_bh(&set->lock);
+	spin_lock_bh(&set->lock);
+	orig = __ipset_dereference_protected(h->table, 1);
+	/* There can't be another parallel resizing, but dumping is possible */
+	atomic_set(&orig->ref, 1);
+	atomic_inc(&orig->uref);
+	pr_debug("attempt to resize set %s from %u to %u, t %p\n",
+		 set->name, orig->htable_bits, htable_bits, orig);
 	for (i = 0; i < jhash_size(orig->htable_bits); i++) {
-		n = hbucket(orig, i);
+		n = __ipset_dereference_protected(hbucket(orig, i), 1);
+		if (!n)
+			continue;
 		for (j = 0; j < n->pos; j++) {
-			data = ahash_data(n, j, set->dsize);
+			if (!test_bit(j, n->used))
+				continue;
+			data = ahash_data(n, j, dsize);
 #ifdef IP_SET_HASH_WITH_NETS
+			/* We have readers running in parallel with us,
+			 * so the live data cannot be modified.
+			 */
 			flags = 0;
+			memcpy(tmp, data, dsize);
+			data = tmp;
 			mtype_data_reset_flags(data, &flags);
 #endif
-			m = hbucket(t, HKEY(data, h->initval, htable_bits));
-			ret = hbucket_elem_add(m, AHASH_MAX(h), set->dsize);
-			if (ret < 0) {
-#ifdef IP_SET_HASH_WITH_NETS
-				mtype_data_reset_flags(data, &flags);
-#endif
-				read_unlock_bh(&set->lock);
-				mtype_ahash_destroy(set, t, false);
-				if (ret == -EAGAIN)
-					goto retry;
-				return ret;
+			key = HKEY(data, h->initval, htable_bits);
+			m = __ipset_dereference_protected(hbucket(t, key), 1);
+			if (!m) {
+				m = kzalloc(sizeof(*m) +
+					    AHASH_INIT_SIZE * dsize,
+					    GFP_ATOMIC);
+				if (!m) {
+					ret = -ENOMEM;
+					goto cleanup;
+				}
+				m->size = AHASH_INIT_SIZE;
+				RCU_INIT_POINTER(hbucket(t, key), m);
+			} else if (m->pos >= m->size) {
+				struct hbucket *ht;
+
+				if (m->size >= AHASH_MAX(h)) {
+					ret = -EAGAIN;
+				} else {
+					ht = kzalloc(sizeof(*ht) +
+						(m->size + AHASH_INIT_SIZE)
+						* dsize,
+						GFP_ATOMIC);
+					if (!ht)
+						ret = -ENOMEM;
+				}
+				if (ret < 0)
+					goto cleanup;
+				memcpy(ht, m, sizeof(struct hbucket) +
+					      m->size * dsize);
+				ht->size = m->size + AHASH_INIT_SIZE;
+				kfree(m);
+				m = ht;
+				RCU_INIT_POINTER(hbucket(t, key), ht);
 			}
-			d = ahash_data(m, m->pos++, set->dsize);
-			memcpy(d, data, set->dsize);
+			d = ahash_data(m, m->pos, dsize);
+			memcpy(d, data, dsize);
+			set_bit(m->pos++, m->used);
 #ifdef IP_SET_HASH_WITH_NETS
 			mtype_data_reset_flags(d, &flags);
 #endif
 		}
 	}
-
 	rcu_assign_pointer(h->table, t);
-	read_unlock_bh(&set->lock);
+
+	spin_unlock_bh(&set->lock);
 
 	/* Give time to other readers of the set */
 	synchronize_rcu_bh();
 
 	pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
 		 orig->htable_bits, orig, t->htable_bits, t);
-	mtype_ahash_destroy(set, orig, false);
+	/* If there's nobody else dumping the table, destroy it */
+	if (atomic_dec_and_test(&orig->uref)) {
+		pr_debug("Table destroy by resize %p\n", orig);
+		mtype_ahash_destroy(set, orig, false);
+	}
 
-	return 0;
+out:
+#ifdef IP_SET_HASH_WITH_NETS
+	kfree(tmp);
+#endif
+	return ret;
+
+cleanup:
+	atomic_set(&orig->ref, 0);
+	atomic_dec(&orig->uref);
+	spin_unlock_bh(&set->lock);
+	mtype_ahash_destroy(set, t, false);
+	if (ret == -EAGAIN)
+		goto retry;
+	goto out;
 }
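
The resize path above retires the old table instead of freeing it at
once: `ref' marks it as replaced, `uref' counts dump users, and
whichever side drops the last uref destroys it, which is what later
lets a LIST/SAVE dump keep walking a table that a concurrent resize has
already swapped out. A C11 sketch of that retirement rule (simplified
to the counters; the kernel uses atomic_t and mtype_ahash_destroy()):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct table {
	atomic_int ref;		/* set once a resize replaced this table */
	atomic_int uref;	/* outstanding users: dumps, the resizer */
};

static void table_put(struct table *t)
{
	/* atomic_dec_and_test() analogue: was ours the last use? */
	if (atomic_fetch_sub(&t->uref, 1) == 1 && atomic_load(&t->ref)) {
		printf("last user gone, destroying retired table\n");
		free(t);
	}
}

int main(void)
{
	struct table *t = malloc(sizeof(*t));

	if (!t)
		return 1;
	atomic_init(&t->ref, 0);
	atomic_init(&t->uref, 0);
	atomic_fetch_add(&t->uref, 1);	/* a dump starts on t */
	atomic_store(&t->ref, 1);	/* a resize swaps t out... */
	atomic_fetch_add(&t->uref, 1);	/* ...taking its own use count */
	table_put(t);			/* resize done: 2 -> 1, t stays */
	table_put(t);			/* dump done: 1 -> 0, t is freed */
	return 0;
}
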
 
 /* Add an element to a hash and update the internal counters when succeeded,
- * otherwise report the proper error code. */
+ * otherwise report the proper error code.
+ */
 static int
 mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 	  struct ip_set_ext *mext, u32 flags)
@@ -627,17 +699,49 @@
 	struct htable *t;
 	const struct mtype_elem *d = value;
 	struct mtype_elem *data;
-	struct hbucket *n;
-	int i, ret = 0;
-	int j = AHASH_MAX(h) + 1;
+	struct hbucket *n, *old = ERR_PTR(-ENOENT);
+	int i, j = -1;
 	bool flag_exist = flags & IPSET_FLAG_EXIST;
+	bool deleted = false, forceadd = false, reuse = false;
 	u32 key, multi = 0;
 
-	rcu_read_lock_bh();
-	t = rcu_dereference_bh(h->table);
+	if (h->elements >= h->maxelem) {
+		if (SET_WITH_TIMEOUT(set))
+			/* FIXME: when set is full, we slow down here */
+			mtype_expire(set, h, NLEN(set->family), set->dsize);
+		if (h->elements >= h->maxelem && SET_WITH_FORCEADD(set))
+			forceadd = true;
+	}
+
+	t = ipset_dereference_protected(h->table, set);
 	key = HKEY(value, h->initval, t->htable_bits);
-	n = hbucket(t, key);
+	n = __ipset_dereference_protected(hbucket(t, key), 1);
+	if (!n) {
+		if (forceadd) {
+			if (net_ratelimit())
+				pr_warn("Set %s is full, maxelem %u reached\n",
+					set->name, h->maxelem);
+			return -IPSET_ERR_HASH_FULL;
+		} else if (h->elements >= h->maxelem) {
+			goto set_full;
+		}
+		old = NULL;
+		n = kzalloc(sizeof(*n) + AHASH_INIT_SIZE * set->dsize,
+			    GFP_ATOMIC);
+		if (!n)
+			return -ENOMEM;
+		n->size = AHASH_INIT_SIZE;
+		goto copy_elem;
+	}
 	for (i = 0; i < n->pos; i++) {
+		if (!test_bit(i, n->used)) {
+			/* Reuse first deleted entry */
+			if (j == -1) {
+				deleted = reuse = true;
+				j = i;
+			}
+			continue;
+		}
 		data = ahash_data(n, i, set->dsize);
 		if (mtype_data_equal(data, d, &multi)) {
 			if (flag_exist ||
@@ -645,85 +749,94 @@
 			     ip_set_timeout_expired(ext_timeout(data, set)))) {
 				/* Just the extensions could be overwritten */
 				j = i;
-				goto reuse_slot;
-			} else {
-				ret = -IPSET_ERR_EXIST;
-				goto out;
+				goto overwrite_extensions;
 			}
+			return -IPSET_ERR_EXIST;
 		}
 		/* Reuse first timed out entry */
 		if (SET_WITH_TIMEOUT(set) &&
 		    ip_set_timeout_expired(ext_timeout(data, set)) &&
-		    j != AHASH_MAX(h) + 1)
+		    j == -1) {
 			j = i;
+			reuse = true;
+		}
 	}
-	if (h->elements >= h->maxelem && SET_WITH_FORCEADD(set) && n->pos) {
-		/* Choosing the first entry in the array to replace */
-		j = 0;
-		goto reuse_slot;
-	}
-	if (SET_WITH_TIMEOUT(set) && h->elements >= h->maxelem)
-		/* FIXME: when set is full, we slow down here */
-		mtype_expire(set, h, NLEN(set->family), set->dsize);
-
-	if (h->elements >= h->maxelem) {
-		if (net_ratelimit())
-			pr_warn("Set %s is full, maxelem %u reached\n",
-				set->name, h->maxelem);
-		ret = -IPSET_ERR_HASH_FULL;
-		goto out;
-	}
-
-reuse_slot:
-	if (j != AHASH_MAX(h) + 1) {
-		/* Fill out reused slot */
+	if (reuse || forceadd) {
 		data = ahash_data(n, j, set->dsize);
+		if (!deleted) {
 #ifdef IP_SET_HASH_WITH_NETS
-		for (i = 0; i < IPSET_NET_COUNT; i++) {
-			mtype_del_cidr(h, SCIDR(data->cidr, i),
-				       NLEN(set->family), i);
-			mtype_add_cidr(h, SCIDR(d->cidr, i),
-				       NLEN(set->family), i);
-		}
+			for (i = 0; i < IPSET_NET_COUNT; i++)
+				mtype_del_cidr(h,
+					NCIDR_PUT(DCIDR_GET(data->cidr, i)),
+					NLEN(set->family), i);
 #endif
-		ip_set_ext_destroy(set, data);
-	} else {
-		/* Use/create a new slot */
-		TUNE_AHASH_MAX(h, multi);
-		ret = hbucket_elem_add(n, AHASH_MAX(h), set->dsize);
-		if (ret != 0) {
-			if (ret == -EAGAIN)
-				mtype_data_next(&h->next, d);
-			goto out;
+			ip_set_ext_destroy(set, data);
+			h->elements--;
 		}
-		data = ahash_data(n, n->pos++, set->dsize);
-#ifdef IP_SET_HASH_WITH_NETS
-		for (i = 0; i < IPSET_NET_COUNT; i++)
-			mtype_add_cidr(h, SCIDR(d->cidr, i), NLEN(set->family),
-				       i);
-#endif
-		h->elements++;
+		goto copy_data;
 	}
+	if (h->elements >= h->maxelem)
+		goto set_full;
+	/* Create a new slot */
+	if (n->pos >= n->size) {
+		TUNE_AHASH_MAX(h, multi);
+		if (n->size >= AHASH_MAX(h)) {
+			/* Trigger rehashing */
+			mtype_data_next(&h->next, d);
+			return -EAGAIN;
+		}
+		old = n;
+		n = kzalloc(sizeof(*n) +
+			    (old->size + AHASH_INIT_SIZE) * set->dsize,
+			    GFP_ATOMIC);
+		if (!n)
+			return -ENOMEM;
+		memcpy(n, old, sizeof(struct hbucket) +
+		       old->size * set->dsize);
+		n->size = old->size + AHASH_INIT_SIZE;
+	}
+
+copy_elem:
+	j = n->pos++;
+	data = ahash_data(n, j, set->dsize);
+copy_data:
+	h->elements++;
+#ifdef IP_SET_HASH_WITH_NETS
+	for (i = 0; i < IPSET_NET_COUNT; i++)
+		mtype_add_cidr(h, NCIDR_PUT(DCIDR_GET(d->cidr, i)),
+			       NLEN(set->family), i);
+#endif
 	memcpy(data, d, sizeof(struct mtype_elem));
+overwrite_extensions:
 #ifdef IP_SET_HASH_WITH_NETS
 	mtype_data_set_flags(data, flags);
 #endif
-	if (SET_WITH_TIMEOUT(set))
-		ip_set_timeout_set(ext_timeout(data, set), ext->timeout);
 	if (SET_WITH_COUNTER(set))
 		ip_set_init_counter(ext_counter(data, set), ext);
 	if (SET_WITH_COMMENT(set))
 		ip_set_init_comment(ext_comment(data, set), ext);
 	if (SET_WITH_SKBINFO(set))
 		ip_set_init_skbinfo(ext_skbinfo(data, set), ext);
+	/* Must come last for the case when timed out entry is reused */
+	if (SET_WITH_TIMEOUT(set))
+		ip_set_timeout_set(ext_timeout(data, set), ext->timeout);
+	smp_mb__before_atomic();
+	set_bit(j, n->used);
+	if (old != ERR_PTR(-ENOENT)) {
+		rcu_assign_pointer(hbucket(t, key), n);
+		if (old)
+			kfree_rcu(old, rcu);
+	}
 
-out:
-	rcu_read_unlock_bh();
-	return ret;
+	return 0;
+set_full:
+	if (net_ratelimit())
+		pr_warn("Set %s is full, maxelem %u reached\n",
+			set->name, h->maxelem);
+	return -IPSET_ERR_HASH_FULL;
 }
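
When a bucket fills up, mtype_add() above no longer reallocates it in
place: it builds an enlarged copy, appends there, and only then
publishes the copy with rcu_assign_pointer(), retiring the original via
kfree_rcu() so lockless readers never observe a half-grown array. A
sketch of that copy-on-write growth (userspace stand-in; the RCU calls
appear as comments):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define AHASH_INIT_SIZE 4

struct bucket {
	uint32_t used;
	uint8_t size, pos;
	uint32_t elem[];
};

static struct bucket *bucket_grow(const struct bucket *old)
{
	struct bucket *n;

	n = calloc(1, sizeof(*n) +
		      (old->size + AHASH_INIT_SIZE) * sizeof(uint32_t));
	if (!n)
		return NULL;
	/* Readers still traverse *old* while the copy is prepared */
	memcpy(n, old, sizeof(*n) + old->size * sizeof(uint32_t));
	n->size = old->size + AHASH_INIT_SIZE;
	/* Kernel equivalent, after appending the new element:
	 *	rcu_assign_pointer(hbucket(t, key), n);
	 *	kfree_rcu(old, rcu);
	 */
	return n;
}

int main(void)
{
	struct bucket *b, *bigger;

	b = calloc(1, sizeof(*b) + AHASH_INIT_SIZE * sizeof(uint32_t));
	if (!b)
		return 1;
	b->size = b->pos = AHASH_INIT_SIZE;	/* pretend it is full */
	bigger = bucket_grow(b);
	free(b);		/* stand-in for kfree_rcu() */
	free(bigger);
	return 0;
}
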
 
-/* Delete an element from the hash: swap it with the last element
- * and free up space if possible.
+/* Delete an element from the hash and free up space if possible.
  */
 static int
 mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
@@ -734,55 +847,70 @@
 	const struct mtype_elem *d = value;
 	struct mtype_elem *data;
 	struct hbucket *n;
-	int i, ret = -IPSET_ERR_EXIST;
-#ifdef IP_SET_HASH_WITH_NETS
-	u8 j;
-#endif
+	int i, j, k, ret = -IPSET_ERR_EXIST;
 	u32 key, multi = 0;
+	size_t dsize = set->dsize;
 
-	rcu_read_lock_bh();
-	t = rcu_dereference_bh(h->table);
+	t = ipset_dereference_protected(h->table, set);
 	key = HKEY(value, h->initval, t->htable_bits);
-	n = hbucket(t, key);
-	for (i = 0; i < n->pos; i++) {
-		data = ahash_data(n, i, set->dsize);
+	n = __ipset_dereference_protected(hbucket(t, key), 1);
+	if (!n)
+		goto out;
+	for (i = 0, k = 0; i < n->pos; i++) {
+		if (!test_bit(i, n->used)) {
+			k++;
+			continue;
+		}
+		data = ahash_data(n, i, dsize);
 		if (!mtype_data_equal(data, d, &multi))
 			continue;
 		if (SET_WITH_TIMEOUT(set) &&
 		    ip_set_timeout_expired(ext_timeout(data, set)))
 			goto out;
-		if (i != n->pos - 1)
-			/* Not last one */
-			memcpy(data, ahash_data(n, n->pos - 1, set->dsize),
-			       set->dsize);
 
-		n->pos--;
+		ret = 0;
+		clear_bit(i, n->used);
+		smp_mb__after_atomic();
+		if (i + 1 == n->pos)
+			n->pos--;
 		h->elements--;
 #ifdef IP_SET_HASH_WITH_NETS
 		for (j = 0; j < IPSET_NET_COUNT; j++)
-			mtype_del_cidr(h, SCIDR(d->cidr, j), NLEN(set->family),
-				       j);
+			mtype_del_cidr(h, NCIDR_PUT(DCIDR_GET(d->cidr, j)),
+				       NLEN(set->family), j);
 #endif
 		ip_set_ext_destroy(set, data);
-		if (n->pos + AHASH_INIT_SIZE < n->size) {
-			void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
-					    * set->dsize,
-					    GFP_ATOMIC);
-			if (!tmp) {
-				ret = 0;
-				goto out;
-			}
-			n->size -= AHASH_INIT_SIZE;
-			memcpy(tmp, n->value, n->size * set->dsize);
-			kfree(n->value);
-			n->value = tmp;
+
+		for (; i < n->pos; i++) {
+			if (!test_bit(i, n->used))
+				k++;
 		}
-		ret = 0;
+		if (n->pos == 0 && k == 0) {
+			rcu_assign_pointer(hbucket(t, key), NULL);
+			kfree_rcu(n, rcu);
+		} else if (k >= AHASH_INIT_SIZE) {
+			struct hbucket *tmp = kzalloc(sizeof(*tmp) +
+					(n->size - AHASH_INIT_SIZE) * dsize,
+					GFP_ATOMIC);
+			if (!tmp)
+				goto out;
+			tmp->size = n->size - AHASH_INIT_SIZE;
+			for (j = 0, k = 0; j < n->pos; j++) {
+				if (!test_bit(j, n->used))
+					continue;
+				data = ahash_data(n, j, dsize);
+				memcpy(tmp->value + k * dsize, data, dsize);
+				set_bit(k, tmp->used);
+				k++;
+			}
+			tmp->pos = k;
+			rcu_assign_pointer(hbucket(t, key), tmp);
+			kfree_rcu(n, rcu);
+		}
 		goto out;
 	}
 
 out:
-	rcu_read_unlock_bh();
 	return ret;
 }
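
Deletion above is publication in reverse: clear the `used' bit, then
smp_mb__after_atomic(), so no reader can still treat the slot as live
once its extensions are torn down; the add path mirrors this with
smp_mb__before_atomic() ahead of set_bit(). A rough C11 analogue of the
pairing, reduced to one slot (the memory_order choices are one reading
of the barriers, not the kernel's literal semantics):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t slot;		/* the element payload */
static atomic_uint used;	/* one-bit "bucket bitmap" */

static void slot_add(uint32_t v)
{
	slot = v;			/* fill the data first... */
	atomic_thread_fence(memory_order_release);
	atomic_fetch_or(&used, 1u);	/* ...then make it visible */
}

static int slot_test(uint32_t *v)
{
	if (!(atomic_load(&used) & 1u))
		return 0;
	atomic_thread_fence(memory_order_acquire);
	*v = slot;		/* sees the fully written payload */
	return 1;
}

int main(void)
{
	uint32_t v;

	slot_add(42);
	if (slot_test(&v))
		printf("found %u\n", v);
	atomic_fetch_and(&used, ~1u);		   /* delete: clear... */
	atomic_thread_fence(memory_order_seq_cst); /* ...then destroy */
	return 0;
}
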
 
@@ -801,7 +929,8 @@
 
 #ifdef IP_SET_HASH_WITH_NETS
 /* Special test function which takes into account the different network
- * sizes added to the set */
+ * sizes added to the set.
+ */
 static int
 mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
 		 const struct ip_set_ext *ext,
@@ -824,16 +953,21 @@
 	for (; j < nets_length && h->nets[j].cidr[0] && !multi; j++) {
 #if IPSET_NET_COUNT == 2
 		mtype_data_reset_elem(d, &orig);
-		mtype_data_netmask(d, NCIDR(h->nets[j].cidr[0]), false);
+		mtype_data_netmask(d, NCIDR_GET(h->nets[j].cidr[0]), false);
 		for (k = 0; k < nets_length && h->nets[k].cidr[1] && !multi;
 		     k++) {
-			mtype_data_netmask(d, NCIDR(h->nets[k].cidr[1]), true);
+			mtype_data_netmask(d, NCIDR_GET(h->nets[k].cidr[1]),
+					   true);
 #else
-		mtype_data_netmask(d, NCIDR(h->nets[j].cidr[0]));
+		mtype_data_netmask(d, NCIDR_GET(h->nets[j].cidr[0]));
 #endif
 		key = HKEY(d, h->initval, t->htable_bits);
-		n = hbucket(t, key);
+		n = rcu_dereference_bh(hbucket(t, key));
+		if (!n)
+			continue;
 		for (i = 0; i < n->pos; i++) {
+			if (!test_bit(i, n->used))
+				continue;
 			data = ahash_data(n, i, set->dsize);
 			if (!mtype_data_equal(data, d, &multi))
 				continue;
@@ -871,13 +1005,13 @@
 	int i, ret = 0;
 	u32 key, multi = 0;
 
-	rcu_read_lock_bh();
 	t = rcu_dereference_bh(h->table);
 #ifdef IP_SET_HASH_WITH_NETS
 	/* If we test an IP address and not a network address,
-	 * try all possible network sizes */
+	 * try all possible network sizes.
+	 */
 	for (i = 0; i < IPSET_NET_COUNT; i++)
-		if (GCIDR(d->cidr, i) != SET_HOST_MASK(set->family))
+		if (DCIDR_GET(d->cidr, i) != SET_HOST_MASK(set->family))
 			break;
 	if (i == IPSET_NET_COUNT) {
 		ret = mtype_test_cidrs(set, d, ext, mext, flags);
@@ -886,8 +1020,14 @@
 #endif
 
 	key = HKEY(d, h->initval, t->htable_bits);
-	n = hbucket(t, key);
+	n = rcu_dereference_bh(hbucket(t, key));
+	if (!n) {
+		ret = 0;
+		goto out;
+	}
 	for (i = 0; i < n->pos; i++) {
+		if (!test_bit(i, n->used))
+			continue;
 		data = ahash_data(n, i, set->dsize);
 		if (mtype_data_equal(data, d, &multi) &&
 		    !(SET_WITH_TIMEOUT(set) &&
@@ -897,7 +1037,6 @@
 		}
 	}
 out:
-	rcu_read_unlock_bh();
 	return ret;
 }
 
@@ -909,15 +1048,19 @@
 	const struct htable *t;
 	struct nlattr *nested;
 	size_t memsize;
+	u8 htable_bits;
 
+	rcu_read_lock_bh();
 	t = rcu_dereference_bh_nfnl(h->table);
 	memsize = mtype_ahash_memsize(h, t, NLEN(set->family), set->dsize);
+	htable_bits = t->htable_bits;
+	rcu_read_unlock_bh();
 
 	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 	if (!nested)
 		goto nla_put_failure;
 	if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE,
-			  htonl(jhash_size(t->htable_bits))) ||
+			  htonl(jhash_size(htable_bits))) ||
 	    nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)))
 		goto nla_put_failure;
 #ifdef IP_SET_HASH_WITH_NETMASK
@@ -941,32 +1084,63 @@
 	return -EMSGSIZE;
 }
 
+/* Make it possible to run dumping in parallel with resizing */
+static void
+mtype_uref(struct ip_set *set, struct netlink_callback *cb, bool start)
+{
+	struct htype *h = set->data;
+	struct htable *t;
+
+	if (start) {
+		rcu_read_lock_bh();
+		t = rcu_dereference_bh_nfnl(h->table);
+		atomic_inc(&t->uref);
+		cb->args[IPSET_CB_PRIVATE] = (unsigned long)t;
+		rcu_read_unlock_bh();
+	} else if (cb->args[IPSET_CB_PRIVATE]) {
+		t = (struct htable *)cb->args[IPSET_CB_PRIVATE];
+		if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
+			/* Resizing didn't destroy the hash table */
+			pr_debug("Table destroy by dump: %p\n", t);
+			mtype_ahash_destroy(set, t, false);
+		}
+		cb->args[IPSET_CB_PRIVATE] = 0;
+	}
+}
+
 /* Reply a LIST/SAVE request: dump the elements of the specified set */
 static int
 mtype_list(const struct ip_set *set,
 	   struct sk_buff *skb, struct netlink_callback *cb)
 {
-	const struct htype *h = set->data;
-	const struct htable *t = rcu_dereference_bh_nfnl(h->table);
+	const struct htable *t;
 	struct nlattr *atd, *nested;
 	const struct hbucket *n;
 	const struct mtype_elem *e;
 	u32 first = cb->args[IPSET_CB_ARG0];
 	/* We assume that one hash bucket fills into one page */
 	void *incomplete;
-	int i;
+	int i, ret = 0;
 
 	atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
 	if (!atd)
 		return -EMSGSIZE;
+
 	pr_debug("list hash set %s\n", set->name);
+	t = (const struct htable *)cb->args[IPSET_CB_PRIVATE];
+	/* Expire may replace a hbucket with another one */
+	rcu_read_lock();
 	for (; cb->args[IPSET_CB_ARG0] < jhash_size(t->htable_bits);
 	     cb->args[IPSET_CB_ARG0]++) {
 		incomplete = skb_tail_pointer(skb);
-		n = hbucket(t, cb->args[IPSET_CB_ARG0]);
+		n = rcu_dereference(hbucket(t, cb->args[IPSET_CB_ARG0]));
 		pr_debug("cb->arg bucket: %lu, t %p n %p\n",
 			 cb->args[IPSET_CB_ARG0], t, n);
+		if (!n)
+			continue;
 		for (i = 0; i < n->pos; i++) {
+			if (!test_bit(i, n->used))
+				continue;
 			e = ahash_data(n, i, set->dsize);
 			if (SET_WITH_TIMEOUT(set) &&
 			    ip_set_timeout_expired(ext_timeout(e, set)))
@@ -977,9 +1151,10 @@
 			if (!nested) {
 				if (cb->args[IPSET_CB_ARG0] == first) {
 					nla_nest_cancel(skb, atd);
-					return -EMSGSIZE;
-				} else
-					goto nla_put_failure;
+					ret = -EMSGSIZE;
+					goto out;
+				}
+				goto nla_put_failure;
 			}
 			if (mtype_data_list(skb, e))
 				goto nla_put_failure;
@@ -992,7 +1167,7 @@
 	/* Set listing finished */
 	cb->args[IPSET_CB_ARG0] = 0;
 
-	return 0;
+	goto out;
 
 nla_put_failure:
 	nlmsg_trim(skb, incomplete);
@@ -1000,20 +1175,24 @@
 		pr_warn("Can't list set %s: one bucket does not fit into a message. Please report it!\n",
 			set->name);
 		cb->args[IPSET_CB_ARG0] = 0;
-		return -EMSGSIZE;
+		ret = -EMSGSIZE;
+	} else {
+		ipset_nest_end(skb, atd);
 	}
-	ipset_nest_end(skb, atd);
-	return 0;
+out:
+	rcu_read_unlock();
+	return ret;
 }
 
 static int
 IPSET_TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb,
-	    const struct xt_action_param *par,
-	    enum ipset_adt adt, struct ip_set_adt_opt *opt);
+			  const struct xt_action_param *par,
+			  enum ipset_adt adt, struct ip_set_adt_opt *opt);
 
 static int
 IPSET_TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
-	    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried);
+			  enum ipset_adt adt, u32 *lineno, u32 flags,
+			  bool retried);
 
 static const struct ip_set_type_variant mtype_variant = {
 	.kadt	= mtype_kadt,
@@ -1027,6 +1206,7 @@
 	.flush	= mtype_flush,
 	.head	= mtype_head,
 	.list	= mtype_list,
+	.uref	= mtype_uref,
 	.resize	= mtype_resize,
 	.same_set = mtype_same_set,
 };
@@ -1045,7 +1225,7 @@
 	u8 netmask;
 #endif
 	size_t hsize;
-	struct HTYPE *h;
+	struct htype *h;
 	struct htable *t;
 
 #ifndef IP_SET_PROTO_UNDEF
@@ -1064,12 +1244,14 @@
 
 	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
-#ifdef IP_SET_HASH_WITH_MARKMASK
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MARKMASK) ||
-#endif
 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
 		return -IPSET_ERR_PROTOCOL;
+#ifdef IP_SET_HASH_WITH_MARKMASK
+	/* Separated condition in order to avoid directive in argument list */
+	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_MARKMASK)))
+		return -IPSET_ERR_PROTOCOL;
+#endif
 
 	if (tb[IPSET_ATTR_HASHSIZE]) {
 		hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
@@ -1092,7 +1274,7 @@
 #endif
 #ifdef IP_SET_HASH_WITH_MARKMASK
 	if (tb[IPSET_ATTR_MARKMASK]) {
-		markmask = ntohl(nla_get_u32(tb[IPSET_ATTR_MARKMASK]));
+		markmask = ntohl(nla_get_be32(tb[IPSET_ATTR_MARKMASK]));
 
 		if (markmask == 0)
 			return -IPSET_ERR_INVALID_MARKMASK;
@@ -1165,3 +1347,5 @@
 	return 0;
 }
 #endif /* IP_SET_EMIT_CREATE */
+
+#undef HKEY_DATALEN
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 76959d7..9d6bf19 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -56,15 +56,15 @@
 	return e1->ip == e2->ip;
 }
 
-static inline bool
+static bool
 hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *e)
 {
 	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, e->ip))
 		goto nla_put_failure;
-	return 0;
+	return false;
 
 nla_put_failure:
-	return 1;
+	return true;
 }
 
 static inline void
@@ -74,7 +74,6 @@
 }
 
 #define MTYPE		hash_ip4
-#define PF		4
 #define HOST_MASK	32
 #include "ip_set_hash_gen.h"
 
@@ -109,20 +108,17 @@
 	u32 ip = 0, ip_to = 0, hosts;
 	int ret = 0;
 
-	if (unlikely(!tb[IPSET_ATTR_IP] ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)   ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP]))
+		return -IPSET_ERR_PROTOCOL;
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
 
@@ -145,7 +141,7 @@
 	} else if (tb[IPSET_ATTR_CIDR]) {
 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-		if (!cidr || cidr > 32)
+		if (!cidr || cidr > HOST_MASK)
 			return -IPSET_ERR_INVALID_CIDR;
 		ip_set_mask_from_to(ip, ip_to, cidr);
 	}
@@ -162,8 +158,8 @@
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
-		else
-			ret = 0;
+
+		ret = 0;
 	}
 	return ret;
 }
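
The reworked parsing above also stops merging two calls with `||': the
boolean OR turns any failure into 1, so the caller can no longer see
which -IPSET_ERR_*/-errno value actually occurred. A tiny sketch of the
difference (f() and g() are placeholders):

#include <stdio.h>

static int f(void) { return -22; }	/* pretend -EINVAL */
static int g(void) { return 0; }

int main(void)
{
	int ret = f() || g();	/* old style: ret == 1, code lost */

	printf("combined: %d\n", ret);
	ret = f();		/* new style: real code preserved */
	if (!ret)
		ret = g();
	printf("sequential: %d\n", ret);
	return 0;
}
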
@@ -196,10 +192,10 @@
 {
 	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6))
 		goto nla_put_failure;
-	return 0;
+	return false;
 
 nla_put_failure:
-	return 1;
+	return true;
 }
 
 static inline void
@@ -208,12 +204,9 @@
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
-#undef HKEY_DATALEN
 
 #define MTYPE		hash_ip6
-#define PF		6
 #define HOST_MASK	128
 
 #define IP_SET_EMIT_CREATE
@@ -247,22 +240,25 @@
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	int ret;
 
-	if (unlikely(!tb[IPSET_ATTR_IP] ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
-		     tb[IPSET_ATTR_IP_TO] ||
-		     tb[IPSET_ATTR_CIDR]))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP]))
+		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(tb[IPSET_ATTR_IP_TO]))
+		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
+	if (unlikely(tb[IPSET_ATTR_CIDR])) {
+		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+		if (cidr != HOST_MASK)
+			return -IPSET_ERR_INVALID_CIDR;
+	}
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
 
@@ -301,7 +297,8 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
-		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING,
+					    .len  = IPSET_MAX_COMMENT_SIZE },
 		[IPSET_ATTR_SKBMARK]	= { .type = NLA_U64 },
 		[IPSET_ATTR_SKBPRIO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_SKBQUEUE]	= { .type = NLA_U16 },
@@ -318,6 +315,7 @@
 static void __exit
 hash_ip_fini(void)
 {
+	rcu_barrier();
 	ip_set_type_unregister(&hash_ip_type);
 }
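
The rcu_barrier() added to hash_ip_fini() above (and to the other hash
type modules below) is needed because the rework frees buckets with
kfree_rcu(): unregistering alone could let the module unload while RCU
callbacks it queued are still pending. The pattern, schematically
(hash_example_* are hypothetical names):

static void __exit hash_example_fini(void)
{
	/* Wait for every kfree_rcu() this module has queued */
	rcu_barrier();
	ip_set_type_unregister(&hash_example_type);
}
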
 
diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c
index 7abf978..a0695a2 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmark.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmark.c
@@ -63,10 +63,10 @@
 	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
 	    nla_put_net32(skb, IPSET_ATTR_MARK, htonl(data->mark)))
 		goto nla_put_failure;
-	return 0;
+	return false;
 
 nla_put_failure:
-	return 1;
+	return true;
 }
 
 static inline void
@@ -76,10 +76,8 @@
 	next->ip = d->ip;
 }
 
-#define MTYPE           hash_ipmark4
-#define PF              4
-#define HOST_MASK       32
-#define HKEY_DATALEN	sizeof(struct hash_ipmark4_elem)
+#define MTYPE		hash_ipmark4
+#define HOST_MASK	32
 #include "ip_set_hash_gen.h"
 
 static int
@@ -110,25 +108,22 @@
 	u32 ip, ip_to = 0;
 	int ret;
 
-	if (unlikely(!tb[IPSET_ATTR_IP] ||
-		     !ip_set_attr_netorder(tb, IPSET_ATTR_MARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_MARK)))
+		return -IPSET_ERR_PROTOCOL;
+
+	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
 	if (ret)
 		return ret;
 
-	e.mark = ntohl(nla_get_u32(tb[IPSET_ATTR_MARK]));
+	ret = ip_set_get_extensions(set, tb, &ext);
+	if (ret)
+		return ret;
+
+	e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));
 	e.mark &= h->markmask;
 
 	if (adt == IPSET_TEST ||
@@ -147,7 +142,7 @@
 	} else if (tb[IPSET_ATTR_CIDR]) {
 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-		if (!cidr || cidr > 32)
+		if (!cidr || cidr > HOST_MASK)
 			return -IPSET_ERR_INVALID_CIDR;
 		ip_set_mask_from_to(ip, ip_to, cidr);
 	}
@@ -160,8 +155,8 @@
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
-		else
-			ret = 0;
+
+		ret = 0;
 	}
 	return ret;
 }
@@ -191,10 +186,10 @@
 	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
 	    nla_put_net32(skb, IPSET_ATTR_MARK, htonl(data->mark)))
 		goto nla_put_failure;
-	return 0;
+	return false;
 
 nla_put_failure:
-	return 1;
+	return true;
 }
 
 static inline void
@@ -204,18 +199,13 @@
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
-#undef HKEY_DATALEN
 
 #define MTYPE		hash_ipmark6
-#define PF		6
 #define HOST_MASK	128
-#define HKEY_DATALEN	sizeof(struct hash_ipmark6_elem)
-#define	IP_SET_EMIT_CREATE
+#define IP_SET_EMIT_CREATE
 #include "ip_set_hash_gen.h"
 
-
 static int
 hash_ipmark6_kadt(struct ip_set *set, const struct sk_buff *skb,
 		  const struct xt_action_param *par,
@@ -243,27 +233,30 @@
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	int ret;
 
-	if (unlikely(!tb[IPSET_ATTR_IP] ||
-		     !ip_set_attr_netorder(tb, IPSET_ATTR_MARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
-		     tb[IPSET_ATTR_IP_TO] ||
-		     tb[IPSET_ATTR_CIDR]))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_MARK)))
+		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(tb[IPSET_ATTR_IP_TO]))
+		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
+	if (unlikely(tb[IPSET_ATTR_CIDR])) {
+		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+		if (cidr != HOST_MASK)
+			return -IPSET_ERR_INVALID_CIDR;
+	}
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
 	if (ret)
 		return ret;
 
-	e.mark = ntohl(nla_get_u32(tb[IPSET_ATTR_MARK]));
+	ret = ip_set_get_extensions(set, tb, &ext);
+	if (ret)
+		return ret;
+
+	e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));
 	e.mark &= h->markmask;
 
 	if (adt == IPSET_TEST) {
@@ -274,10 +267,8 @@
 	ret = adtfn(set, &e, &ext, &ext, flags);
 	if (ret && !ip_set_eexist(ret, flags))
 		return ret;
-	else
-		ret = 0;
 
-	return ret;
+	return 0;
 }
 
 static struct ip_set_type hash_ipmark_type __read_mostly = {
@@ -307,7 +298,8 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
-		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING,
+					    .len  = IPSET_MAX_COMMENT_SIZE },
 		[IPSET_ATTR_SKBMARK]	= { .type = NLA_U64 },
 		[IPSET_ATTR_SKBPRIO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_SKBQUEUE]	= { .type = NLA_U16 },
@@ -324,6 +316,7 @@
 static void __exit
 hash_ipmark_fini(void)
 {
+	rcu_barrier();
 	ip_set_type_unregister(&hash_ipmark_type);
 }
 
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index dcbcceb..9d84b3d 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -69,10 +69,10 @@
 	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
 	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
 		goto nla_put_failure;
-	return 0;
+	return false;
 
 nla_put_failure:
-	return 1;
+	return true;
 }
 
 static inline void
@@ -83,10 +83,8 @@
 	next->port = d->port;
 }
 
-#define MTYPE           hash_ipport4
-#define PF              4
-#define HOST_MASK       32
-#define HKEY_DATALEN	sizeof(struct hash_ipport4_elem)
+#define MTYPE		hash_ipport4
+#define HOST_MASK	32
 #include "ip_set_hash_gen.h"
 
 static int
@@ -118,29 +116,23 @@
 	bool with_ports = false;
 	int ret;
 
-	if (unlikely(!tb[IPSET_ATTR_IP] ||
-		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
+		return -IPSET_ERR_PROTOCOL;
+
+	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
 	if (ret)
 		return ret;
 
-	if (tb[IPSET_ATTR_PORT])
-		e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-	else
-		return -IPSET_ERR_PROTOCOL;
+	ret = ip_set_get_extensions(set, tb, &ext);
+	if (ret)
+		return ret;
+
+	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
 	if (tb[IPSET_ATTR_PROTO]) {
 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -148,8 +140,9 @@
 
 		if (e.proto == 0)
 			return -IPSET_ERR_INVALID_PROTO;
-	} else
+	} else {
 		return -IPSET_ERR_MISSING_PROTO;
+	}
 
 	if (!(with_ports || e.proto == IPPROTO_ICMP))
 		e.port = 0;
@@ -171,7 +164,7 @@
 	} else if (tb[IPSET_ATTR_CIDR]) {
 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-		if (!cidr || cidr > 32)
+		if (!cidr || cidr > HOST_MASK)
 			return -IPSET_ERR_INVALID_CIDR;
 		ip_set_mask_from_to(ip, ip_to, cidr);
 	}
@@ -195,8 +188,8 @@
 
 			if (ret && !ip_set_eexist(ret, flags))
 				return ret;
-			else
-				ret = 0;
+
+			ret = 0;
 		}
 	}
 	return ret;
@@ -231,10 +224,10 @@
 	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
 	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
 		goto nla_put_failure;
-	return 0;
+	return false;
 
 nla_put_failure:
-	return 1;
+	return true;
 }
 
 static inline void
@@ -245,15 +238,11 @@
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
-#undef HKEY_DATALEN
 
 #define MTYPE		hash_ipport6
-#define PF		6
 #define HOST_MASK	128
-#define HKEY_DATALEN	sizeof(struct hash_ipport6_elem)
-#define	IP_SET_EMIT_CREATE
+#define IP_SET_EMIT_CREATE
 #include "ip_set_hash_gen.h"
 
 static int
@@ -285,31 +274,31 @@
 	bool with_ports = false;
 	int ret;
 
-	if (unlikely(!tb[IPSET_ATTR_IP] ||
-		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
-		     tb[IPSET_ATTR_IP_TO] ||
-		     tb[IPSET_ATTR_CIDR]))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
+		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(tb[IPSET_ATTR_IP_TO]))
+		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
+	if (unlikely(tb[IPSET_ATTR_CIDR])) {
+		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+		if (cidr != HOST_MASK)
+			return -IPSET_ERR_INVALID_CIDR;
+	}
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
 	if (ret)
 		return ret;
 
-	if (tb[IPSET_ATTR_PORT])
-		e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-	else
-		return -IPSET_ERR_PROTOCOL;
+	ret = ip_set_get_extensions(set, tb, &ext);
+	if (ret)
+		return ret;
+
+	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
 	if (tb[IPSET_ATTR_PROTO]) {
 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -317,8 +306,9 @@
 
 		if (e.proto == 0)
 			return -IPSET_ERR_INVALID_PROTO;
-	} else
+	} else {
 		return -IPSET_ERR_MISSING_PROTO;
+	}
 
 	if (!(with_ports || e.proto == IPPROTO_ICMPV6))
 		e.port = 0;
@@ -341,8 +331,8 @@
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
-		else
-			ret = 0;
+
+		ret = 0;
 	}
 	return ret;
 }
@@ -376,7 +366,8 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
-		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING,
+					    .len  = IPSET_MAX_COMMENT_SIZE },
 		[IPSET_ATTR_SKBMARK]	= { .type = NLA_U64 },
 		[IPSET_ATTR_SKBPRIO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_SKBQUEUE]	= { .type = NLA_U16 },
@@ -393,6 +384,7 @@
 static void __exit
 hash_ipport_fini(void)
 {
+	rcu_barrier();
 	ip_set_type_unregister(&hash_ipport_type);
 }
 
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 7ef93fc..215b7b9 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -63,17 +63,17 @@
 
 static bool
 hash_ipportip4_data_list(struct sk_buff *skb,
-		       const struct hash_ipportip4_elem *data)
+			 const struct hash_ipportip4_elem *data)
 {
 	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
 	    nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
 	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
 	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
 		goto nla_put_failure;
-	return 0;
+	return false;
 
 nla_put_failure:
-	return 1;
+	return true;
 }
 
 static inline void
@@ -86,7 +86,6 @@
 
 /* Common functions */
 #define MTYPE		hash_ipportip4
-#define PF		4
 #define HOST_MASK	32
 #include "ip_set_hash_gen.h"
 
@@ -120,22 +119,19 @@
 	bool with_ports = false;
 	int ret;
 
-	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
-		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
+		return -IPSET_ERR_PROTOCOL;
+
+	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
 
@@ -143,10 +139,7 @@
 	if (ret)
 		return ret;
 
-	if (tb[IPSET_ATTR_PORT])
-		e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-	else
-		return -IPSET_ERR_PROTOCOL;
+	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
 	if (tb[IPSET_ATTR_PROTO]) {
 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -154,8 +147,9 @@
 
 		if (e.proto == 0)
 			return -IPSET_ERR_INVALID_PROTO;
-	} else
+	} else {
 		return -IPSET_ERR_MISSING_PROTO;
+	}
 
 	if (!(with_ports || e.proto == IPPROTO_ICMP))
 		e.port = 0;
@@ -177,7 +171,7 @@
 	} else if (tb[IPSET_ATTR_CIDR]) {
 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-		if (!cidr || cidr > 32)
+		if (!cidr || cidr > HOST_MASK)
 			return -IPSET_ERR_INVALID_CIDR;
 		ip_set_mask_from_to(ip, ip_to, cidr);
 	}
@@ -201,8 +195,8 @@
 
 			if (ret && !ip_set_eexist(ret, flags))
 				return ret;
-			else
-				ret = 0;
+
+			ret = 0;
 		}
 	}
 	return ret;
@@ -240,10 +234,10 @@
 	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
 	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
 		goto nla_put_failure;
-	return 0;
+	return false;
 
 nla_put_failure:
-	return 1;
+	return true;
 }
 
 static inline void
@@ -254,11 +248,9 @@
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
 
 #define MTYPE		hash_ipportip6
-#define PF		6
 #define HOST_MASK	128
 #define IP_SET_EMIT_CREATE
 #include "ip_set_hash_gen.h"
@@ -293,24 +285,27 @@
 	bool with_ports = false;
 	int ret;
 
-	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
-		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
-		     tb[IPSET_ATTR_IP_TO] ||
-		     tb[IPSET_ATTR_CIDR]))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
+		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(tb[IPSET_ATTR_IP_TO]))
+		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
+	if (unlikely(tb[IPSET_ATTR_CIDR])) {
+		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+		if (cidr != HOST_MASK)
+			return -IPSET_ERR_INVALID_CIDR;
+	}
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
 
@@ -318,10 +313,7 @@
 	if (ret)
 		return ret;
 
-	if (tb[IPSET_ATTR_PORT])
-		e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-	else
-		return -IPSET_ERR_PROTOCOL;
+	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
 	if (tb[IPSET_ATTR_PROTO]) {
 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -329,8 +321,9 @@
 
 		if (e.proto == 0)
 			return -IPSET_ERR_INVALID_PROTO;
-	} else
+	} else {
 		return -IPSET_ERR_MISSING_PROTO;
+	}
 
 	if (!(with_ports || e.proto == IPPROTO_ICMPV6))
 		e.port = 0;
@@ -353,8 +346,8 @@
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
-		else
-			ret = 0;
+
+		ret = 0;
 	}
 	return ret;
 }
@@ -388,7 +381,8 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
-		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING,
+					    .len  = IPSET_MAX_COMMENT_SIZE },
 		[IPSET_ATTR_SKBMARK]	= { .type = NLA_U64 },
 		[IPSET_ATTR_SKBPRIO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_SKBQUEUE]	= { .type = NLA_U16 },
@@ -405,6 +399,7 @@
 static void __exit
 hash_ipportip_fini(void)
 {
+	rcu_barrier();
 	ip_set_type_unregister(&hash_ipportip_type);
 }
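
A note on the parsing rework above, repeated across all of these type files: the old `ret = get_a(...) || get_b(...)` idiom collapses any failure into the value 1, so the caller would return 1 instead of the real negative error code. Splitting the chain into sequential calls preserves it. A minimal user-space sketch — err_a()/err_b() are hypothetical stand-ins for ip_set_get_ipaddr4() and ip_set_get_extensions():

    #include <stdio.h>

    /* Hypothetical stand-ins for ip_set_get_ipaddr4() and
     * ip_set_get_extensions(): 0 on success, negative error code on
     * failure. */
    static int err_a(void) { return -22; }	/* -EINVAL */
    static int err_b(void) { return 0; }

    int main(void)
    {
    	/* Boolean-OR chaining: any nonzero result degrades to 1. */
    	int ret = err_a() || err_b();

    	printf("chained:    %d\n", ret);	/* 1, real code lost */

    	/* Sequential form, as in the new code, keeps -EINVAL. */
    	ret = err_a();
    	if (!ret)
    		ret = err_b();
    	printf("sequential: %d\n", ret);	/* -22 */
    	return 0;
    }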
 
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index b6012ad..9ca7196 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -114,10 +114,10 @@
 	    (flags &&
 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 		goto nla_put_failure;
-	return 0;
+	return false;
 
 nla_put_failure:
-	return 1;
+	return true;
 }
 
 static inline void
@@ -130,7 +130,6 @@
 }
 
 #define MTYPE		hash_ipportnet4
-#define PF		4
 #define HOST_MASK	32
 #include "ip_set_hash_gen.h"
 
@@ -142,7 +141,7 @@
 	const struct hash_ipportnet *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipportnet4_elem e = {
-		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
+		.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
 	};
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
@@ -174,23 +173,20 @@
 	u8 cidr;
 	int ret;
 
-	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
-		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+		return -IPSET_ERR_PROTOCOL;
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
 
@@ -205,10 +201,7 @@
 		e.cidr = cidr - 1;
 	}
 
-	if (tb[IPSET_ATTR_PORT])
-		e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-	else
-		return -IPSET_ERR_PROTOCOL;
+	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
 	if (tb[IPSET_ATTR_PROTO]) {
 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -216,14 +209,16 @@
 
 		if (e.proto == 0)
 			return -IPSET_ERR_INVALID_PROTO;
-	} else
+	} else {
 		return -IPSET_ERR_MISSING_PROTO;
+	}
 
 	if (!(with_ports || e.proto == IPPROTO_ICMP))
 		e.port = 0;
 
 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+
 		if (cadt_flags & IPSET_FLAG_NOMATCH)
 			flags |= (IPSET_FLAG_NOMATCH << 16);
 	}
@@ -249,7 +244,7 @@
 	} else if (tb[IPSET_ATTR_CIDR]) {
 		cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-		if (!cidr || cidr > 32)
+		if (!cidr || cidr > HOST_MASK)
 			return -IPSET_ERR_INVALID_CIDR;
 		ip_set_mask_from_to(ip, ip_to, cidr);
 	}
@@ -270,8 +265,9 @@
 			swap(ip2_from, ip2_to);
 		if (ip2_from + UINT_MAX == ip2_to)
 			return -IPSET_ERR_HASH_RANGE;
-	} else
+	} else {
 		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr + 1);
+	}
 
 	if (retried)
 		ip = ntohl(h->next.ip);
@@ -294,8 +290,8 @@
 
 				if (ret && !ip_set_eexist(ret, flags))
 					return ret;
-				else
-					ret = 0;
+
+				ret = 0;
 				ip2 = ip2_last + 1;
 			}
 		}
@@ -367,10 +363,10 @@
 	    (flags &&
 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 		goto nla_put_failure;
-	return 0;
+	return false;
 
 nla_put_failure:
-	return 1;
+	return true;
 }
 
 static inline void
@@ -381,11 +377,9 @@
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
 
 #define MTYPE		hash_ipportnet6
-#define PF		6
 #define HOST_MASK	128
 #define IP_SET_EMIT_CREATE
 #include "ip_set_hash_gen.h"
@@ -398,7 +392,7 @@
 	const struct hash_ipportnet *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipportnet6_elem e = {
-		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
+		.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
 	};
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
@@ -429,27 +423,28 @@
 	u8 cidr;
 	int ret;
 
-	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
-		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
-		     tb[IPSET_ATTR_IP_TO] ||
-		     tb[IPSET_ATTR_CIDR]))
-		return -IPSET_ERR_PROTOCOL;
-	if (unlikely(tb[IPSET_ATTR_IP_TO]))
-		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(tb[IPSET_ATTR_IP_TO]))
+		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
+	if (unlikely(tb[IPSET_ATTR_CIDR])) {
+		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+		if (cidr != HOST_MASK)
+			return -IPSET_ERR_INVALID_CIDR;
+	}
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
 
@@ -466,10 +461,7 @@
 
 	ip6_netmask(&e.ip2, e.cidr + 1);
 
-	if (tb[IPSET_ATTR_PORT])
-		e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-	else
-		return -IPSET_ERR_PROTOCOL;
+	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
 	if (tb[IPSET_ATTR_PROTO]) {
 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -477,14 +469,16 @@
 
 		if (e.proto == 0)
 			return -IPSET_ERR_INVALID_PROTO;
-	} else
+	} else {
 		return -IPSET_ERR_MISSING_PROTO;
+	}
 
 	if (!(with_ports || e.proto == IPPROTO_ICMPV6))
 		e.port = 0;
 
 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+
 		if (cadt_flags & IPSET_FLAG_NOMATCH)
 			flags |= (IPSET_FLAG_NOMATCH << 16);
 	}
@@ -508,8 +502,8 @@
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
-		else
-			ret = 0;
+
+		ret = 0;
 	}
 	return ret;
 }
@@ -547,7 +541,8 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
-		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING,
+					    .len  = IPSET_MAX_COMMENT_SIZE },
 		[IPSET_ATTR_SKBMARK]	= { .type = NLA_U64 },
 		[IPSET_ATTR_SKBPRIO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_SKBQUEUE]	= { .type = NLA_U16 },
@@ -564,6 +559,7 @@
 static void __exit
 hash_ipportnet_fini(void)
 {
+	rcu_barrier();
 	ip_set_type_unregister(&hash_ipportnet_type);
 }
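
The `cidr > 32` to `cidr > HOST_MASK` hunks above are not cosmetic: each of these files is compiled twice, with HOST_MASK redefined (32, then 128) around each inclusion of ip_set_hash_gen.h, so a hard-coded 32 would mis-validate prefixes if the check were ever shared with the IPv6 variant. A small sketch of the parameterised check — cidr_valid() is illustrative, not a kernel helper:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative check; the kernel fixes host_mask at compile time
     * by re-#defining HOST_MASK before each template inclusion. */
    static bool cidr_valid(uint8_t cidr, uint8_t host_mask)
    {
    	return cidr != 0 && cidr <= host_mask;
    }

    int main(void)
    {
    	printf("/33 with HOST_MASK 32:  %s\n",
    	       cidr_valid(33, 32) ? "ok" : "invalid");
    	printf("/33 with HOST_MASK 128: %s\n",
    	       cidr_valid(33, 128) ? "ok" : "invalid");
    	return 0;
    }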
 
diff --git a/net/netfilter/ipset/ip_set_hash_mac.c b/net/netfilter/ipset/ip_set_hash_mac.c
index 65690b5..f1e7d2c 100644
--- a/net/netfilter/ipset/ip_set_hash_mac.c
+++ b/net/netfilter/ipset/ip_set_hash_mac.c
@@ -52,7 +52,12 @@
 static inline bool
 hash_mac4_data_list(struct sk_buff *skb, const struct hash_mac4_elem *e)
 {
-	return nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether);
+	if (nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether))
+		goto nla_put_failure;
+	return false;
+
+nla_put_failure:
+	return true;
 }
 
 static inline void
@@ -62,7 +67,6 @@
 }
 
 #define MTYPE		hash_mac4
-#define PF		4
 #define HOST_MASK	32
 #define IP_SET_EMIT_CREATE
 #define IP_SET_PROTO_UNDEF
@@ -85,10 +89,10 @@
 		return 0;
 
 	if (skb_mac_header(skb) < skb->head ||
-	     (skb_mac_header(skb) + ETH_HLEN) > skb->data)
+	    (skb_mac_header(skb) + ETH_HLEN) > skb->data)
 		return -EINVAL;
 
-	memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
+	ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
 	if (memcmp(e.ether, invalid_ether, ETH_ALEN) == 0)
 		return -EINVAL;
 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
@@ -103,22 +107,16 @@
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	int ret;
 
-	if (unlikely(!tb[IPSET_ATTR_ETHER] ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)   ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
+	if (unlikely(!tb[IPSET_ATTR_ETHER]))
+		return -IPSET_ERR_PROTOCOL;
+
 	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
-	memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
+	ether_addr_copy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]));
 	if (memcmp(e.ether, invalid_ether, ETH_ALEN) == 0)
 		return -IPSET_ERR_HASH_ELEM;
 
@@ -149,7 +147,8 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
-		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING,
+					    .len  = IPSET_MAX_COMMENT_SIZE },
 		[IPSET_ATTR_SKBMARK]	= { .type = NLA_U64 },
 		[IPSET_ATTR_SKBPRIO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_SKBQUEUE]	= { .type = NLA_U16 },
@@ -166,6 +165,7 @@
 static void __exit
 hash_mac_fini(void)
 {
+	rcu_barrier();
 	ip_set_type_unregister(&hash_mac_type);
 }
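
ether_addr_copy() replaces the open-coded memcpy() of the 6-byte source MAC; it is safe here because the element keeps the address 16-bit aligned, which is what that helper requires. A user-space stand-in showing the intended fixed-size copy — the alignment contract is the kernel's, the demo types are mine:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* Stand-in for the kernel's ether_addr_copy(): a fixed ETH_ALEN
     * copy the compiler can open-code as a 4-byte + 2-byte move when
     * both buffers are 16-bit aligned. */
    static inline void ether_addr_copy(uint8_t *dst, const uint8_t *src)
    {
    	memcpy(dst, src, ETH_ALEN);
    }

    int main(void)
    {
    	uint8_t src[ETH_ALEN] = { 0x00, 0x1b, 0x21, 0xab, 0xcd, 0xef };
    	uint8_t dst[ETH_ALEN];

    	ether_addr_copy(dst, src);
    	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
    	       dst[0], dst[1], dst[2], dst[3], dst[4], dst[5]);
    	return 0;
    }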
 
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 6b3ac10..3e4bffd 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -95,10 +95,10 @@
 	    (flags &&
 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 		goto nla_put_failure;
-	return 0;
+	return false;
 
 nla_put_failure:
-	return 1;
+	return true;
 }
 
 static inline void
@@ -109,7 +109,6 @@
 }
 
 #define MTYPE		hash_net4
-#define PF		4
 #define HOST_MASK	32
 #include "ip_set_hash_gen.h"
 
@@ -121,7 +120,7 @@
 	const struct hash_net *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_net4_elem e = {
-		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
+		.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
 	};
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
@@ -147,21 +146,18 @@
 	u32 ip = 0, ip_to = 0, last;
 	int ret;
 
-	if (unlikely(!tb[IPSET_ATTR_IP] ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+		return -IPSET_ERR_PROTOCOL;
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
 
@@ -173,6 +169,7 @@
 
 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+
 		if (cadt_flags & IPSET_FLAG_NOMATCH)
 			flags |= (IPSET_FLAG_NOMATCH << 16);
 	}
@@ -180,7 +177,7 @@
 	if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
 		e.ip = htonl(ip & ip_set_hostmask(e.cidr));
 		ret = adtfn(set, &e, &ext, &ext, flags);
-		return ip_set_enomatch(ret, flags, adt, set) ? -ret:
+		return ip_set_enomatch(ret, flags, adt, set) ? -ret :
 		       ip_set_eexist(ret, flags) ? 0 : ret;
 	}
 
@@ -202,8 +199,8 @@
 		ret = adtfn(set, &e, &ext, &ext, flags);
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
-		else
-			ret = 0;
+
+		ret = 0;
 		ip = last + 1;
 	}
 	return ret;
@@ -264,10 +261,10 @@
 	    (flags &&
 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 		goto nla_put_failure;
-	return 0;
+	return false;
 
 nla_put_failure:
-	return 1;
+	return true;
 }
 
 static inline void
@@ -277,11 +274,9 @@
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
 
 #define MTYPE		hash_net6
-#define PF		6
 #define HOST_MASK	128
 #define IP_SET_EMIT_CREATE
 #include "ip_set_hash_gen.h"
@@ -294,7 +289,7 @@
 	const struct hash_net *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_net6_elem e = {
-		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
+		.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
 	};
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
@@ -318,36 +313,34 @@
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	int ret;
 
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
 	if (unlikely(!tb[IPSET_ATTR_IP] ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
 		return -IPSET_ERR_PROTOCOL;
 	if (unlikely(tb[IPSET_ATTR_IP_TO]))
 		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
 
-	if (tb[IPSET_ATTR_LINENO])
-		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
-
-	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
 	if (ret)
 		return ret;
 
-	if (tb[IPSET_ATTR_CIDR])
-		e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+	ret = ip_set_get_extensions(set, tb, &ext);
+	if (ret)
+		return ret;
 
-	if (!e.cidr || e.cidr > HOST_MASK)
-		return -IPSET_ERR_INVALID_CIDR;
+	if (tb[IPSET_ATTR_CIDR]) {
+		e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+		if (!e.cidr || e.cidr > HOST_MASK)
+			return -IPSET_ERR_INVALID_CIDR;
+	}
 
 	ip6_netmask(&e.ip, e.cidr);
 
 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+
 		if (cadt_flags & IPSET_FLAG_NOMATCH)
 			flags |= (IPSET_FLAG_NOMATCH << 16);
 	}
@@ -383,7 +376,8 @@
 		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
-		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING,
+					    .len  = IPSET_MAX_COMMENT_SIZE },
 		[IPSET_ATTR_SKBMARK]	= { .type = NLA_U64 },
 		[IPSET_ATTR_SKBPRIO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_SKBQUEUE]	= { .type = NLA_U16 },
@@ -400,6 +394,7 @@
 static void __exit
 hash_net_fini(void)
 {
+	rcu_barrier();
 	ip_set_type_unregister(&hash_net_type);
 }
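
Note the reordering in the uadt functions: IPSET_ATTR_LINENO is now consumed before any attribute validation, so a batched `ipset restore` can still report which input line failed when the protocol check rejects the entry. A compact sketch of the idea — parse_entry() and its arguments are illustrative, not the kernel interface:

    #include <stdio.h>

    /* Record the line number first, then validate, so the caller can
     * attribute a failure to the right line of a batched restore. */
    static int parse_entry(int has_ip, unsigned int input_line,
    		       unsigned int *lineno)
    {
    	*lineno = input_line;
    	if (!has_ip)
    		return -1;	/* validation fails, lineno already set */
    	return 0;
    }

    int main(void)
    {
    	unsigned int lineno = 0;

    	if (parse_entry(0, 42, &lineno) < 0)
    		fprintf(stderr, "error near line %u\n", lineno);
    	return 0;
    }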
 
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 380ef51..43d8c98 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -13,7 +13,6 @@
 #include <linux/skbuff.h>
 #include <linux/errno.h>
 #include <linux/random.h>
-#include <linux/rbtree.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/netlink.h>
@@ -37,88 +36,13 @@
 IP_SET_MODULE_DESC("hash:net,iface", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:net,iface");
 
-/* Interface name rbtree */
-
-struct iface_node {
-	struct rb_node node;
-	char iface[IFNAMSIZ];
-};
-
-#define iface_data(n)	(rb_entry(n, struct iface_node, node)->iface)
-
-static void
-rbtree_destroy(struct rb_root *root)
-{
-	struct iface_node *node, *next;
-
-	rbtree_postorder_for_each_entry_safe(node, next, root, node)
-		kfree(node);
-
-	*root = RB_ROOT;
-}
-
-static int
-iface_test(struct rb_root *root, const char **iface)
-{
-	struct rb_node *n = root->rb_node;
-
-	while (n) {
-		const char *d = iface_data(n);
-		int res = strcmp(*iface, d);
-
-		if (res < 0)
-			n = n->rb_left;
-		else if (res > 0)
-			n = n->rb_right;
-		else {
-			*iface = d;
-			return 1;
-		}
-	}
-	return 0;
-}
-
-static int
-iface_add(struct rb_root *root, const char **iface)
-{
-	struct rb_node **n = &(root->rb_node), *p = NULL;
-	struct iface_node *d;
-
-	while (*n) {
-		char *ifname = iface_data(*n);
-		int res = strcmp(*iface, ifname);
-
-		p = *n;
-		if (res < 0)
-			n = &((*n)->rb_left);
-		else if (res > 0)
-			n = &((*n)->rb_right);
-		else {
-			*iface = ifname;
-			return 0;
-		}
-	}
-
-	d = kzalloc(sizeof(*d), GFP_ATOMIC);
-	if (!d)
-		return -ENOMEM;
-	strcpy(d->iface, *iface);
-
-	rb_link_node(&d->node, p, n);
-	rb_insert_color(&d->node, root);
-
-	*iface = d->iface;
-	return 0;
-}
-
 /* Type specific function prefix */
 #define HTYPE		hash_netiface
 #define IP_SET_HASH_WITH_NETS
-#define IP_SET_HASH_WITH_RBTREE
 #define IP_SET_HASH_WITH_MULTI
 #define IP_SET_HASH_WITH_NET0
 
-#define STREQ(a, b)	(strcmp(a, b) == 0)
+#define STRLCPY(a, b)	strlcpy(a, b, IFNAMSIZ)
 
 /* IPv4 variant */
 
@@ -137,7 +61,7 @@
 	u8 cidr;
 	u8 nomatch;
 	u8 elem;
-	const char *iface;
+	char iface[IFNAMSIZ];
 };
 
 /* Common functions */
@@ -151,7 +75,7 @@
 	       ip1->cidr == ip2->cidr &&
 	       (++*multi) &&
 	       ip1->physdev == ip2->physdev &&
-	       ip1->iface == ip2->iface;
+	       strcmp(ip1->iface, ip2->iface) == 0;
 }
 
 static inline int
@@ -193,10 +117,10 @@
 	    (flags &&
 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 		goto nla_put_failure;
-	return 0;
+	return false;
 
 nla_put_failure:
-	return 1;
+	return true;
 }
 
 static inline void
@@ -207,7 +131,6 @@
 }
 
 #define MTYPE		hash_netiface4
-#define PF		4
 #define HOST_MASK	32
 #define HKEY_DATALEN	sizeof(struct hash_netiface4_elem_hashed)
 #include "ip_set_hash_gen.h"
@@ -220,7 +143,7 @@
 	return dev ? dev->name : NULL;
 }
 
-static const char *get_phyoutdev_name(const struct sk_buff *skb)
+static const char *get_physoutdev_name(const struct sk_buff *skb)
 {
 	struct net_device *dev = nf_bridge_get_physoutdev(skb);
 
@@ -236,11 +159,10 @@
 	struct hash_netiface *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netiface4_elem e = {
-		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
+		.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
 		.elem = 1,
 	};
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
-	int ret;
 
 	if (e.cidr == 0)
 		return -EINVAL;
@@ -250,35 +172,25 @@
 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
 	e.ip &= ip_set_netmask(e.cidr);
 
-#define IFACE(dir)	(par->dir ? par->dir->name : NULL)
+#define IFACE(dir)	(par->dir ? par->dir->name : "")
 #define SRCDIR		(opt->flags & IPSET_DIM_TWO_SRC)
 
 	if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-		e.iface = SRCDIR ? get_physindev_name(skb) :
-				   get_phyoutdev_name(skb);
+		const char *eiface = SRCDIR ? get_physindev_name(skb) :
+					      get_physoutdev_name(skb);
 
-		if (!e.iface)
+		if (!eiface)
 			return -EINVAL;
+		STRLCPY(e.iface, eiface);
 		e.physdev = 1;
-#else
-		e.iface = NULL;
 #endif
-	} else
-		e.iface = SRCDIR ? IFACE(in) : IFACE(out);
+	} else {
+		STRLCPY(e.iface, SRCDIR ? IFACE(in) : IFACE(out));
+	}
 
-	if (!e.iface)
+	if (strlen(e.iface) == 0)
 		return -EINVAL;
-	ret = iface_test(&h->rbtree, &e.iface);
-	if (adt == IPSET_ADD) {
-		if (!ret) {
-			ret = iface_add(&h->rbtree, &e.iface);
-			if (ret)
-				return ret;
-		}
-	} else if (!ret)
-		return ret;
-
 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
 }
 
@@ -291,25 +203,21 @@
 	struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	u32 ip = 0, ip_to = 0, last;
-	char iface[IFNAMSIZ];
 	int ret;
 
-	if (unlikely(!tb[IPSET_ATTR_IP] ||
-		     !tb[IPSET_ATTR_IFACE] ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !tb[IPSET_ATTR_IFACE] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+		return -IPSET_ERR_PROTOCOL;
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
 
@@ -318,21 +226,11 @@
 		if (e.cidr > HOST_MASK)
 			return -IPSET_ERR_INVALID_CIDR;
 	}
-
-	strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE]));
-	e.iface = iface;
-	ret = iface_test(&h->rbtree, &e.iface);
-	if (adt == IPSET_ADD) {
-		if (!ret) {
-			ret = iface_add(&h->rbtree, &e.iface);
-			if (ret)
-				return ret;
-		}
-	} else if (!ret)
-		return ret;
+	nla_strlcpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ);
 
 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+
 		if (cadt_flags & IPSET_FLAG_PHYSDEV)
 			e.physdev = 1;
 		if (cadt_flags & IPSET_FLAG_NOMATCH)
@@ -353,8 +251,9 @@
 			swap(ip, ip_to);
 		if (ip + UINT_MAX == ip_to)
 			return -IPSET_ERR_HASH_RANGE;
-	} else
+	} else {
 		ip_set_mask_from_to(ip, ip_to, e.cidr);
+	}
 
 	if (retried)
 		ip = ntohl(h->next.ip);
@@ -365,8 +264,8 @@
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
-		else
-			ret = 0;
+
+		ret = 0;
 		ip = last + 1;
 	}
 	return ret;
@@ -388,7 +287,7 @@
 	u8 cidr;
 	u8 nomatch;
 	u8 elem;
-	const char *iface;
+	char iface[IFNAMSIZ];
 };
 
 /* Common functions */
@@ -402,7 +301,7 @@
 	       ip1->cidr == ip2->cidr &&
 	       (++*multi) &&
 	       ip1->physdev == ip2->physdev &&
-	       ip1->iface == ip2->iface;
+	       strcmp(ip1->iface, ip2->iface) == 0;
 }
 
 static inline int
@@ -444,10 +343,10 @@
 	    (flags &&
 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 		goto nla_put_failure;
-	return 0;
+	return false;
 
 nla_put_failure:
-	return 1;
+	return true;
 }
 
 static inline void
@@ -457,12 +356,9 @@
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
-#undef HKEY_DATALEN
 
 #define MTYPE		hash_netiface6
-#define PF		6
 #define HOST_MASK	128
 #define HKEY_DATALEN	sizeof(struct hash_netiface6_elem_hashed)
 #define IP_SET_EMIT_CREATE
@@ -476,11 +372,10 @@
 	struct hash_netiface *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netiface6_elem e = {
-		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
+		.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
 		.elem = 1,
 	};
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
-	int ret;
 
 	if (e.cidr == 0)
 		return -EINVAL;
@@ -492,85 +387,64 @@
 
 	if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-		e.iface = SRCDIR ? get_physindev_name(skb) :
-				   get_phyoutdev_name(skb);
-		if (!e.iface)
+		const char *eiface = SRCDIR ? get_physindev_name(skb) :
+					      get_physoutdev_name(skb);
+
+		if (!eiface)
 			return -EINVAL;
-
+		STRLCPY(e.iface, eiface);
 		e.physdev = 1;
-#else
-		e.iface = NULL;
 #endif
-	} else
-		e.iface = SRCDIR ? IFACE(in) : IFACE(out);
+	} else {
+		STRLCPY(e.iface, SRCDIR ? IFACE(in) : IFACE(out));
+	}
 
-	if (!e.iface)
+	if (strlen(e.iface) == 0)
 		return -EINVAL;
-	ret = iface_test(&h->rbtree, &e.iface);
-	if (adt == IPSET_ADD) {
-		if (!ret) {
-			ret = iface_add(&h->rbtree, &e.iface);
-			if (ret)
-				return ret;
-		}
-	} else if (!ret)
-		return ret;
 
 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
 }
 
 static int
 hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
-		   enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+		    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
-	struct hash_netiface *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netiface6_elem e = { .cidr = HOST_MASK, .elem = 1 };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-	char iface[IFNAMSIZ];
 	int ret;
 
-	if (unlikely(!tb[IPSET_ATTR_IP] ||
-		     !tb[IPSET_ATTR_IFACE] ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-	if (unlikely(tb[IPSET_ATTR_IP_TO]))
-		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !tb[IPSET_ATTR_IFACE] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(tb[IPSET_ATTR_IP_TO]))
+		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
 	if (ret)
 		return ret;
 
-	if (tb[IPSET_ATTR_CIDR])
+	ret = ip_set_get_extensions(set, tb, &ext);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CIDR]) {
 		e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-	if (e.cidr > HOST_MASK)
-		return -IPSET_ERR_INVALID_CIDR;
+		if (e.cidr > HOST_MASK)
+			return -IPSET_ERR_INVALID_CIDR;
+	}
+
 	ip6_netmask(&e.ip, e.cidr);
 
-	strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE]));
-	e.iface = iface;
-	ret = iface_test(&h->rbtree, &e.iface);
-	if (adt == IPSET_ADD) {
-		if (!ret) {
-			ret = iface_add(&h->rbtree, &e.iface);
-			if (ret)
-				return ret;
-		}
-	} else if (!ret)
-		return ret;
+	nla_strlcpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ);
 
 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+
 		if (cadt_flags & IPSET_FLAG_PHYSDEV)
 			e.physdev = 1;
 		if (cadt_flags & IPSET_FLAG_NOMATCH)
@@ -613,7 +487,8 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
-		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING,
+					    .len  = IPSET_MAX_COMMENT_SIZE },
 		[IPSET_ATTR_SKBMARK]	= { .type = NLA_U64 },
 		[IPSET_ATTR_SKBPRIO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_SKBQUEUE]	= { .type = NLA_U16 },
@@ -630,6 +505,7 @@
 static void __exit
 hash_netiface_fini(void)
 {
+	rcu_barrier();
 	ip_set_type_unregister(&hash_netiface_type);
 }
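
The big simplification above: hash:net,iface elements used to hold a `const char *` into a shared rbtree of interned interface names, with equality by pointer comparison, and every add/test had to consult (and possibly grow) that tree. Embedding a `char iface[IFNAMSIZ]` in each element and comparing with strcmp() drops the side structure entirely, which also suits the set's RCU-protected lookups. A reduced model — struct elem is simplified; the real element also carries address, cidr and flags:

    #include <stdio.h>
    #include <string.h>

    #define IFNAMSIZ 16

    struct elem {
    	char iface[IFNAMSIZ];	/* embedded copy, not an interned pointer */
    };

    /* Equality is now a string compare instead of pointer identity. */
    static int elem_equal(const struct elem *a, const struct elem *b)
    {
    	return strcmp(a->iface, b->iface) == 0;
    }

    int main(void)
    {
    	struct elem a, b;

    	/* strlcpy()/nla_strlcpy() in the kernel; snprintf() is the
    	 * portable stand-in for a bounded, NUL-terminated copy. */
    	snprintf(a.iface, sizeof(a.iface), "%s", "eth0");
    	snprintf(b.iface, sizeof(b.iface), "%s", "eth0");
    	printf("equal: %d\n", elem_equal(&a, &b));	/* 1 */
    	return 0;
    }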
 
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index ea8772a..3c862c0 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -57,8 +57,8 @@
 
 static inline bool
 hash_netnet4_data_equal(const struct hash_netnet4_elem *ip1,
-		     const struct hash_netnet4_elem *ip2,
-		     u32 *multi)
+			const struct hash_netnet4_elem *ip2,
+			u32 *multi)
 {
 	return ip1->ipcmp == ip2->ipcmp &&
 	       ip1->ccmp == ip2->ccmp;
@@ -84,7 +84,7 @@
 
 static inline void
 hash_netnet4_data_reset_elem(struct hash_netnet4_elem *elem,
-			  struct hash_netnet4_elem *orig)
+			     struct hash_netnet4_elem *orig)
 {
 	elem->ip[1] = orig->ip[1];
 }
@@ -103,7 +103,7 @@
 
 static bool
 hash_netnet4_data_list(struct sk_buff *skb,
-		    const struct hash_netnet4_elem *data)
+		       const struct hash_netnet4_elem *data)
 {
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
@@ -122,28 +122,27 @@
 
 static inline void
 hash_netnet4_data_next(struct hash_netnet4_elem *next,
-		    const struct hash_netnet4_elem *d)
+		       const struct hash_netnet4_elem *d)
 {
 	next->ipcmp = d->ipcmp;
 }
 
 #define MTYPE		hash_netnet4
-#define PF		4
 #define HOST_MASK	32
 #include "ip_set_hash_gen.h"
 
 static int
 hash_netnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
-	       const struct xt_action_param *par,
-	       enum ipset_adt adt, struct ip_set_adt_opt *opt)
+		  const struct xt_action_param *par,
+		  enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
 	const struct hash_netnet *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netnet4_elem e = { };
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
-	e.cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
-	e.cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
+	e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
+	e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
 	if (adt == IPSET_TEST)
 		e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;
 
@@ -157,53 +156,50 @@
 
 static int
 hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
-	       enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+		  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct hash_netnet *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
-	struct hash_netnet4_elem e = { };
+	struct hash_netnet4_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	u32 ip = 0, ip_to = 0, last;
 	u32 ip2 = 0, ip2_from = 0, ip2_to = 0, last2;
-	u8 cidr, cidr2;
 	int ret;
 
-	e.cidr[0] = e.cidr[1] = HOST_MASK;
-	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
-	      ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+		return -IPSET_ERR_PROTOCOL;
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
 
 	if (tb[IPSET_ATTR_CIDR]) {
-		cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-		if (!cidr || cidr > HOST_MASK)
+		e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+		if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
 			return -IPSET_ERR_INVALID_CIDR;
-		e.cidr[0] = cidr;
 	}
 
 	if (tb[IPSET_ATTR_CIDR2]) {
-		cidr2 = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
-		if (!cidr2 || cidr2 > HOST_MASK)
+		e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+		if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
 			return -IPSET_ERR_INVALID_CIDR;
-		e.cidr[1] = cidr2;
 	}
 
 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+
 		if (cadt_flags & IPSET_FLAG_NOMATCH)
 			flags |= (IPSET_FLAG_NOMATCH << 16);
 	}
@@ -226,8 +222,9 @@
 			swap(ip, ip_to);
 		if (unlikely(ip + UINT_MAX == ip_to))
 			return -IPSET_ERR_HASH_RANGE;
-	} else
+	} else {
 		ip_set_mask_from_to(ip, ip_to, e.cidr[0]);
+	}
 
 	ip2_to = ip2_from;
 	if (tb[IPSET_ATTR_IP2_TO]) {
@@ -238,28 +235,27 @@
 			swap(ip2_from, ip2_to);
 		if (unlikely(ip2_from + UINT_MAX == ip2_to))
 			return -IPSET_ERR_HASH_RANGE;
-	} else
+	} else {
 		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
+	}
 
 	if (retried)
 		ip = ntohl(h->next.ip[0]);
 
 	while (!after(ip, ip_to)) {
 		e.ip[0] = htonl(ip);
-		last = ip_set_range_to_cidr(ip, ip_to, &cidr);
-		e.cidr[0] = cidr;
+		last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
 		ip2 = (retried &&
 		       ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1])
 						   : ip2_from;
 		while (!after(ip2, ip2_to)) {
 			e.ip[1] = htonl(ip2);
-			last2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr2);
-			e.cidr[1] = cidr2;
+			last2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
 			ret = adtfn(set, &e, &ext, &ext, flags);
 			if (ret && !ip_set_eexist(ret, flags))
 				return ret;
-			else
-				ret = 0;
+
+			ret = 0;
 			ip2 = last2 + 1;
 		}
 		ip = last + 1;
@@ -283,8 +279,8 @@
 
 static inline bool
 hash_netnet6_data_equal(const struct hash_netnet6_elem *ip1,
-		     const struct hash_netnet6_elem *ip2,
-		     u32 *multi)
+			const struct hash_netnet6_elem *ip2,
+			u32 *multi)
 {
 	return ipv6_addr_equal(&ip1->ip[0].in6, &ip2->ip[0].in6) &&
 	       ipv6_addr_equal(&ip1->ip[1].in6, &ip2->ip[1].in6) &&
@@ -311,7 +307,7 @@
 
 static inline void
 hash_netnet6_data_reset_elem(struct hash_netnet6_elem *elem,
-			  struct hash_netnet6_elem *orig)
+			     struct hash_netnet6_elem *orig)
 {
 	elem->ip[1] = orig->ip[1];
 }
@@ -330,7 +326,7 @@
 
 static bool
 hash_netnet6_data_list(struct sk_buff *skb,
-		    const struct hash_netnet6_elem *data)
+		       const struct hash_netnet6_elem *data)
 {
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
@@ -349,34 +345,32 @@
 
 static inline void
 hash_netnet6_data_next(struct hash_netnet4_elem *next,
-		    const struct hash_netnet6_elem *d)
+		       const struct hash_netnet6_elem *d)
 {
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
 
 #define MTYPE		hash_netnet6
-#define PF		6
 #define HOST_MASK	128
 #define IP_SET_EMIT_CREATE
 #include "ip_set_hash_gen.h"
 
 static int
 hash_netnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
-	       const struct xt_action_param *par,
-	       enum ipset_adt adt, struct ip_set_adt_opt *opt)
+		  const struct xt_action_param *par,
+		  enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
 	const struct hash_netnet *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netnet6_elem e = { };
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
-	e.cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
-	e.cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
+	e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
+	e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
 	if (adt == IPSET_TEST)
-		e.ccmp = (HOST_MASK << (sizeof(u8)*8)) | HOST_MASK;
+		e.ccmp = (HOST_MASK << (sizeof(u8) * 8)) | HOST_MASK;
 
 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6);
 	ip6addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1].in6);
@@ -388,50 +382,52 @@
 
 static int
 hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[],
-	       enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+		  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	ipset_adtfn adtfn = set->variant->adt[adt];
-	struct hash_netnet6_elem e = { };
+	struct hash_netnet6_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	int ret;
 
-	e.cidr[0] = e.cidr[1] = HOST_MASK;
-	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-	if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
-		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]) ||
-	      ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
+		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]);
 	if (ret)
 		return ret;
 
-	if (tb[IPSET_ATTR_CIDR])
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_extensions(set, tb, &ext);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CIDR]) {
 		e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+		if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
+			return -IPSET_ERR_INVALID_CIDR;
+	}
 
-	if (tb[IPSET_ATTR_CIDR2])
+	if (tb[IPSET_ATTR_CIDR2]) {
 		e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
-
-	if (!e.cidr[0] || e.cidr[0] > HOST_MASK || !e.cidr[1] ||
-	    e.cidr[1] > HOST_MASK)
-		return -IPSET_ERR_INVALID_CIDR;
+		if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
+			return -IPSET_ERR_INVALID_CIDR;
+	}
 
 	ip6_netmask(&e.ip[0], e.cidr[0]);
 	ip6_netmask(&e.ip[1], e.cidr[1]);
 
 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+
 		if (cadt_flags & IPSET_FLAG_NOMATCH)
 			flags |= (IPSET_FLAG_NOMATCH << 16);
 	}
@@ -470,7 +466,8 @@
 		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
-		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING,
+					    .len  = IPSET_MAX_COMMENT_SIZE },
 		[IPSET_ATTR_SKBMARK]	= { .type = NLA_U64 },
 		[IPSET_ATTR_SKBPRIO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_SKBQUEUE]	= { .type = NLA_U16 },
@@ -487,6 +484,7 @@
 static void __exit
 hash_netnet_fini(void)
 {
+	rcu_barrier();
 	ip_set_type_unregister(&hash_netnet_type);
 }
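
The add loops above walk an IPv4 range by repeatedly carving off the largest aligned CIDR block with ip_set_range_to_cidr(); the cleanup writes the resulting prefix straight into e.cidr[] instead of bouncing it through a local. A simplified reconstruction of that helper's behaviour — my model, not the kernel source:

    #include <stdint.h>
    #include <stdio.h>

    /* Largest prefix starting at 'from' that is aligned on its own
     * size and does not run past 'to'; returns the last address it
     * covers and stores the prefix length in *cidr. */
    static uint32_t range_to_cidr(uint32_t from, uint32_t to, uint8_t *cidr)
    {
    	uint8_t p = 32;

    	while (p > 0) {
    		uint64_t size = 1ULL << (32 - (p - 1));

    		if (from & (uint32_t)(size - 1))	/* alignment */
    			break;
    		if ((uint64_t)from + size - 1 > to)	/* containment */
    			break;
    		p--;
    	}
    	*cidr = p;
    	return from + (uint32_t)((1ULL << (32 - p)) - 1);
    }

    int main(void)
    {
    	uint32_t ip = 1, ip_to = 6;	/* e.g. x.0.0.1 - x.0.0.6 */
    	uint8_t cidr;

    	while (ip <= ip_to) {	/* yields 1/32, 2/31, 4/31, 6/32 */
    		uint32_t last = range_to_cidr(ip, ip_to, &cidr);

    		printf("%u/%u covers up to %u\n", ip, cidr, last);
    		ip = last + 1;
    	}
    	return 0;
    }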
 
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index c0ddb58..731813e 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -110,10 +110,10 @@
 	    (flags &&
 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 		goto nla_put_failure;
-	return 0;
+	return false;
 
 nla_put_failure:
-	return 1;
+	return true;
 }
 
 static inline void
@@ -125,7 +125,6 @@
 }
 
 #define MTYPE		hash_netport4
-#define PF		4
 #define HOST_MASK	32
 #include "ip_set_hash_gen.h"
 
@@ -137,7 +136,7 @@
 	const struct hash_netport *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netport4_elem e = {
-		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
+		.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
 	};
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
@@ -167,23 +166,20 @@
 	u8 cidr;
 	int ret;
 
-	if (unlikely(!tb[IPSET_ATTR_IP] ||
-		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+		return -IPSET_ERR_PROTOCOL;
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
 
@@ -194,10 +190,7 @@
 		e.cidr = cidr - 1;
 	}
 
-	if (tb[IPSET_ATTR_PORT])
-		e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-	else
-		return -IPSET_ERR_PROTOCOL;
+	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
 	if (tb[IPSET_ATTR_PROTO]) {
 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -205,8 +198,9 @@
 
 		if (e.proto == 0)
 			return -IPSET_ERR_INVALID_PROTO;
-	} else
+	} else {
 		return -IPSET_ERR_MISSING_PROTO;
+	}
 
 	if (!(with_ports || e.proto == IPPROTO_ICMP))
 		e.port = 0;
@@ -215,6 +209,7 @@
 
 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+
 		if (cadt_flags & IPSET_FLAG_NOMATCH)
 			flags |= (IPSET_FLAG_NOMATCH << 16);
 	}
@@ -240,8 +235,9 @@
 			swap(ip, ip_to);
 		if (ip + UINT_MAX == ip_to)
 			return -IPSET_ERR_HASH_RANGE;
-	} else
+	} else {
 		ip_set_mask_from_to(ip, ip_to, e.cidr + 1);
+	}
 
 	if (retried)
 		ip = ntohl(h->next.ip);
@@ -257,8 +253,8 @@
 
 			if (ret && !ip_set_eexist(ret, flags))
 				return ret;
-			else
-				ret = 0;
+
+			ret = 0;
 		}
 		ip = last + 1;
 	}
@@ -326,10 +322,10 @@
 	    (flags &&
 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 		goto nla_put_failure;
-	return 0;
+	return false;
 
 nla_put_failure:
-	return 1;
+	return true;
 }
 
 static inline void
@@ -340,11 +336,9 @@
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
 
 #define MTYPE		hash_netport6
-#define PF		6
 #define HOST_MASK	128
 #define IP_SET_EMIT_CREATE
 #include "ip_set_hash_gen.h"
@@ -357,7 +351,7 @@
 	const struct hash_netport *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netport6_elem e = {
-		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
+		.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
 	};
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
@@ -387,25 +381,22 @@
 	u8 cidr;
 	int ret;
 
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
 	if (unlikely(!tb[IPSET_ATTR_IP] ||
 		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
 		return -IPSET_ERR_PROTOCOL;
 	if (unlikely(tb[IPSET_ATTR_IP_TO]))
 		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
 
-	if (tb[IPSET_ATTR_LINENO])
-		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
+	if (ret)
+		return ret;
 
-	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
 
@@ -417,10 +408,7 @@
 	}
 	ip6_netmask(&e.ip, e.cidr + 1);
 
-	if (tb[IPSET_ATTR_PORT])
-		e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-	else
-		return -IPSET_ERR_PROTOCOL;
+	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
 	if (tb[IPSET_ATTR_PROTO]) {
 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -428,14 +416,16 @@
 
 		if (e.proto == 0)
 			return -IPSET_ERR_INVALID_PROTO;
-	} else
+	} else {
 		return -IPSET_ERR_MISSING_PROTO;
+	}
 
 	if (!(with_ports || e.proto == IPPROTO_ICMPV6))
 		e.port = 0;
 
 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+
 		if (cadt_flags & IPSET_FLAG_NOMATCH)
 			flags |= (IPSET_FLAG_NOMATCH << 16);
 	}
@@ -459,8 +449,8 @@
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
-		else
-			ret = 0;
+
+		ret = 0;
 	}
 	return ret;
 }
@@ -495,7 +485,8 @@
 		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
-		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING,
+					    .len  = IPSET_MAX_COMMENT_SIZE },
 		[IPSET_ATTR_SKBMARK]	= { .type = NLA_U64 },
 		[IPSET_ATTR_SKBPRIO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_SKBQUEUE]	= { .type = NLA_U16 },
@@ -512,6 +503,7 @@
 static void __exit
 hash_netport_fini(void)
 {
+	rcu_barrier();
 	ip_set_type_unregister(&hash_netport_type);
 }
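
Several hunks keep the `flags |= (IPSET_FLAG_NOMATCH << 16)` idiom: the u32 passed down to the adt functions carries netlink command flags in its low 16 bits and per-element CADT flags in the high 16. A tiny sketch of that packing convention — the flag value here is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define IPSET_FLAG_NOMATCH 0x0004	/* illustrative bit value */

    int main(void)
    {
    	uint32_t flags = 0x0001;	/* some low-half command flag */

    	/* Element (CADT) flags ride in the upper halfword. */
    	flags |= (uint32_t)IPSET_FLAG_NOMATCH << 16;

    	printf("command flags: 0x%04x\n", flags & 0xffff);
    	printf("cadt flags:    0x%04x\n", flags >> 16);
    	return 0;
    }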
 
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index bfaa94c..0c68734 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -54,7 +54,7 @@
 		u16 ccmp;
 	};
 	u16 padding;
-	u8 nomatch:1;
+	u8 nomatch;
 	u8 proto;
 };
 
@@ -62,8 +62,8 @@
 
 static inline bool
 hash_netportnet4_data_equal(const struct hash_netportnet4_elem *ip1,
-			   const struct hash_netportnet4_elem *ip2,
-			   u32 *multi)
+			    const struct hash_netportnet4_elem *ip2,
+			    u32 *multi)
 {
 	return ip1->ipcmp == ip2->ipcmp &&
 	       ip1->ccmp == ip2->ccmp &&
@@ -91,7 +91,7 @@
 
 static inline void
 hash_netportnet4_data_reset_elem(struct hash_netportnet4_elem *elem,
-				struct hash_netportnet4_elem *orig)
+				 struct hash_netportnet4_elem *orig)
 {
 	elem->ip[1] = orig->ip[1];
 }
@@ -111,7 +111,7 @@
 
 static bool
 hash_netportnet4_data_list(struct sk_buff *skb,
-			  const struct hash_netportnet4_elem *data)
+			   const struct hash_netportnet4_elem *data)
 {
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
@@ -124,37 +124,36 @@
 	    (flags &&
 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 		goto nla_put_failure;
-	return 0;
+	return false;
 
 nla_put_failure:
-	return 1;
+	return true;
 }
 
 static inline void
 hash_netportnet4_data_next(struct hash_netportnet4_elem *next,
-			  const struct hash_netportnet4_elem *d)
+			   const struct hash_netportnet4_elem *d)
 {
 	next->ipcmp = d->ipcmp;
 	next->port = d->port;
 }
 
 #define MTYPE		hash_netportnet4
-#define PF		4
 #define HOST_MASK	32
 #include "ip_set_hash_gen.h"
 
 static int
 hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
-		     const struct xt_action_param *par,
-		     enum ipset_adt adt, struct ip_set_adt_opt *opt)
+		      const struct xt_action_param *par,
+		      enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
 	const struct hash_netportnet *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netportnet4_elem e = { };
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
-	e.cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
-	e.cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
+	e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
+	e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
 	if (adt == IPSET_TEST)
 		e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;
 
@@ -172,58 +171,51 @@
 
 static int
 hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
-		     enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+		      enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct hash_netportnet *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
-	struct hash_netportnet4_elem e = { };
+	struct hash_netportnet4_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	u32 ip = 0, ip_to = 0, ip_last, p = 0, port, port_to;
 	u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
 	bool with_ports = false;
-	u8 cidr, cidr2;
 	int ret;
 
-	e.cidr[0] = e.cidr[1] = HOST_MASK;
-	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
-		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
-	      ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+		return -IPSET_ERR_PROTOCOL;
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
 
 	if (tb[IPSET_ATTR_CIDR]) {
-		cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-		if (!cidr || cidr > HOST_MASK)
+		e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+		if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
 			return -IPSET_ERR_INVALID_CIDR;
-		e.cidr[0] = cidr;
 	}
 
 	if (tb[IPSET_ATTR_CIDR2]) {
-		cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
-		if (!cidr || cidr > HOST_MASK)
+		e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+		if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
 			return -IPSET_ERR_INVALID_CIDR;
-		e.cidr[1] = cidr;
 	}
 
-	if (tb[IPSET_ATTR_PORT])
-		e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-	else
-		return -IPSET_ERR_PROTOCOL;
+	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
 	if (tb[IPSET_ATTR_PROTO]) {
 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -231,14 +223,16 @@
 
 		if (e.proto == 0)
 			return -IPSET_ERR_INVALID_PROTO;
-	} else
+	} else {
 		return -IPSET_ERR_MISSING_PROTO;
+	}
 
 	if (!(with_ports || e.proto == IPPROTO_ICMP))
 		e.port = 0;
 
 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+
 		if (cadt_flags & IPSET_FLAG_NOMATCH)
 			flags |= (IPSET_FLAG_NOMATCH << 16);
 	}
@@ -262,8 +256,9 @@
 			swap(ip, ip_to);
 		if (unlikely(ip + UINT_MAX == ip_to))
 			return -IPSET_ERR_HASH_RANGE;
-	} else
+	} else {
 		ip_set_mask_from_to(ip, ip_to, e.cidr[0]);
+	}
 
 	port_to = port = ntohs(e.port);
 	if (tb[IPSET_ATTR_PORT_TO]) {
@@ -281,16 +276,16 @@
 			swap(ip2_from, ip2_to);
 		if (unlikely(ip2_from + UINT_MAX == ip2_to))
 			return -IPSET_ERR_HASH_RANGE;
-	} else
+	} else {
 		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
+	}
 
 	if (retried)
 		ip = ntohl(h->next.ip[0]);
 
 	while (!after(ip, ip_to)) {
 		e.ip[0] = htonl(ip);
-		ip_last = ip_set_range_to_cidr(ip, ip_to, &cidr);
-		e.cidr[0] = cidr;
+		ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
 		p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port)
 							  : port;
 		for (; p <= port_to; p++) {
@@ -301,13 +296,12 @@
 			while (!after(ip2, ip2_to)) {
 				e.ip[1] = htonl(ip2);
 				ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
-								&cidr2);
-				e.cidr[1] = cidr2;
+								&e.cidr[1]);
 				ret = adtfn(set, &e, &ext, &ext, flags);
 				if (ret && !ip_set_eexist(ret, flags))
 					return ret;
-				else
-					ret = 0;
+
+				ret = 0;
 				ip2 = ip2_last + 1;
 			}
 		}
@@ -326,7 +320,7 @@
 		u16 ccmp;
 	};
 	u16 padding;
-	u8 nomatch:1;
+	u8 nomatch;
 	u8 proto;
 };
 
@@ -334,8 +328,8 @@
 
 static inline bool
 hash_netportnet6_data_equal(const struct hash_netportnet6_elem *ip1,
-			   const struct hash_netportnet6_elem *ip2,
-			   u32 *multi)
+			    const struct hash_netportnet6_elem *ip2,
+			    u32 *multi)
 {
 	return ipv6_addr_equal(&ip1->ip[0].in6, &ip2->ip[0].in6) &&
 	       ipv6_addr_equal(&ip1->ip[1].in6, &ip2->ip[1].in6) &&
@@ -364,7 +358,7 @@
 
 static inline void
 hash_netportnet6_data_reset_elem(struct hash_netportnet6_elem *elem,
-				struct hash_netportnet6_elem *orig)
+				 struct hash_netportnet6_elem *orig)
 {
 	elem->ip[1] = orig->ip[1];
 }
@@ -384,7 +378,7 @@
 
 static bool
 hash_netportnet6_data_list(struct sk_buff *skb,
-			  const struct hash_netportnet6_elem *data)
+			   const struct hash_netportnet6_elem *data)
 {
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
@@ -397,41 +391,39 @@
 	    (flags &&
 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 		goto nla_put_failure;
-	return 0;
+	return false;
 
 nla_put_failure:
-	return 1;
+	return true;
 }
 
 static inline void
 hash_netportnet6_data_next(struct hash_netportnet4_elem *next,
-			  const struct hash_netportnet6_elem *d)
+			   const struct hash_netportnet6_elem *d)
 {
 	next->port = d->port;
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
 
 #define MTYPE		hash_netportnet6
-#define PF		6
 #define HOST_MASK	128
 #define IP_SET_EMIT_CREATE
 #include "ip_set_hash_gen.h"
 
 static int
 hash_netportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
-		     const struct xt_action_param *par,
-		     enum ipset_adt adt, struct ip_set_adt_opt *opt)
+		      const struct xt_action_param *par,
+		      enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
 	const struct hash_netportnet *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netportnet6_elem e = { };
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
-	e.cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
-	e.cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
+	e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
+	e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
 	if (adt == IPSET_TEST)
 		e.ccmp = (HOST_MASK << (sizeof(u8) * 8)) | HOST_MASK;
 
@@ -449,57 +441,55 @@
 
 static int
 hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
-		     enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+		      enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct hash_netportnet *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
-	struct hash_netportnet6_elem e = { };
+	struct hash_netportnet6_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	u32 port, port_to;
 	bool with_ports = false;
 	int ret;
 
-	e.cidr[0] = e.cidr[1] = HOST_MASK;
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
 	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
 		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
 		return -IPSET_ERR_PROTOCOL;
 	if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
 		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
 
-	if (tb[IPSET_ATTR_LINENO])
-		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
-
-	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]) ||
-	      ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]) ||
-	      ip_set_get_extensions(set, tb, &ext);
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]);
 	if (ret)
 		return ret;
 
-	if (tb[IPSET_ATTR_CIDR])
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_extensions(set, tb, &ext);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CIDR]) {
 		e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+		if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
+			return -IPSET_ERR_INVALID_CIDR;
+	}
 
-	if (tb[IPSET_ATTR_CIDR2])
+	if (tb[IPSET_ATTR_CIDR2]) {
 		e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
-
-	if (unlikely(!e.cidr[0] || e.cidr[0] > HOST_MASK || !e.cidr[1] ||
-		     e.cidr[1] > HOST_MASK))
-		return -IPSET_ERR_INVALID_CIDR;
+		if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
+			return -IPSET_ERR_INVALID_CIDR;
+	}
 
 	ip6_netmask(&e.ip[0], e.cidr[0]);
 	ip6_netmask(&e.ip[1], e.cidr[1]);
 
-	if (tb[IPSET_ATTR_PORT])
-		e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-	else
-		return -IPSET_ERR_PROTOCOL;
+	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
 	if (tb[IPSET_ATTR_PROTO]) {
 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -507,14 +497,16 @@
 
 		if (e.proto == 0)
 			return -IPSET_ERR_INVALID_PROTO;
-	} else
+	} else {
 		return -IPSET_ERR_MISSING_PROTO;
+	}
 
 	if (!(with_ports || e.proto == IPPROTO_ICMPV6))
 		e.port = 0;
 
 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+
 		if (cadt_flags & IPSET_FLAG_NOMATCH)
 			flags |= (IPSET_FLAG_NOMATCH << 16);
 	}
@@ -538,8 +530,8 @@
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
-		else
-			ret = 0;
+
+		ret = 0;
 	}
 	return ret;
 }
@@ -577,7 +569,8 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
-		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING,
+					    .len  = IPSET_MAX_COMMENT_SIZE },
 		[IPSET_ATTR_SKBMARK]	= { .type = NLA_U64 },
 		[IPSET_ATTR_SKBPRIO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_SKBQUEUE]	= { .type = NLA_U16 },
@@ -594,6 +587,7 @@
 static void __exit
 hash_netportnet_fini(void)
 {
+	rcu_barrier();
 	ip_set_type_unregister(&hash_netportnet_type);
 }
 
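The uadt rework above replaces the OR-chained calls to ip_set_get_ipaddr6() and ip_set_get_extensions() with one check per call. A runnable userspace sketch of one benefit of the sequential form (fail_einval() is a made-up stand-in for any of those helpers):

#include <stdio.h>

static int fail_einval(void)
{
	return -22;			/* stand-in for -EINVAL */
}

int main(void)
{
	int ret;

	/* Old pattern: || collapses any non-zero result to 1, so the
	 * precise error code never reaches the caller. */
	ret = fail_einval() || fail_einval();
	printf("OR-chained: %d\n", ret);	/* prints 1 */

	/* New pattern: check each call, propagate the real code. */
	ret = fail_einval();
	if (ret)
		printf("sequential: %d\n", ret);	/* prints -22 */
	return 0;
}
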
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index f8f6828..a1fe537 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -9,6 +9,7 @@
 
 #include <linux/module.h>
 #include <linux/ip.h>
+#include <linux/rculist.h>
 #include <linux/skbuff.h>
 #include <linux/errno.h>
 
@@ -27,6 +28,8 @@
 
 /* Member elements  */
 struct set_elem {
+	struct rcu_head rcu;
+	struct list_head list;
 	ip_set_id_t id;
 };
 
@@ -41,12 +44,9 @@
 	u32 size;		/* size of set list array */
 	struct timer_list gc;	/* garbage collection */
 	struct net *net;	/* namespace */
-	struct set_elem members[0]; /* the set members */
+	struct list_head members; /* the set members */
 };
 
-#define list_set_elem(set, map, id)	\
-	(struct set_elem *)((void *)(map)->members + (id) * (set)->dsize)
-
 static int
 list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
 	       const struct xt_action_param *par,
@@ -54,17 +54,14 @@
 {
 	struct list_set *map = set->data;
 	struct set_elem *e;
-	u32 i, cmdflags = opt->cmdflags;
+	u32 cmdflags = opt->cmdflags;
 	int ret;
 
 	/* Don't lookup sub-counters at all */
 	opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS;
 	if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
 		opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE;
-	for (i = 0; i < map->size; i++) {
-		e = list_set_elem(set, map, i);
-		if (e->id == IPSET_INVALID_ID)
-			return 0;
+	list_for_each_entry_rcu(e, &map->members, list) {
 		if (SET_WITH_TIMEOUT(set) &&
 		    ip_set_timeout_expired(ext_timeout(e, set)))
 			continue;
@@ -91,13 +88,9 @@
 {
 	struct list_set *map = set->data;
 	struct set_elem *e;
-	u32 i;
 	int ret;
 
-	for (i = 0; i < map->size; i++) {
-		e = list_set_elem(set, map, i);
-		if (e->id == IPSET_INVALID_ID)
-			return 0;
+	list_for_each_entry(e, &map->members, list) {
 		if (SET_WITH_TIMEOUT(set) &&
 		    ip_set_timeout_expired(ext_timeout(e, set)))
 			continue;
@@ -115,13 +108,9 @@
 {
 	struct list_set *map = set->data;
 	struct set_elem *e;
-	u32 i;
 	int ret;
 
-	for (i = 0; i < map->size; i++) {
-		e = list_set_elem(set, map, i);
-		if (e->id == IPSET_INVALID_ID)
-			return 0;
+	list_for_each_entry(e, &map->members, list) {
 		if (SET_WITH_TIMEOUT(set) &&
 		    ip_set_timeout_expired(ext_timeout(e, set)))
 			continue;
@@ -138,110 +127,65 @@
 	      enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+	int ret = -EINVAL;
 
+	rcu_read_lock();
 	switch (adt) {
 	case IPSET_TEST:
-		return list_set_ktest(set, skb, par, opt, &ext);
+		ret = list_set_ktest(set, skb, par, opt, &ext);
+		break;
 	case IPSET_ADD:
-		return list_set_kadd(set, skb, par, opt, &ext);
+		ret = list_set_kadd(set, skb, par, opt, &ext);
+		break;
 	case IPSET_DEL:
-		return list_set_kdel(set, skb, par, opt, &ext);
+		ret = list_set_kdel(set, skb, par, opt, &ext);
+		break;
 	default:
 		break;
 	}
-	return -EINVAL;
+	rcu_read_unlock();
+
+	return ret;
 }
 
-static bool
-id_eq(const struct ip_set *set, u32 i, ip_set_id_t id)
-{
-	const struct list_set *map = set->data;
-	const struct set_elem *e;
+/* Userspace interfaces: we are protected by the nfnl mutex */
 
-	if (i >= map->size)
-		return 0;
-
-	e = list_set_elem(set, map, i);
-	return !!(e->id == id &&
-		 !(SET_WITH_TIMEOUT(set) &&
-		   ip_set_timeout_expired(ext_timeout(e, set))));
-}
-
-static int
-list_set_add(struct ip_set *set, u32 i, struct set_adt_elem *d,
-	     const struct ip_set_ext *ext)
+static void
+__list_set_del(struct ip_set *set, struct set_elem *e)
 {
 	struct list_set *map = set->data;
-	struct set_elem *e = list_set_elem(set, map, i);
-
-	if (e->id != IPSET_INVALID_ID) {
-		if (i == map->size - 1) {
-			/* Last element replaced: e.g. add new,before,last */
-			ip_set_put_byindex(map->net, e->id);
-			ip_set_ext_destroy(set, e);
-		} else {
-			struct set_elem *x = list_set_elem(set, map,
-							   map->size - 1);
-
-			/* Last element pushed off */
-			if (x->id != IPSET_INVALID_ID) {
-				ip_set_put_byindex(map->net, x->id);
-				ip_set_ext_destroy(set, x);
-			}
-			memmove(list_set_elem(set, map, i + 1), e,
-				set->dsize * (map->size - (i + 1)));
-			/* Extensions must be initialized to zero */
-			memset(e, 0, set->dsize);
-		}
-	}
-
-	e->id = d->id;
-	if (SET_WITH_TIMEOUT(set))
-		ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
-	if (SET_WITH_COUNTER(set))
-		ip_set_init_counter(ext_counter(e, set), ext);
-	if (SET_WITH_COMMENT(set))
-		ip_set_init_comment(ext_comment(e, set), ext);
-	if (SET_WITH_SKBINFO(set))
-		ip_set_init_skbinfo(ext_skbinfo(e, set), ext);
-	return 0;
-}
-
-static int
-list_set_del(struct ip_set *set, u32 i)
-{
-	struct list_set *map = set->data;
-	struct set_elem *e = list_set_elem(set, map, i);
 
 	ip_set_put_byindex(map->net, e->id);
+	/* Safe to call this here: none of the extensions to be
+	 * destroyed is still used by the kernel side.
+	 */
 	ip_set_ext_destroy(set, e);
+	kfree_rcu(e, rcu);
+}
 
-	if (i < map->size - 1)
-		memmove(e, list_set_elem(set, map, i + 1),
-			set->dsize * (map->size - (i + 1)));
+static inline void
+list_set_del(struct ip_set *set, struct set_elem *e)
+{
+	list_del_rcu(&e->list);
+	__list_set_del(set, e);
+}
 
-	/* Last element */
-	e = list_set_elem(set, map, map->size - 1);
-	e->id = IPSET_INVALID_ID;
-	return 0;
+static inline void
+list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old)
+{
+	list_replace_rcu(&old->list, &e->list);
+	__list_set_del(set, old);
 }
 
 static void
 set_cleanup_entries(struct ip_set *set)
 {
 	struct list_set *map = set->data;
-	struct set_elem *e;
-	u32 i = 0;
+	struct set_elem *e, *n;
 
-	while (i < map->size) {
-		e = list_set_elem(set, map, i);
-		if (e->id != IPSET_INVALID_ID &&
-		    ip_set_timeout_expired(ext_timeout(e, set)))
-			list_set_del(set, i);
-			/* Check element moved to position i in next loop */
-		else
-			i++;
-	}
+	list_for_each_entry_safe(e, n, &map->members, list)
+		if (ip_set_timeout_expired(ext_timeout(e, set)))
+			list_set_del(set, e);
 }
 
 static int
@@ -250,31 +194,46 @@
 {
 	struct list_set *map = set->data;
 	struct set_adt_elem *d = value;
-	struct set_elem *e;
-	u32 i;
+	struct set_elem *e, *next, *prev = NULL;
 	int ret;
 
-	for (i = 0; i < map->size; i++) {
-		e = list_set_elem(set, map, i);
-		if (e->id == IPSET_INVALID_ID)
-			return 0;
-		else if (SET_WITH_TIMEOUT(set) &&
-			 ip_set_timeout_expired(ext_timeout(e, set)))
+	list_for_each_entry(e, &map->members, list) {
+		if (SET_WITH_TIMEOUT(set) &&
+		    ip_set_timeout_expired(ext_timeout(e, set)))
 			continue;
-		else if (e->id != d->id)
+		else if (e->id != d->id) {
+			prev = e;
 			continue;
+		}
 
-		if (d->before == 0)
-			return 1;
-		else if (d->before > 0)
-			ret = id_eq(set, i + 1, d->refid);
-		else
-			ret = i > 0 && id_eq(set, i - 1, d->refid);
+		if (d->before == 0) {
+			ret = 1;
+		} else if (d->before > 0) {
+			next = list_next_entry(e, list);
+			ret = !list_is_last(&e->list, &map->members) &&
+			      next->id == d->refid;
+		} else {
+			ret = prev && prev->id == d->refid;
+		}
 		return ret;
 	}
 	return 0;
 }
 
+static void
+list_set_init_extensions(struct ip_set *set, const struct ip_set_ext *ext,
+			 struct set_elem *e)
+{
+	if (SET_WITH_COUNTER(set))
+		ip_set_init_counter(ext_counter(e, set), ext);
+	if (SET_WITH_COMMENT(set))
+		ip_set_init_comment(ext_comment(e, set), ext);
+	if (SET_WITH_SKBINFO(set))
+		ip_set_init_skbinfo(ext_skbinfo(e, set), ext);
+	/* Update timeout last */
+	if (SET_WITH_TIMEOUT(set))
+		ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
+}
 
 static int
 list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
@@ -282,60 +241,78 @@
 {
 	struct list_set *map = set->data;
 	struct set_adt_elem *d = value;
-	struct set_elem *e;
+	struct set_elem *e, *n, *prev, *next;
 	bool flag_exist = flags & IPSET_FLAG_EXIST;
-	u32 i, ret = 0;
 
 	if (SET_WITH_TIMEOUT(set))
 		set_cleanup_entries(set);
 
-	/* Check already added element */
-	for (i = 0; i < map->size; i++) {
-		e = list_set_elem(set, map, i);
-		if (e->id == IPSET_INVALID_ID)
-			goto insert;
-		else if (e->id != d->id)
+	/* Find where to add the new entry */
+	n = prev = next = NULL;
+	list_for_each_entry(e, &map->members, list) {
+		if (SET_WITH_TIMEOUT(set) &&
+		    ip_set_timeout_expired(ext_timeout(e, set)))
 			continue;
-
-		if ((d->before > 1 && !id_eq(set, i + 1, d->refid)) ||
-		    (d->before < 0 &&
-		     (i == 0 || !id_eq(set, i - 1, d->refid))))
-			/* Before/after doesn't match */
+		else if (d->id == e->id)
+			n = e;
+		else if (d->before == 0 || e->id != d->refid)
+			continue;
+		else if (d->before > 0)
+			next = e;
+		else
+			prev = e;
+	}
+	/* Re-add already existing element */
+	if (n) {
+		if ((d->before > 0 && !next) ||
+		    (d->before < 0 && !prev))
 			return -IPSET_ERR_REF_EXIST;
 		if (!flag_exist)
-			/* Can't re-add */
 			return -IPSET_ERR_EXIST;
 		/* Update extensions */
-		ip_set_ext_destroy(set, e);
+		ip_set_ext_destroy(set, n);
+		list_set_init_extensions(set, ext, n);
 
-		if (SET_WITH_TIMEOUT(set))
-			ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
-		if (SET_WITH_COUNTER(set))
-			ip_set_init_counter(ext_counter(e, set), ext);
-		if (SET_WITH_COMMENT(set))
-			ip_set_init_comment(ext_comment(e, set), ext);
-		if (SET_WITH_SKBINFO(set))
-			ip_set_init_skbinfo(ext_skbinfo(e, set), ext);
 		/* Set is already added to the list */
 		ip_set_put_byindex(map->net, d->id);
 		return 0;
 	}
-insert:
-	ret = -IPSET_ERR_LIST_FULL;
-	for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) {
-		e = list_set_elem(set, map, i);
-		if (e->id == IPSET_INVALID_ID)
-			ret = d->before != 0 ? -IPSET_ERR_REF_EXIST
-				: list_set_add(set, i, d, ext);
-		else if (e->id != d->refid)
-			continue;
-		else if (d->before > 0)
-			ret = list_set_add(set, i, d, ext);
-		else if (i + 1 < map->size)
-			ret = list_set_add(set, i + 1, d, ext);
+	/* Add new entry */
+	if (d->before == 0) {
+		/* Append */
+		n = list_empty(&map->members) ? NULL :
+		    list_last_entry(&map->members, struct set_elem, list);
+	} else if (d->before > 0) {
+		/* Insert after next element */
+		if (!list_is_last(&next->list, &map->members))
+			n = list_next_entry(next, list);
+	} else {
+		/* Insert before prev element */
+		if (prev->list.prev != &map->members)
+			n = list_prev_entry(prev, list);
 	}
+	/* Can we replace a timed out entry? */
+	if (n &&
+	    !(SET_WITH_TIMEOUT(set) &&
+	      ip_set_timeout_expired(ext_timeout(n, set))))
+		n = NULL;
 
-	return ret;
+	e = kzalloc(set->dsize, GFP_KERNEL);
+	if (!e)
+		return -ENOMEM;
+	e->id = d->id;
+	INIT_LIST_HEAD(&e->list);
+	list_set_init_extensions(set, ext, e);
+	if (n)
+		list_set_replace(set, e, n);
+	else if (next)
+		list_add_tail_rcu(&e->list, &next->list);
+	else if (prev)
+		list_add_rcu(&e->list, &prev->list);
+	else
+		list_add_tail_rcu(&e->list, &map->members);
+
+	return 0;
 }
 
 static int
@@ -344,32 +321,30 @@
 {
 	struct list_set *map = set->data;
 	struct set_adt_elem *d = value;
-	struct set_elem *e;
-	u32 i;
+	struct set_elem *e, *next, *prev = NULL;
 
-	for (i = 0; i < map->size; i++) {
-		e = list_set_elem(set, map, i);
-		if (e->id == IPSET_INVALID_ID)
-			return d->before != 0 ? -IPSET_ERR_REF_EXIST
-					      : -IPSET_ERR_EXIST;
-		else if (SET_WITH_TIMEOUT(set) &&
-			 ip_set_timeout_expired(ext_timeout(e, set)))
+	list_for_each_entry(e, &map->members, list) {
+		if (SET_WITH_TIMEOUT(set) &&
+		    ip_set_timeout_expired(ext_timeout(e, set)))
 			continue;
-		else if (e->id != d->id)
+		else if (e->id != d->id) {
+			prev = e;
 			continue;
+		}
 
-		if (d->before == 0)
-			return list_set_del(set, i);
-		else if (d->before > 0) {
-			if (!id_eq(set, i + 1, d->refid))
+		if (d->before > 0) {
+			next = list_next_entry(e, list);
+			if (list_is_last(&e->list, &map->members) ||
+			    next->id != d->refid)
 				return -IPSET_ERR_REF_EXIST;
-			return list_set_del(set, i);
-		} else if (i == 0 || !id_eq(set, i - 1, d->refid))
-			return -IPSET_ERR_REF_EXIST;
-		else
-			return list_set_del(set, i);
+		} else if (d->before < 0) {
+			if (!prev || prev->id != d->refid)
+				return -IPSET_ERR_REF_EXIST;
+		}
+		list_set_del(set, e);
+		return 0;
 	}
-	return -IPSET_ERR_EXIST;
+	return d->before != 0 ? -IPSET_ERR_REF_EXIST : -IPSET_ERR_EXIST;
 }
 
 static int
@@ -383,19 +358,13 @@
 	struct ip_set *s;
 	int ret = 0;
 
-	if (unlikely(!tb[IPSET_ATTR_NAME] ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
-		return -IPSET_ERR_PROTOCOL;
-
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
+	if (unlikely(!tb[IPSET_ATTR_NAME] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+		return -IPSET_ERR_PROTOCOL;
+
 	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
@@ -410,6 +379,7 @@
 
 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
 		u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+
 		e.before = f & IPSET_FLAG_BEFORE;
 	}
 
@@ -447,27 +417,26 @@
 list_set_flush(struct ip_set *set)
 {
 	struct list_set *map = set->data;
-	struct set_elem *e;
-	u32 i;
+	struct set_elem *e, *n;
 
-	for (i = 0; i < map->size; i++) {
-		e = list_set_elem(set, map, i);
-		if (e->id != IPSET_INVALID_ID) {
-			ip_set_put_byindex(map->net, e->id);
-			ip_set_ext_destroy(set, e);
-			e->id = IPSET_INVALID_ID;
-		}
-	}
+	list_for_each_entry_safe(e, n, &map->members, list)
+		list_set_del(set, e);
 }
 
 static void
 list_set_destroy(struct ip_set *set)
 {
 	struct list_set *map = set->data;
+	struct set_elem *e, *n;
 
 	if (SET_WITH_TIMEOUT(set))
 		del_timer_sync(&map->gc);
-	list_set_flush(set);
+	list_for_each_entry_safe(e, n, &map->members, list) {
+		list_del(&e->list);
+		ip_set_put_byindex(map->net, e->id);
+		ip_set_ext_destroy(set, e);
+		kfree(e);
+	}
 	kfree(map);
 
 	set->data = NULL;
@@ -478,6 +447,11 @@
 {
 	const struct list_set *map = set->data;
 	struct nlattr *nested;
+	struct set_elem *e;
+	u32 n = 0;
+
+	list_for_each_entry(e, &map->members, list)
+		n++;
 
 	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 	if (!nested)
@@ -485,7 +459,7 @@
 	if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
 	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
 	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
-			  htonl(sizeof(*map) + map->size * set->dsize)))
+			  htonl(sizeof(*map) + n * set->dsize)))
 		goto nla_put_failure;
 	if (unlikely(ip_set_put_flags(skb, set)))
 		goto nla_put_failure;
@@ -502,18 +476,22 @@
 {
 	const struct list_set *map = set->data;
 	struct nlattr *atd, *nested;
-	u32 i, first = cb->args[IPSET_CB_ARG0];
-	const struct set_elem *e;
+	u32 i = 0, first = cb->args[IPSET_CB_ARG0];
+	struct set_elem *e;
+	int ret = 0;
 
 	atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
 	if (!atd)
 		return -EMSGSIZE;
-	for (; cb->args[IPSET_CB_ARG0] < map->size;
-	     cb->args[IPSET_CB_ARG0]++) {
-		i = cb->args[IPSET_CB_ARG0];
-		e = list_set_elem(set, map, i);
-		if (e->id == IPSET_INVALID_ID)
-			goto finish;
+	list_for_each_entry(e, &map->members, list) {
+		if (i == first)
+			break;
+		i++;
+	}
+
+	rcu_read_lock();
+	list_for_each_entry_from(e, &map->members, list) {
+		i++;
 		if (SET_WITH_TIMEOUT(set) &&
 		    ip_set_timeout_expired(ext_timeout(e, set)))
 			continue;
@@ -521,9 +499,10 @@
 		if (!nested) {
 			if (i == first) {
 				nla_nest_cancel(skb, atd);
-				return -EMSGSIZE;
-			} else
-				goto nla_put_failure;
+				ret = -EMSGSIZE;
+				goto out;
+			}
+			goto nla_put_failure;
 		}
 		if (nla_put_string(skb, IPSET_ATTR_NAME,
 				   ip_set_name_byindex(map->net, e->id)))
@@ -532,20 +511,23 @@
 			goto nla_put_failure;
 		ipset_nest_end(skb, nested);
 	}
-finish:
+
 	ipset_nest_end(skb, atd);
 	/* Set listing finished */
 	cb->args[IPSET_CB_ARG0] = 0;
-	return 0;
+	goto out;
 
 nla_put_failure:
 	nla_nest_cancel(skb, nested);
 	if (unlikely(i == first)) {
 		cb->args[IPSET_CB_ARG0] = 0;
-		return -EMSGSIZE;
+		ret = -EMSGSIZE;
 	}
+	cb->args[IPSET_CB_ARG0] = i - 1;
 	ipset_nest_end(skb, atd);
-	return 0;
+out:
+	rcu_read_unlock();
+	return ret;
 }
 
 static bool
@@ -577,12 +559,12 @@
 static void
 list_set_gc(unsigned long ul_set)
 {
-	struct ip_set *set = (struct ip_set *) ul_set;
+	struct ip_set *set = (struct ip_set *)ul_set;
 	struct list_set *map = set->data;
 
-	write_lock_bh(&set->lock);
+	spin_lock_bh(&set->lock);
 	set_cleanup_entries(set);
-	write_unlock_bh(&set->lock);
+	spin_unlock_bh(&set->lock);
 
 	map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
 	add_timer(&map->gc);
@@ -594,7 +576,7 @@
 	struct list_set *map = set->data;
 
 	init_timer(&map->gc);
-	map->gc.data = (unsigned long) set;
+	map->gc.data = (unsigned long)set;
 	map->gc.function = gc;
 	map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
 	add_timer(&map->gc);
@@ -606,24 +588,16 @@
 init_list_set(struct net *net, struct ip_set *set, u32 size)
 {
 	struct list_set *map;
-	struct set_elem *e;
-	u32 i;
 
-	map = kzalloc(sizeof(*map) +
-		      min_t(u32, size, IP_SET_LIST_MAX_SIZE) * set->dsize,
-		      GFP_KERNEL);
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
 	if (!map)
 		return false;
 
 	map->size = size;
 	map->net = net;
+	INIT_LIST_HEAD(&map->members);
 	set->data = map;
 
-	for (i = 0; i < size; i++) {
-		e = list_set_elem(set, map, i);
-		e->id = IPSET_INVALID_ID;
-	}
-
 	return true;
 }
 
@@ -678,7 +652,8 @@
 		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
-		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING,
+					    .len  = IPSET_MAX_COMMENT_SIZE },
 		[IPSET_ATTR_SKBMARK]	= { .type = NLA_U64 },
 		[IPSET_ATTR_SKBPRIO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_SKBQUEUE]	= { .type = NLA_U16 },
@@ -695,6 +670,7 @@
 static void __exit
 list_set_fini(void)
 {
+	rcu_barrier();
 	ip_set_type_unregister(&list_set_type);
 }
 
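The list_set conversion above follows the standard RCU list recipe. A condensed sketch of the two sides, using only the primitives visible in the hunks (kernel context assumed; expired() and match() are hypothetical predicates standing in for the timeout test and the lookup):

	/* Reader: no lock taken; traversal stays safe against a
	 * concurrent unlink because writers only use list_del_rcu(). */
	rcu_read_lock();
	list_for_each_entry_rcu(e, &map->members, list)
		if (!expired(e) && match(e))
			break;
	rcu_read_unlock();

	/* Writer: serialized by set->lock. The element is unlinked
	 * first and freed only after a grace period, so a reader still
	 * walking the list never touches freed memory. */
	spin_lock_bh(&set->lock);
	list_del_rcu(&e->list);
	spin_unlock_bh(&set->lock);
	kfree_rcu(e, rcu);	/* needs the struct rcu_head in set_elem */

The rcu_barrier() added to the fini routines is the matching teardown step: it waits for all pending kfree_rcu() callbacks before the module text can go away.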
diff --git a/net/netfilter/ipset/pfxlen.c b/net/netfilter/ipset/pfxlen.c
index 04d15fd..1c8a42c 100644
--- a/net/netfilter/ipset/pfxlen.c
+++ b/net/netfilter/ipset/pfxlen.c
@@ -1,9 +1,7 @@
 #include <linux/export.h>
 #include <linux/netfilter/ipset/pfxlen.h>
 
-/*
- * Prefixlen maps for fast conversions, by Jan Engelhardt.
- */
+/* Prefixlen maps for fast conversions, by Jan Engelhardt. */
 
 #define E(a, b, c, d) \
 	{.ip6 = { \
@@ -11,8 +9,7 @@
 		htonl(c), htonl(d), \
 	} }
 
-/*
- * This table works for both IPv4 and IPv6;
+/* This table works for both IPv4 and IPv6;
  * just use prefixlen_netmask_map[prefixlength].ip.
  */
 const union nf_inet_addr ip_set_netmask_map[] = {
@@ -149,13 +146,12 @@
 EXPORT_SYMBOL_GPL(ip_set_netmask_map);
 
 #undef  E
-#define E(a, b, c, d)						\
-	{.ip6 = { (__force __be32) a, (__force __be32) b,	\
-		  (__force __be32) c, (__force __be32) d,	\
+#define E(a, b, c, d)					\
+	{.ip6 = { (__force __be32)a, (__force __be32)b,	\
+		  (__force __be32)c, (__force __be32)d,	\
 	} }
 
-/*
- * This table works for both IPv4 and IPv6;
+/* This table works for both IPv4 and IPv6;
  * just use prefixlen_hostmask_map[prefixlength].ip.
  */
 const union nf_inet_addr ip_set_hostmask_map[] = {
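The pfxlen tables precompute the netmask and hostmask for every possible prefix length so the hot path is a single array lookup. A runnable userspace sketch of the IPv4 netmask computation the table caches:

#include <stdio.h>
#include <stdint.h>

/* Netmask for a prefix length 0..32; pfxlen.c simply precomputes
 * these (and their IPv6 equivalents) once at build time. */
static uint32_t prefix_to_mask(unsigned int pfx)
{
	return pfx ? ~UINT32_C(0) << (32 - pfx) : 0;
}

int main(void)
{
	printf("/24 -> 0x%08x\n", (unsigned)prefix_to_mask(24)); /* 0xffffff00 */
	printf("/0  -> 0x%08x\n", (unsigned)prefix_to_mask(0));  /* 0x00000000 */
	return 0;
}
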
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 19b9cce..b08ba95 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1457,18 +1457,12 @@
 	struct socket *sock;
 	int result;
 
-	/* First create a socket move it to right name space later */
-	result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+	/* First create a socket */
+	result = sock_create_kern(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
 	if (result < 0) {
 		pr_err("Error during creation of socket; terminating\n");
 		return ERR_PTR(result);
 	}
-	/*
-	 * Kernel sockets that are a part of a namespace, should not
-	 * hold a reference to a namespace in order to allow to stop it.
-	 * After sk_change_net should be released using sk_release_kernel.
-	 */
-	sk_change_net(sock->sk, net);
 	result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn);
 	if (result < 0) {
 		pr_err("Error setting outbound mcast interface\n");
@@ -1497,7 +1491,7 @@
 	return sock;
 
 error:
-	sk_release_kernel(sock->sk);
+	sock_release(sock);
 	return ERR_PTR(result);
 }
 
@@ -1518,17 +1512,11 @@
 	int result;
 
 	/* First create a socket */
-	result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+	result = sock_create_kern(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
 	if (result < 0) {
 		pr_err("Error during creation of socket; terminating\n");
 		return ERR_PTR(result);
 	}
-	/*
-	 * Kernel sockets that are a part of a namespace, should not
-	 * hold a reference to a namespace in order to allow to stop it.
-	 * After sk_change_net should be released using sk_release_kernel.
-	 */
-	sk_change_net(sock->sk, net);
 	/* it is equivalent to the REUSEADDR option in user-space */
 	sock->sk->sk_reuse = SK_CAN_REUSE;
 	result = sysctl_sync_sock_size(ipvs);
@@ -1554,7 +1542,7 @@
 	return sock;
 
 error:
-	sk_release_kernel(sock->sk);
+	sock_release(sock);
 	return ERR_PTR(result);
 }
 
@@ -1692,7 +1680,7 @@
 		ip_vs_sync_buff_release(sb);
 
 	/* release the sending multicast socket */
-	sk_release_kernel(tinfo->sock->sk);
+	sock_release(tinfo->sock);
 	kfree(tinfo);
 
 	return 0;
@@ -1729,7 +1717,7 @@
 	}
 
 	/* release the sending multicast socket */
-	sk_release_kernel(tinfo->sock->sk);
+	sock_release(tinfo->sock);
 	kfree(tinfo->buf);
 	kfree(tinfo);
 
@@ -1854,11 +1842,11 @@
 	return 0;
 
 outsocket:
-	sk_release_kernel(sock->sk);
+	sock_release(sock);
 
 outtinfo:
 	if (tinfo) {
-		sk_release_kernel(tinfo->sock->sk);
+		sock_release(tinfo->sock);
 		kfree(tinfo->buf);
 		kfree(tinfo);
 	}
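With the target namespace handed to sock_create_kern() at creation time, the sk_change_net()/sk_release_kernel() dance disappears and every teardown path collapses to a plain sock_release(). A condensed sketch of the resulting shape (kernel context assumed; configure() is a hypothetical setup step standing in for set_mcast_if() and friends):

static struct socket *make_sock(struct net *net)
{
	struct socket *sock;
	int result;

	result = sock_create_kern(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP,
				  &sock);
	if (result < 0)
		return ERR_PTR(result);

	result = configure(sock);	/* hypothetical setup step */
	if (result < 0)
		goto error;
	return sock;

error:
	sock_release(sock);	/* the single teardown call on every path */
	return ERR_PTR(result);
}
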
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 19986ec..bf66a86 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -364,13 +364,16 @@
 #ifdef CONFIG_IP_VS_IPV6
 static struct dst_entry *
 __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
-			struct in6_addr *ret_saddr, int do_xfrm)
+			struct in6_addr *ret_saddr, int do_xfrm, int rt_mode)
 {
 	struct dst_entry *dst;
 	struct flowi6 fl6 = {
 		.daddr = *daddr,
 	};
 
+	if (rt_mode & IP_VS_RT_MODE_KNOWN_NH)
+		fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
+
 	dst = ip6_route_output(net, NULL, &fl6);
 	if (dst->error)
 		goto out_err;
@@ -427,7 +430,7 @@
 			}
 			dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
 						      &dest_dst->dst_saddr.in6,
-						      do_xfrm);
+						      do_xfrm, rt_mode);
 			if (!dst) {
 				__ip_vs_dst_set(dest, NULL, NULL, 0);
 				spin_unlock_bh(&dest->dst_lock);
@@ -435,7 +438,7 @@
 				goto err_unreach;
 			}
 			rt = (struct rt6_info *) dst;
-			cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+			cookie = rt6_get_cookie(rt);
 			__ip_vs_dst_set(dest, dest_dst, &rt->dst, cookie);
 			spin_unlock_bh(&dest->dst_lock);
 			IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
@@ -446,7 +449,8 @@
 			*ret_saddr = dest_dst->dst_saddr.in6;
 	} else {
 		noref = 0;
-		dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
+		dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm,
+					      rt_mode);
 		if (!dst)
 			goto err_unreach;
 		rt = (struct rt6_info *) dst;
@@ -781,7 +785,7 @@
 
 	/* From world but DNAT to loopback address? */
 	if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
-	    ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
+	    ipv6_addr_type(&cp->daddr.in6) & IPV6_ADDR_LOOPBACK) {
 		IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, 0,
 				 "ip_vs_nat_xmit_v6(): "
 				 "stopping DNAT to loopback address");
@@ -1164,7 +1168,8 @@
 	local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6,
 				      NULL, ipvsh, 0,
 				      IP_VS_RT_MODE_LOCAL |
-				      IP_VS_RT_MODE_NON_LOCAL);
+				      IP_VS_RT_MODE_NON_LOCAL |
+				      IP_VS_RT_MODE_KNOWN_NH);
 	if (local < 0)
 		goto tx_error;
 	if (local) {
@@ -1346,7 +1351,7 @@
 
 	/* From world but DNAT to loopback address? */
 	if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
-	    ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
+	    ipv6_addr_type(&cp->daddr.in6) & IPV6_ADDR_LOOPBACK) {
 		IP_VS_DBG(1, "%s(): "
 			  "stopping DNAT to loopback %pI6\n",
 			  __func__, &cp->daddr.in6);
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 1d69f5b..9511af0 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -779,8 +779,8 @@
 				   flowi6_to_flowi(&fl1), false)) {
 			if (!afinfo->route(net, (struct dst_entry **)&rt2,
 					   flowi6_to_flowi(&fl2), false)) {
-				if (ipv6_addr_equal(rt6_nexthop(rt1),
-						    rt6_nexthop(rt2)) &&
+				if (ipv6_addr_equal(rt6_nexthop(rt1, &fl1.daddr),
+						    rt6_nexthop(rt2, &fl2.daddr)) &&
 				    rt1->dst.dev == rt2->dst.dev)
 					ret = 1;
 				dst_release(&rt2->dst);
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 60865f1..2281be4 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -90,7 +90,13 @@
 static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
 			unsigned int dataoff, unsigned int *timeouts)
 {
-	return nf_generic_should_process(nf_ct_protonum(ct));
+	bool ret;
+
+	ret = nf_generic_should_process(nf_ct_protonum(ct));
+	if (!ret)
+		pr_warn_once("conntrack: generic helper won't handle protocol %d. Please consider loading the specific helper module.\n",
+			     nf_ct_protonum(ct));
+	return ret;
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
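generic_new() now emits a hint the first time an unhandled protocol shows up; pr_warn_once() limits the output to a single message per call site via a static flag. A runnable userspace analogue of the same trick (warn_once() is a made-up name):

#include <stdio.h>

/* Userspace analogue of the kernel's printk_once machinery: a static
 * flag inside the macro body makes each call site fire at most once. */
#define warn_once(...)						\
	do {							\
		static int warned;				\
		if (!warned) {					\
			warned = 1;				\
			fprintf(stderr, __VA_ARGS__);		\
		}						\
	} while (0)

int main(void)
{
	for (int i = 0; i < 3; i++)
		warn_once("generic helper won't handle protocol %d\n", 132);
	return 0;	/* the message appears exactly once */
}
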
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index ea7f367..3992106 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -19,6 +19,7 @@
 /* nf_queue.c */
 int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem,
 	     struct nf_hook_state *state, unsigned int queuenum);
+void nf_queue_nf_hook_drop(struct nf_hook_ops *ops);
 int __init netfilter_queue_init(void);
 
 /* nf_log.c */
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 2e88032..cd60d39 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -105,6 +105,23 @@
 }
 EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
 
+void nf_queue_nf_hook_drop(struct nf_hook_ops *ops)
+{
+	const struct nf_queue_handler *qh;
+	struct net *net;
+
+	rtnl_lock();
+	rcu_read_lock();
+	qh = rcu_dereference(queue_handler);
+	if (qh) {
+		for_each_net(net) {
+			qh->nf_hook_drop(net, ops);
+		}
+	}
+	rcu_read_unlock();
+	rtnl_unlock();
+}
+
 /*
  * Any packet that leaves via this function must come back
  * through nf_reinject().
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 52e20c9..789feea 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -11,6 +11,7 @@
 #include <asm/unaligned.h>
 #include <net/tcp.h>
 #include <net/netns/generic.h>
+#include <linux/proc_fs.h>
 
 #include <linux/netfilter_ipv4/ip_tables.h>
 #include <linux/netfilter/x_tables.h>
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 34ded09..cfe6368 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -127,13 +127,46 @@
 	kfree(trans);
 }
 
+int nft_register_basechain(struct nft_base_chain *basechain,
+			   unsigned int hook_nops)
+{
+	if (basechain->flags & NFT_BASECHAIN_DISABLED)
+		return 0;
+
+	return nf_register_hooks(basechain->ops, hook_nops);
+}
+EXPORT_SYMBOL_GPL(nft_register_basechain);
+
+void nft_unregister_basechain(struct nft_base_chain *basechain,
+			      unsigned int hook_nops)
+{
+	if (basechain->flags & NFT_BASECHAIN_DISABLED)
+		return;
+
+	nf_unregister_hooks(basechain->ops, hook_nops);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_basechain);
+
+static int nf_tables_register_hooks(const struct nft_table *table,
+				    struct nft_chain *chain,
+				    unsigned int hook_nops)
+{
+	if (table->flags & NFT_TABLE_F_DORMANT ||
+	    !(chain->flags & NFT_BASE_CHAIN))
+		return 0;
+
+	return nft_register_basechain(nft_base_chain(chain), hook_nops);
+}
+
 static void nf_tables_unregister_hooks(const struct nft_table *table,
-				       const struct nft_chain *chain,
+				       struct nft_chain *chain,
 				       unsigned int hook_nops)
 {
-	if (!(table->flags & NFT_TABLE_F_DORMANT) &&
-	    chain->flags & NFT_BASE_CHAIN)
-		nf_unregister_hooks(nft_base_chain(chain)->ops, hook_nops);
+	if (table->flags & NFT_TABLE_F_DORMANT ||
+	    !(chain->flags & NFT_BASE_CHAIN))
+		return;
+
+	nft_unregister_basechain(nft_base_chain(chain), hook_nops);
 }
 
 /* Internal table flags */
@@ -560,7 +593,7 @@
 		if (!(chain->flags & NFT_BASE_CHAIN))
 			continue;
 
-		err = nf_register_hooks(nft_base_chain(chain)->ops, afi->nops);
+		err = nft_register_basechain(nft_base_chain(chain), afi->nops);
 		if (err < 0)
 			goto err;
 
@@ -575,20 +608,20 @@
 		if (i-- <= 0)
 			break;
 
-		nf_unregister_hooks(nft_base_chain(chain)->ops, afi->nops);
+		nft_unregister_basechain(nft_base_chain(chain), afi->nops);
 	}
 	return err;
 }
 
 static void nf_tables_table_disable(const struct nft_af_info *afi,
-				   struct nft_table *table)
+				    struct nft_table *table)
 {
 	struct nft_chain *chain;
 
 	list_for_each_entry(chain, &table->chains, list) {
 		if (chain->flags & NFT_BASE_CHAIN)
-			nf_unregister_hooks(nft_base_chain(chain)->ops,
-					    afi->nops);
+			nft_unregister_basechain(nft_base_chain(chain),
+						 afi->nops);
 	}
 }
 
@@ -679,13 +712,14 @@
 			return -EINVAL;
 	}
 
+	err = -EAFNOSUPPORT;
 	if (!try_module_get(afi->owner))
-		return -EAFNOSUPPORT;
+		goto err1;
 
 	err = -ENOMEM;
 	table = kzalloc(sizeof(*table), GFP_KERNEL);
 	if (table == NULL)
-		goto err1;
+		goto err2;
 
 	nla_strlcpy(table->name, name, NFT_TABLE_MAXNAMELEN);
 	INIT_LIST_HEAD(&table->chains);
@@ -695,14 +729,15 @@
 	nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
 	err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE);
 	if (err < 0)
-		goto err2;
+		goto err3;
 
 	list_add_tail_rcu(&table->list, &afi->tables);
 	return 0;
-err2:
+err3:
 	kfree(table);
-err1:
+err2:
 	module_put(afi->owner);
+err1:
 	return err;
 }
 
@@ -881,6 +916,8 @@
 static const struct nla_policy nft_hook_policy[NFTA_HOOK_MAX + 1] = {
 	[NFTA_HOOK_HOOKNUM]	= { .type = NLA_U32 },
 	[NFTA_HOOK_PRIORITY]	= { .type = NLA_U32 },
+	[NFTA_HOOK_DEV]		= { .type = NLA_STRING,
+				    .len = IFNAMSIZ - 1 },
 };
 
 static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
@@ -954,6 +991,9 @@
 			goto nla_put_failure;
 		if (nla_put_be32(skb, NFTA_HOOK_PRIORITY, htonl(ops->priority)))
 			goto nla_put_failure;
+		if (basechain->dev_name[0] &&
+		    nla_put_string(skb, NFTA_HOOK_DEV, basechain->dev_name))
+			goto nla_put_failure;
 		nla_nest_end(skb, nest);
 
 		if (nla_put_be32(skb, NFTA_CHAIN_POLICY,
@@ -1165,9 +1205,13 @@
 	BUG_ON(chain->use > 0);
 
 	if (chain->flags & NFT_BASE_CHAIN) {
-		module_put(nft_base_chain(chain)->type->owner);
-		free_percpu(nft_base_chain(chain)->stats);
-		kfree(nft_base_chain(chain));
+		struct nft_base_chain *basechain = nft_base_chain(chain);
+
+		module_put(basechain->type->owner);
+		free_percpu(basechain->stats);
+		if (basechain->ops[0].dev != NULL)
+			dev_put(basechain->ops[0].dev);
+		kfree(basechain);
 	} else {
 		kfree(chain);
 	}
@@ -1186,6 +1230,7 @@
 	struct nlattr *ha[NFTA_HOOK_MAX + 1];
 	struct net *net = sock_net(skb->sk);
 	int family = nfmsg->nfgen_family;
+	struct net_device *dev = NULL;
 	u8 policy = NF_ACCEPT;
 	u64 handle = 0;
 	unsigned int i;
@@ -1325,17 +1370,43 @@
 			return -ENOENT;
 		hookfn = type->hooks[hooknum];
 
+		if (afi->flags & NFT_AF_NEEDS_DEV) {
+			char ifname[IFNAMSIZ];
+
+			if (!ha[NFTA_HOOK_DEV]) {
+				module_put(type->owner);
+				return -EOPNOTSUPP;
+			}
+
+			nla_strlcpy(ifname, ha[NFTA_HOOK_DEV], IFNAMSIZ);
+			dev = dev_get_by_name(net, ifname);
+			if (!dev) {
+				module_put(type->owner);
+				return -ENOENT;
+			}
+		} else if (ha[NFTA_HOOK_DEV]) {
+			module_put(type->owner);
+			return -EOPNOTSUPP;
+		}
+
 		basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
 		if (basechain == NULL) {
 			module_put(type->owner);
+			if (dev != NULL)
+				dev_put(dev);
 			return -ENOMEM;
 		}
 
+		if (dev != NULL)
+			strncpy(basechain->dev_name, dev->name, IFNAMSIZ);
+
 		if (nla[NFTA_CHAIN_COUNTERS]) {
 			stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
 			if (IS_ERR(stats)) {
 				module_put(type->owner);
 				kfree(basechain);
+				if (dev != NULL)
+					dev_put(dev);
 				return PTR_ERR(stats);
 			}
 			basechain->stats = stats;
@@ -1344,6 +1415,8 @@
 			if (stats == NULL) {
 				module_put(type->owner);
 				kfree(basechain);
+				if (dev != NULL)
+					dev_put(dev);
 				return -ENOMEM;
 			}
 			rcu_assign_pointer(basechain->stats, stats);
@@ -1361,6 +1434,7 @@
 			ops->priority	= priority;
 			ops->priv	= chain;
 			ops->hook	= afi->hooks[ops->hooknum];
+			ops->dev	= dev;
 			if (hookfn)
 				ops->hook = hookfn;
 			if (afi->hook_ops_init)
@@ -1380,12 +1454,9 @@
 	chain->table = table;
 	nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
 
-	if (!(table->flags & NFT_TABLE_F_DORMANT) &&
-	    chain->flags & NFT_BASE_CHAIN) {
-		err = nf_register_hooks(nft_base_chain(chain)->ops, afi->nops);
-		if (err < 0)
-			goto err1;
-	}
+	err = nf_tables_register_hooks(table, chain, afi->nops);
+	if (err < 0)
+		goto err1;
 
 	nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
 	err = nft_trans_chain_add(&ctx, NFT_MSG_NEWCHAIN);
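The nf_tables_newtable() hunk above renumbers its error labels so that each failure point unwinds exactly the steps completed so far, in reverse order. A runnable userspace miniature of the idiom (step1()/step2() are stand-ins for try_module_get() and the table kzalloc()):

#include <stdio.h>
#include <stdlib.h>

static void *step1(void) { return malloc(1); }	/* e.g. take a reference */
static void *step2(void) { return NULL; }	/* e.g. an alloc that fails */

static int setup(void)
{
	void *a, *b;
	int err;

	err = -1;			/* -EAFNOSUPPORT stand-in */
	a = step1();
	if (!a)
		goto err1;

	err = -12;			/* -ENOMEM stand-in */
	b = step2();
	if (!b)
		goto err2;

	free(b);
	free(a);
	return 0;

err2:
	free(a);			/* undo step1 only */
err1:
	return err;
}

int main(void)
{
	printf("setup() = %d\n", setup());	/* -12, with step1 undone */
	return 0;
}
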
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index f153b07..f77bad4 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -114,7 +114,8 @@
 nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
 {
 	const struct nft_chain *chain = ops->priv, *basechain = chain;
-	const struct net *net = read_pnet(&nft_base_chain(basechain)->pnet);
+	const struct net *chain_net = read_pnet(&nft_base_chain(basechain)->pnet);
+	const struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
 	const struct nft_rule *rule;
 	const struct nft_expr *expr, *last;
 	struct nft_regs regs;
@@ -124,6 +125,10 @@
 	int rulenum;
 	unsigned int gencursor = nft_genmask_cur(net);
 
+	/* Ignore chains that are not for the current network namespace */
+	if (!net_eq(net, chain_net))
+		return NF_ACCEPT;
+
 do_chain:
 	rulenum = 0;
 	rule = list_entry(&chain->rules, struct nft_rule, list);
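One note on the namespace check added above: the namespace is derived from the packet's input or output device and compared against the one recorded in the basechain at creation time. The verdict for a mismatch is deliberately NF_ACCEPT, not a drop, since a chain registered in one netns must pass another namespace's traffic through untouched:

	const struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);

	/* Foreign-namespace packets are none of this chain's business;
	 * let them through unmodified. */
	if (!net_eq(net, chain_net))
		return NF_ACCEPT;
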
diff --git a/net/netfilter/nf_tables_netdev.c b/net/netfilter/nf_tables_netdev.c
new file mode 100644
index 0000000..2cae4d4
--- /dev/null
+++ b/net/netfilter/nf_tables_netdev.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2015 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <net/netfilter/nf_tables.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+
+static inline void
+nft_netdev_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
+			    const struct nf_hook_ops *ops, struct sk_buff *skb,
+			    const struct nf_hook_state *state)
+{
+	struct iphdr *iph, _iph;
+	u32 len, thoff;
+
+	nft_set_pktinfo(pkt, ops, skb, state);
+
+	iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph),
+				 &_iph);
+	if (!iph)
+		return;
+
+	iph = ip_hdr(skb);
+	if (iph->ihl < 5 || iph->version != 4)
+		return;
+
+	len = ntohs(iph->tot_len);
+	thoff = iph->ihl * 4;
+	if (skb->len < len)
+		return;
+	else if (len < thoff)
+		return;
+
+	pkt->tprot = iph->protocol;
+	pkt->xt.thoff = thoff;
+	pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+}
+
+static inline void
+__nft_netdev_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
+			      const struct nf_hook_ops *ops,
+			      struct sk_buff *skb,
+			      const struct nf_hook_state *state)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+	struct ipv6hdr *ip6h, _ip6h;
+	unsigned int thoff = 0;
+	unsigned short frag_off;
+	int protohdr;
+	u32 pkt_len;
+
+	ip6h = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*ip6h),
+				  &_ip6h);
+	if (!ip6h)
+		return;
+
+	if (ip6h->version != 6)
+		return;
+
+	pkt_len = ntohs(ip6h->payload_len);
+	if (pkt_len + sizeof(*ip6h) > skb->len)
+		return;
+
+	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+	if (protohdr < 0)
+		return;
+
+	pkt->tprot = protohdr;
+	pkt->xt.thoff = thoff;
+	pkt->xt.fragoff = frag_off;
+#endif
+}
+
+static inline void nft_netdev_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
+					       const struct nf_hook_ops *ops,
+					       struct sk_buff *skb,
+					       const struct nf_hook_state *state)
+{
+	nft_set_pktinfo(pkt, ops, skb, state);
+	__nft_netdev_set_pktinfo_ipv6(pkt, ops, skb, state);
+}
+
+static unsigned int
+nft_do_chain_netdev(const struct nf_hook_ops *ops, struct sk_buff *skb,
+		    const struct nf_hook_state *state)
+{
+	struct nft_pktinfo pkt;
+
+	switch (eth_hdr(skb)->h_proto) {
+	case htons(ETH_P_IP):
+		nft_netdev_set_pktinfo_ipv4(&pkt, ops, skb, state);
+		break;
+	case htons(ETH_P_IPV6):
+		nft_netdev_set_pktinfo_ipv6(&pkt, ops, skb, state);
+		break;
+	default:
+		nft_set_pktinfo(&pkt, ops, skb, state);
+		break;
+	}
+
+	return nft_do_chain(&pkt, ops);
+}
+
+static struct nft_af_info nft_af_netdev __read_mostly = {
+	.family		= NFPROTO_NETDEV,
+	.nhooks		= NF_NETDEV_NUMHOOKS,
+	.owner		= THIS_MODULE,
+	.flags		= NFT_AF_NEEDS_DEV,
+	.nops		= 1,
+	.hooks		= {
+		[NF_NETDEV_INGRESS]	= nft_do_chain_netdev,
+	},
+};
+
+static int nf_tables_netdev_init_net(struct net *net)
+{
+	net->nft.netdev = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+	if (net->nft.netdev == NULL)
+		return -ENOMEM;
+
+	memcpy(net->nft.netdev, &nft_af_netdev, sizeof(nft_af_netdev));
+
+	if (nft_register_afinfo(net, net->nft.netdev) < 0)
+		goto err;
+
+	return 0;
+err:
+	kfree(net->nft.netdev);
+	return -ENOMEM;
+}
+
+static void nf_tables_netdev_exit_net(struct net *net)
+{
+	nft_unregister_afinfo(net->nft.netdev);
+	kfree(net->nft.netdev);
+}
+
+static struct pernet_operations nf_tables_netdev_net_ops = {
+	.init	= nf_tables_netdev_init_net,
+	.exit	= nf_tables_netdev_exit_net,
+};
+
+static const struct nf_chain_type nft_filter_chain_netdev = {
+	.name		= "filter",
+	.type		= NFT_CHAIN_T_DEFAULT,
+	.family		= NFPROTO_NETDEV,
+	.owner		= THIS_MODULE,
+	.hook_mask	= (1 << NF_NETDEV_INGRESS),
+};
+
+static void nft_netdev_event(unsigned long event, struct nft_af_info *afi,
+			     struct net_device *dev, struct nft_table *table,
+			     struct nft_base_chain *basechain)
+{
+	switch (event) {
+	case NETDEV_REGISTER:
+		if (strcmp(basechain->dev_name, dev->name) != 0)
+			return;
+
+		BUG_ON(!(basechain->flags & NFT_BASECHAIN_DISABLED));
+
+		dev_hold(dev);
+		basechain->ops[0].dev = dev;
+		basechain->flags &= ~NFT_BASECHAIN_DISABLED;
+		if (!(table->flags & NFT_TABLE_F_DORMANT))
+			nft_register_basechain(basechain, afi->nops);
+		break;
+	case NETDEV_UNREGISTER:
+		if (strcmp(basechain->dev_name, dev->name) != 0)
+			return;
+
+		BUG_ON(basechain->flags & NFT_BASECHAIN_DISABLED);
+
+		if (!(table->flags & NFT_TABLE_F_DORMANT))
+			nft_unregister_basechain(basechain, afi->nops);
+
+		dev_put(basechain->ops[0].dev);
+		basechain->ops[0].dev = NULL;
+		basechain->flags |= NFT_BASECHAIN_DISABLED;
+		break;
+	case NETDEV_CHANGENAME:
+		if (dev->ifindex != basechain->ops[0].dev->ifindex)
+			return;
+
+		strncpy(basechain->dev_name, dev->name, IFNAMSIZ);
+		break;
+	}
+}
+
+static int nf_tables_netdev_event(struct notifier_block *this,
+				  unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct nft_af_info *afi;
+	struct nft_table *table;
+	struct nft_chain *chain;
+
+	nfnl_lock(NFNL_SUBSYS_NFTABLES);
+	list_for_each_entry(afi, &dev_net(dev)->nft.af_info, list) {
+		if (afi->family != NFPROTO_NETDEV)
+			continue;
+
+		list_for_each_entry(table, &afi->tables, list) {
+			list_for_each_entry(chain, &table->chains, list) {
+				if (!(chain->flags & NFT_BASE_CHAIN))
+					continue;
+
+				nft_netdev_event(event, afi, dev, table,
+						 nft_base_chain(chain));
+			}
+		}
+	}
+	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block nf_tables_netdev_notifier = {
+	.notifier_call	= nf_tables_netdev_event,
+};
+
+static int __init nf_tables_netdev_init(void)
+{
+	int ret;
+
+	nft_register_chain_type(&nft_filter_chain_netdev);
+	ret = register_pernet_subsys(&nf_tables_netdev_net_ops);
+	if (ret < 0)
+		nft_unregister_chain_type(&nft_filter_chain_netdev);
+
+	register_netdevice_notifier(&nf_tables_netdev_notifier);
+
+	return ret;
+}
+
+static void __exit nf_tables_netdev_exit(void)
+{
+	unregister_netdevice_notifier(&nf_tables_netdev_notifier);
+	unregister_pernet_subsys(&nf_tables_netdev_net_ops);
+	nft_unregister_chain_type(&nft_filter_chain_netdev);
+}
+
+module_init(nf_tables_netdev_init);
+module_exit(nf_tables_netdev_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_FAMILY(5); /* NFPROTO_NETDEV */
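The new nf_tables_netdev.c hangs its ingress basechains off the generic netdevice notifier. The skeleton it builds on, reduced to its essentials (kernel context assumed; the example_* names are invented):

static int example_event(struct notifier_block *this,
			 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* Called for every device event in every namespace; filter on
	 * the event type and the device before doing any work. */
	if (event == NETDEV_REGISTER)
		pr_info("%s registered\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
	.notifier_call	= example_event,
};

/* module init/exit pair:
 *	register_netdevice_notifier(&example_notifier);
 *	unregister_netdevice_notifier(&example_notifier);
 */
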
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 4ef1fae..4670821 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -598,8 +598,6 @@
 	return -1;
 }
 
-#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
-
 static struct nf_loginfo default_loginfo = {
 	.type =		NF_LOG_TYPE_ULOG,
 	.u = {
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 11c7682..685cc6a 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -278,6 +278,23 @@
 	return -1;
 }
 
+static u32 nfqnl_get_sk_secctx(struct sk_buff *skb, char **secdata)
+{
+	u32 seclen = 0;
+#if IS_ENABLED(CONFIG_NETWORK_SECMARK)
+	if (!skb || !sk_fullsock(skb->sk))
+		return 0;
+
+	read_lock_bh(&skb->sk->sk_callback_lock);
+
+	if (skb->secmark)
+		security_secid_to_secctx(skb->secmark, secdata, &seclen);
+
+	read_unlock_bh(&skb->sk->sk_callback_lock);
+#endif
+	return seclen;
+}
+
 static struct sk_buff *
 nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 			   struct nf_queue_entry *entry,
@@ -297,6 +314,8 @@
 	struct nf_conn *ct = NULL;
 	enum ip_conntrack_info uninitialized_var(ctinfo);
 	bool csum_verify;
+	char *secdata = NULL;
+	u32 seclen = 0;
 
 	size =    nlmsg_total_size(sizeof(struct nfgenmsg))
 		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
@@ -352,6 +371,12 @@
 			+ nla_total_size(sizeof(u_int32_t)));	/* gid */
 	}
 
+	if ((queue->flags & NFQA_CFG_F_SECCTX) && entskb->sk) {
+		seclen = nfqnl_get_sk_secctx(entskb, &secdata);
+		if (seclen)
+			size += nla_total_size(seclen);
+	}
+
 	skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
 				  GFP_ATOMIC);
 	if (!skb) {
@@ -479,6 +504,9 @@
 	    nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
 		goto nla_put_failure;
 
+	if (seclen && nla_put(skb, NFQA_SECCTX, seclen, secdata))
+		goto nla_put_failure;
+
 	if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
 		goto nla_put_failure;
 
@@ -806,8 +834,6 @@
 	rcu_read_unlock();
 }
 
-#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
-
 static int
 nfqnl_rcv_dev_event(struct notifier_block *this,
 		    unsigned long event, void *ptr)
@@ -824,6 +850,27 @@
 	.notifier_call	= nfqnl_rcv_dev_event,
 };
 
+static int nf_hook_cmp(struct nf_queue_entry *entry, unsigned long ops_ptr)
+{
+	return entry->elem == (struct nf_hook_ops *)ops_ptr;
+}
+
+static void nfqnl_nf_hook_drop(struct net *net, struct nf_hook_ops *hook)
+{
+	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
+	int i;
+
+	rcu_read_lock();
+	for (i = 0; i < INSTANCE_BUCKETS; i++) {
+		struct nfqnl_instance *inst;
+		struct hlist_head *head = &q->instance_table[i];
+
+		hlist_for_each_entry_rcu(inst, head, hlist)
+			nfqnl_flush(inst, nf_hook_cmp, (unsigned long)hook);
+	}
+	rcu_read_unlock();
+}
+
 static int
 nfqnl_rcv_nl_event(struct notifier_block *this,
 		   unsigned long event, void *ptr)
@@ -1031,7 +1078,8 @@
 };
 
 static const struct nf_queue_handler nfqh = {
-	.outfn	= &nfqnl_enqueue_packet,
+	.outfn		= &nfqnl_enqueue_packet,
+	.nf_hook_drop	= &nfqnl_nf_hook_drop,
 };
 
 static int
@@ -1142,7 +1190,12 @@
 			ret = -EOPNOTSUPP;
 			goto err_out_unlock;
 		}
-
+#if !IS_ENABLED(CONFIG_NETWORK_SECMARK)
+		if (flags & mask & NFQA_CFG_F_SECCTX) {
+			ret = -EOPNOTSUPP;
+			goto err_out_unlock;
+		}
+#endif
 		spin_lock_bh(&queue->lock);
 		queue->flags &= ~mask;
 		queue->flags |= flags & mask;
@@ -1257,7 +1310,7 @@
 		   inst->copy_mode, inst->copy_range,
 		   inst->queue_dropped, inst->queue_user_dropped,
 		   inst->id_sequence, 1);
-	return seq_has_overflowed(s);
+	return 0;
 }
 
 static const struct seq_operations nfqnl_seq_ops = {
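The secctx patch budgets skb space with nla_total_size() well before the attribute is emitted with nla_put(). A runnable userspace re-derivation of that per-attribute budget (assumes the uapi netlink definitions; verify against include/uapi/linux/netlink.h):

#include <stdio.h>

struct nlattr {
	unsigned short nla_len;
	unsigned short nla_type;
};

#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN	((int) NLA_ALIGN(sizeof(struct nlattr)))

/* Per-attribute cost: 4-byte header plus payload, padded to 4 bytes. */
static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	/* A 13-byte security context costs 4 + 13 = 17 -> 20 bytes. */
	printf("%d\n", nla_total_size(13));
	return 0;
}
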
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 7f29cfc..66def31 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -161,6 +161,7 @@
 		par->hook_mask = 0;
 	}
 	par->family	= ctx->afi->family;
+	par->nft_compat = true;
 }
 
 static void target_compat_from_user(struct xt_target *t, void *in, void *out)
@@ -377,6 +378,7 @@
 		par->hook_mask = 0;
 	}
 	par->family	= ctx->afi->family;
+	par->nft_compat = true;
 }
 
 static void match_compat_from_user(struct xt_match *m, void *in, void *out)
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 51a459c..d324fe7 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -658,35 +658,23 @@
 
 struct xt_table_info *xt_alloc_table_info(unsigned int size)
 {
-	struct xt_table_info *newinfo;
-	int cpu;
+	struct xt_table_info *info = NULL;
+	size_t sz = sizeof(*info) + size;
 
 	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
 	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
 		return NULL;
 
-	newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
-	if (!newinfo)
-		return NULL;
-
-	newinfo->size = size;
-
-	for_each_possible_cpu(cpu) {
-		if (size <= PAGE_SIZE)
-			newinfo->entries[cpu] = kmalloc_node(size,
-							GFP_KERNEL,
-							cpu_to_node(cpu));
-		else
-			newinfo->entries[cpu] = vmalloc_node(size,
-							cpu_to_node(cpu));
-
-		if (newinfo->entries[cpu] == NULL) {
-			xt_free_table_info(newinfo);
+	if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+		info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+	if (!info) {
+		info = vmalloc(sz);
+		if (!info)
 			return NULL;
-		}
 	}
-
-	return newinfo;
+	memset(info, 0, sizeof(*info));
+	info->size = size;
+	return info;
 }
 EXPORT_SYMBOL(xt_alloc_table_info);
 
@@ -694,9 +682,6 @@
 {
 	int cpu;
 
-	for_each_possible_cpu(cpu)
-		kvfree(info->entries[cpu]);
-
 	if (info->jumpstack != NULL) {
 		for_each_possible_cpu(cpu)
 			kvfree(info->jumpstack[cpu]);
@@ -705,7 +690,7 @@
 
 	free_percpu(info->stackptr);
 
-	kfree(info);
+	kvfree(info);
 }
 EXPORT_SYMBOL(xt_free_table_info);
 
@@ -947,11 +932,9 @@
 {
 	struct xt_table *table = list_entry(v, struct xt_table, list);
 
-	if (strlen(table->name)) {
+	if (*table->name)
 		seq_printf(seq, "%s\n", table->name);
-		return seq_has_overflowed(seq);
-	} else
-		return 0;
+	return 0;
 }
 
 static const struct seq_operations xt_table_seq_ops = {
@@ -1087,10 +1070,8 @@
 		if (trav->curr == trav->head)
 			return 0;
 		match = list_entry(trav->curr, struct xt_match, list);
-		if (*match->name == '\0')
-			return 0;
-		seq_printf(seq, "%s\n", match->name);
-		return seq_has_overflowed(seq);
+		if (*match->name)
+			seq_printf(seq, "%s\n", match->name);
 	}
 	return 0;
 }
@@ -1142,10 +1123,8 @@
 		if (trav->curr == trav->head)
 			return 0;
 		target = list_entry(trav->curr, struct xt_target, list);
-		if (*target->name == '\0')
-			return 0;
-		seq_printf(seq, "%s\n", target->name);
-		return seq_has_overflowed(seq);
+		if (*target->name)
+			seq_printf(seq, "%s\n", target->name);
 	}
 	return 0;
 }
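xt_alloc_table_info() now keeps the whole table in one allocation: kmalloc() for sizes worth attempting physically contiguous, vmalloc() as the fallback, and kvfree() to release either. The pattern in isolation (kernel context assumed; alloc_table() is an invented name):

static void *alloc_table(size_t sz)
{
	void *p = NULL;

	/* Only attempt kmalloc() up to the "costly" order; NOWARN and
	 * NORETRY keep a failed attempt quiet and cheap. */
	if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		p = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!p)
		p = vmalloc(sz);
	return p;	/* release with kvfree(p); it handles both cases */
}
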
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index e762de5..8c3190e 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -277,6 +277,9 @@
 			"FORWARD, OUTPUT and POSTROUTING hooks\n");
 		return -EINVAL;
 	}
+	if (par->nft_compat)
+		return 0;
+
 	xt_ematch_foreach(ematch, e)
 		if (find_syn_match(ematch))
 			return 0;
@@ -299,6 +302,9 @@
 			"FORWARD, OUTPUT and POSTROUTING hooks\n");
 		return -EINVAL;
 	}
+	if (par->nft_compat)
+		return 0;
+
 	xt_ematch_foreach(ematch, e)
 		if (find_syn_match(ematch))
 			return 0;
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 292934d..a747eb4 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -152,6 +152,7 @@
 	fl6.daddr = info->gw.in6;
 	fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
 			   (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
+	fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
 	dst = ip6_route_output(net, NULL, &fl6);
 	if (dst->error) {
 		dst_release(dst);
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index fab6eea..5b4743c 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -73,7 +73,7 @@
 
 	if (dev == NULL && rt->rt6i_flags & RTF_LOCAL)
 		ret |= XT_ADDRTYPE_LOCAL;
-	if (rt->rt6i_flags & RTF_ANYCAST)
+	if (ipv6_anycast_destination((struct dst_entry *)rt, addr))
 		ret |= XT_ADDRTYPE_ANYCAST;
 
 	dst_release(&rt->dst);
diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c
index 2334523..ebd41dc 100644
--- a/net/netfilter/xt_mark.c
+++ b/net/netfilter/xt_mark.c
@@ -23,6 +23,7 @@
 MODULE_ALIAS("ip6t_mark");
 MODULE_ALIAS("ipt_MARK");
 MODULE_ALIAS("ip6t_MARK");
+MODULE_ALIAS("arpt_MARK");
 
 static unsigned int
 mark_tg(struct sk_buff *skb, const struct xt_action_param *par)
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 8904598..5669e5b 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -9,14 +9,16 @@
  */
 
 /* Kernel module which implements the set match and SET target
- * for netfilter/iptables. */
+ * for netfilter/iptables.
+ */
 
 #include <linux/module.h>
 #include <linux/skbuff.h>
 
 #include <linux/netfilter/x_tables.h>
-#include <linux/netfilter/xt_set.h>
+#include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <uapi/linux/netfilter/xt_set.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -52,6 +54,7 @@
 set_match_v0(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_set_info_match_v0 *info = par->matchinfo;
+
 	ADT_OPT(opt, par->family, info->match_set.u.compat.dim,
 		info->match_set.u.compat.flags, 0, UINT_MAX);
 
@@ -68,10 +71,10 @@
 	info->u.compat.dim = IPSET_DIM_ZERO;
 	if (info->u.flags[0] & IPSET_MATCH_INV)
 		info->u.compat.flags |= IPSET_INV_MATCH;
-	for (i = 0; i < IPSET_DIM_MAX-1 && info->u.flags[i]; i++) {
+	for (i = 0; i < IPSET_DIM_MAX - 1 && info->u.flags[i]; i++) {
 		info->u.compat.dim++;
 		if (info->u.flags[i] & IPSET_SRC)
-			info->u.compat.flags |= (1<<info->u.compat.dim);
+			info->u.compat.flags |= (1 << info->u.compat.dim);
 	}
 }
 
@@ -88,7 +91,7 @@
 			info->match_set.index);
 		return -ENOENT;
 	}
-	if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
+	if (info->match_set.u.flags[IPSET_DIM_MAX - 1] != 0) {
 		pr_warn("Protocol error: set match dimension is over the limit!\n");
 		ip_set_nfnl_put(par->net, info->match_set.index);
 		return -ERANGE;
@@ -114,6 +117,7 @@
 set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_set_info_match_v1 *info = par->matchinfo;
+
 	ADT_OPT(opt, par->family, info->match_set.dim,
 		info->match_set.flags, 0, UINT_MAX);
 
@@ -178,9 +182,10 @@
 set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_set_info_match_v3 *info = par->matchinfo;
+	int ret;
+
 	ADT_OPT(opt, par->family, info->match_set.dim,
 		info->match_set.flags, info->flags, UINT_MAX);
-	int ret;
 
 	if (info->packets.op != IPSET_COUNTER_NONE ||
 	    info->bytes.op != IPSET_COUNTER_NONE)
@@ -224,9 +229,10 @@
 set_match_v4(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_set_info_match_v4 *info = par->matchinfo;
+	int ret;
+
 	ADT_OPT(opt, par->family, info->match_set.dim,
 		info->match_set.flags, info->flags, UINT_MAX);
-	int ret;
 
 	if (info->packets.op != IPSET_COUNTER_NONE ||
 	    info->bytes.op != IPSET_COUNTER_NONE)
@@ -252,6 +258,7 @@
 set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_set_info_target_v0 *info = par->targinfo;
+
 	ADT_OPT(add_opt, par->family, info->add_set.u.compat.dim,
 		info->add_set.u.compat.flags, 0, UINT_MAX);
 	ADT_OPT(del_opt, par->family, info->del_set.u.compat.dim,
@@ -290,8 +297,8 @@
 			return -ENOENT;
 		}
 	}
-	if (info->add_set.u.flags[IPSET_DIM_MAX-1] != 0 ||
-	    info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
+	if (info->add_set.u.flags[IPSET_DIM_MAX - 1] != 0 ||
+	    info->del_set.u.flags[IPSET_DIM_MAX - 1] != 0) {
 		pr_warn("Protocol error: SET target dimension is over the limit!\n");
 		if (info->add_set.index != IPSET_INVALID_ID)
 			ip_set_nfnl_put(par->net, info->add_set.index);
@@ -324,6 +331,7 @@
 set_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_set_info_target_v1 *info = par->targinfo;
+
 	ADT_OPT(add_opt, par->family, info->add_set.dim,
 		info->add_set.flags, 0, UINT_MAX);
 	ADT_OPT(del_opt, par->family, info->del_set.dim,
@@ -392,6 +400,7 @@
 set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_set_info_target_v2 *info = par->targinfo;
+
 	ADT_OPT(add_opt, par->family, info->add_set.dim,
 		info->add_set.flags, info->flags, info->timeout);
 	ADT_OPT(del_opt, par->family, info->del_set.dim,
@@ -399,8 +408,8 @@
 
 	/* Normalize to fit into jiffies */
 	if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
-	    add_opt.ext.timeout > UINT_MAX/MSEC_PER_SEC)
-		add_opt.ext.timeout = UINT_MAX/MSEC_PER_SEC;
+	    add_opt.ext.timeout > UINT_MAX / MSEC_PER_SEC)
+		add_opt.ext.timeout = UINT_MAX / MSEC_PER_SEC;
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_add(info->add_set.index, skb, par, &add_opt);
 	if (info->del_set.index != IPSET_INVALID_ID)
@@ -418,6 +427,8 @@
 set_target_v3(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_set_info_target_v3 *info = par->targinfo;
+	int ret;
+
 	ADT_OPT(add_opt, par->family, info->add_set.dim,
 		info->add_set.flags, info->flags, info->timeout);
 	ADT_OPT(del_opt, par->family, info->del_set.dim,
@@ -425,12 +436,10 @@
 	ADT_OPT(map_opt, par->family, info->map_set.dim,
 		info->map_set.flags, 0, UINT_MAX);
 
-	int ret;
-
 	/* Normalize to fit into jiffies */
 	if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
-	    add_opt.ext.timeout > UINT_MAX/MSEC_PER_SEC)
-		add_opt.ext.timeout = UINT_MAX/MSEC_PER_SEC;
+	    add_opt.ext.timeout > UINT_MAX / MSEC_PER_SEC)
+		add_opt.ext.timeout = UINT_MAX / MSEC_PER_SEC;
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_add(info->add_set.index, skb, par, &add_opt);
 	if (info->del_set.index != IPSET_INVALID_ID)
@@ -456,7 +465,6 @@
 	return XT_CONTINUE;
 }
 
-
 static int
 set_target_v3_checkentry(const struct xt_tgchk_param *par)
 {
@@ -496,8 +504,7 @@
 		     !(par->hook_mask & (1 << NF_INET_FORWARD |
 					 1 << NF_INET_LOCAL_OUT |
 					 1 << NF_INET_POST_ROUTING))) {
-			pr_warn("mapping of prio or/and queue is allowed only"
-				"from OUTPUT/FORWARD/POSTROUTING chains\n");
+			pr_warn("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n");
 			return -EINVAL;
 		}
 		index = ip_set_nfnl_get_byindex(par->net,
@@ -518,8 +525,7 @@
 	if (info->add_set.dim > IPSET_DIM_MAX ||
 	    info->del_set.dim > IPSET_DIM_MAX ||
 	    info->map_set.dim > IPSET_DIM_MAX) {
-		pr_warn("Protocol error: SET target dimension "
-			"is over the limit!\n");
+		pr_warn("Protocol error: SET target dimension is over the limit!\n");
 		if (info->add_set.index != IPSET_INVALID_ID)
 			ip_set_nfnl_put(par->net, info->add_set.index);
 		if (info->del_set.index != IPSET_INVALID_ID)
@@ -545,7 +551,6 @@
 		ip_set_nfnl_put(par->net, info->map_set.index);
 }
 
-
 static struct xt_match set_matches[] __read_mostly = {
 	{
 		.name		= "set",
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index e092cb0..43e26c8 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -205,6 +205,7 @@
 socket_match(const struct sk_buff *skb, struct xt_action_param *par,
 	     const struct xt_socket_mtinfo1 *info)
 {
+	struct sk_buff *pskb = (struct sk_buff *)skb;
 	struct sock *sk = skb->sk;
 
 	if (!sk)
@@ -226,6 +227,10 @@
 		if (info->flags & XT_SOCKET_TRANSPARENT)
 			transparent = xt_socket_sk_is_transparent(sk);
 
+		if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
+		    transparent)
+			pskb->mark = sk->sk_mark;
+
 		if (sk != skb->sk)
 			sock_gen_put(sk);
 
@@ -247,7 +252,7 @@
 }
 
 static bool
-socket_mt4_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
+socket_mt4_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	return socket_match(skb, par, par->matchinfo);
 }
@@ -371,9 +376,10 @@
 }
 
 static bool
-socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
+socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
+	struct sk_buff *pskb = (struct sk_buff *)skb;
 	struct sock *sk = skb->sk;
 
 	if (!sk)
@@ -395,6 +401,10 @@
 		if (info->flags & XT_SOCKET_TRANSPARENT)
 			transparent = xt_socket_sk_is_transparent(sk);
 
+		if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
+		    transparent)
+			pskb->mark = sk->sk_mark;
+
 		if (sk != skb->sk)
 			sock_gen_put(sk);
 
@@ -428,6 +438,19 @@
 	return 0;
 }
 
+static int socket_mt_v3_check(const struct xt_mtchk_param *par)
+{
+	const struct xt_socket_mtinfo3 *info =
+				    (struct xt_socket_mtinfo3 *)par->matchinfo;
+
+	if (info->flags & ~XT_SOCKET_FLAGS_V3) {
+		pr_info("unknown flags 0x%x\n",
+			info->flags & ~XT_SOCKET_FLAGS_V3);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static struct xt_match socket_mt_reg[] __read_mostly = {
 	{
 		.name		= "socket",
@@ -442,7 +465,7 @@
 		.name		= "socket",
 		.revision	= 1,
 		.family		= NFPROTO_IPV4,
-		.match		= socket_mt4_v1_v2,
+		.match		= socket_mt4_v1_v2_v3,
 		.checkentry	= socket_mt_v1_check,
 		.matchsize	= sizeof(struct xt_socket_mtinfo1),
 		.hooks		= (1 << NF_INET_PRE_ROUTING) |
@@ -454,7 +477,7 @@
 		.name		= "socket",
 		.revision	= 1,
 		.family		= NFPROTO_IPV6,
-		.match		= socket_mt6_v1_v2,
+		.match		= socket_mt6_v1_v2_v3,
 		.checkentry	= socket_mt_v1_check,
 		.matchsize	= sizeof(struct xt_socket_mtinfo1),
 		.hooks		= (1 << NF_INET_PRE_ROUTING) |
@@ -466,7 +489,7 @@
 		.name		= "socket",
 		.revision	= 2,
 		.family		= NFPROTO_IPV4,
-		.match		= socket_mt4_v1_v2,
+		.match		= socket_mt4_v1_v2_v3,
 		.checkentry	= socket_mt_v2_check,
 		.matchsize	= sizeof(struct xt_socket_mtinfo1),
 		.hooks		= (1 << NF_INET_PRE_ROUTING) |
@@ -478,7 +501,7 @@
 		.name		= "socket",
 		.revision	= 2,
 		.family		= NFPROTO_IPV6,
-		.match		= socket_mt6_v1_v2,
+		.match		= socket_mt6_v1_v2_v3,
 		.checkentry	= socket_mt_v2_check,
 		.matchsize	= sizeof(struct xt_socket_mtinfo1),
 		.hooks		= (1 << NF_INET_PRE_ROUTING) |
@@ -486,6 +509,30 @@
 		.me		= THIS_MODULE,
 	},
 #endif
+	{
+		.name		= "socket",
+		.revision	= 3,
+		.family		= NFPROTO_IPV4,
+		.match		= socket_mt4_v1_v2_v3,
+		.checkentry	= socket_mt_v3_check,
+		.matchsize	= sizeof(struct xt_socket_mtinfo1),
+		.hooks		= (1 << NF_INET_PRE_ROUTING) |
+				  (1 << NF_INET_LOCAL_IN),
+		.me		= THIS_MODULE,
+	},
+#ifdef XT_SOCKET_HAVE_IPV6
+	{
+		.name		= "socket",
+		.revision	= 3,
+		.family		= NFPROTO_IPV6,
+		.match		= socket_mt6_v1_v2_v3,
+		.checkentry	= socket_mt_v3_check,
+		.matchsize	= sizeof(struct xt_socket_mtinfo1),
+		.hooks		= (1 << NF_INET_PRE_ROUTING) |
+				  (1 << NF_INET_LOCAL_IN),
+		.me		= THIS_MODULE,
+	},
+#endif
 };
 
 static int __init socket_mt_init(void)
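
The revision-3 match above adds XT_SOCKET_RESTORESKMARK, which copies sk->sk_mark
back onto the skb when a transparent, non-wildcard socket is found. From the
extension side this is just another flag bit in the match info. A minimal sketch
of how a (hypothetical) userspace extension would fill it in, assuming the uapi
xt_socket.h that accompanies this change:

	#include <string.h>
	#include <linux/netfilter/xt_socket.h>

	static void fill_socket_mtinfo3(struct xt_socket_mtinfo3 *info)
	{
		memset(info, 0, sizeof(*info));
		/* Match only transparent sockets and restore the socket
		 * mark onto matching skbs. */
		info->flags = XT_SOCKET_TRANSPARENT |
			      XT_SOCKET_RESTORESKMARK;
	}
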
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index bf6e766..dea9253 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -76,17 +76,18 @@
 };
 
 /* state bits */
-#define NETLINK_CONGESTED	0x0
+#define NETLINK_S_CONGESTED		0x0
 
 /* flags */
-#define NETLINK_KERNEL_SOCKET	0x1
-#define NETLINK_RECV_PKTINFO	0x2
-#define NETLINK_BROADCAST_SEND_ERROR	0x4
-#define NETLINK_RECV_NO_ENOBUFS	0x8
+#define NETLINK_F_KERNEL_SOCKET		0x1
+#define NETLINK_F_RECV_PKTINFO		0x2
+#define NETLINK_F_BROADCAST_SEND_ERROR	0x4
+#define NETLINK_F_RECV_NO_ENOBUFS	0x8
+#define NETLINK_F_LISTEN_ALL_NSID	0x10
 
 static inline int netlink_is_kernel(struct sock *sk)
 {
-	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
+	return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
 }
 
 struct netlink_table *nl_table __read_mostly;
@@ -256,8 +257,9 @@
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 
-	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
-		if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
+	if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
+		if (!test_and_set_bit(NETLINK_S_CONGESTED,
+				      &nlk_sk(sk)->state)) {
 			sk->sk_err = ENOBUFS;
 			sk->sk_error_report(sk);
 		}
@@ -270,8 +272,8 @@
 	struct netlink_sock *nlk = nlk_sk(sk);
 
 	if (skb_queue_empty(&sk->sk_receive_queue))
-		clear_bit(NETLINK_CONGESTED, &nlk->state);
-	if (!test_bit(NETLINK_CONGESTED, &nlk->state))
+		clear_bit(NETLINK_S_CONGESTED, &nlk->state);
+	if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
 		wake_up_interruptible(&nlk->wait);
 }
 
@@ -1118,14 +1120,15 @@
 };
 
 static int __netlink_create(struct net *net, struct socket *sock,
-			    struct mutex *cb_mutex, int protocol)
+			    struct mutex *cb_mutex, int protocol,
+			    int kern)
 {
 	struct sock *sk;
 	struct netlink_sock *nlk;
 
 	sock->ops = &netlink_ops;
 
-	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
+	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
 	if (!sk)
 		return -ENOMEM;
 
@@ -1187,7 +1190,7 @@
 	if (err < 0)
 		goto out;
 
-	err = __netlink_create(net, sock, cb_mutex, protocol);
+	err = __netlink_create(net, sock, cb_mutex, protocol, kern);
 	if (err < 0)
 		goto out_module;
 
@@ -1297,20 +1300,24 @@
 	struct netlink_table *table = &nl_table[sk->sk_protocol];
 	s32 portid = task_tgid_vnr(current);
 	int err;
-	static s32 rover = -4097;
+	s32 rover = -4096;
+	bool ok;
 
 retry:
 	cond_resched();
 	rcu_read_lock();
-	if (__netlink_lookup(table, portid, net)) {
+	ok = !__netlink_lookup(table, portid, net);
+	rcu_read_unlock();
+	if (!ok) {
 		/* Bind collision, search negative portid values. */
-		portid = rover--;
-		if (rover > -4097)
+		if (rover == -4096)
+			/* rover will be in range [S32_MIN, -4097] */
+			rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
+		else if (rover >= -4096)
 			rover = -4097;
-		rcu_read_unlock();
+		portid = rover--;
 		goto retry;
 	}
-	rcu_read_unlock();
 
 	err = netlink_insert(sk, portid);
 	if (err == -EADDRINUSE)
@@ -1657,7 +1664,7 @@
 	nlk = nlk_sk(sk);
 
 	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-	     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
+	     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
 	    !netlink_skb_is_mmaped(skb)) {
 		DECLARE_WAITQUEUE(wait, current);
 		if (!*timeo) {
@@ -1672,7 +1679,7 @@
 		add_wait_queue(&nlk->wait, &wait);
 
 		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-		     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
+		     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
 		    !sock_flag(sk, SOCK_DEAD))
 			*timeo = schedule_timeout(*timeo);
 
@@ -1896,7 +1903,7 @@
 	struct netlink_sock *nlk = nlk_sk(sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
-	    !test_bit(NETLINK_CONGESTED, &nlk->state)) {
+	    !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
 		netlink_skb_set_owner_r(skb, sk);
 		__netlink_sendskb(sk, skb);
 		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
@@ -1932,8 +1939,17 @@
 	    !test_bit(p->group - 1, nlk->groups))
 		return;
 
-	if (!net_eq(sock_net(sk), p->net))
-		return;
+	if (!net_eq(sock_net(sk), p->net)) {
+		if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
+			return;
+
+		if (!peernet_has_id(sock_net(sk), p->net))
+			return;
+
+		if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
+				     CAP_NET_BROADCAST))
+			return;
+	}
 
 	if (p->failure) {
 		netlink_overrun(sk);
@@ -1957,23 +1973,33 @@
 		netlink_overrun(sk);
 		/* Clone failed. Notify ALL listeners. */
 		p->failure = 1;
-		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
+		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
 			p->delivery_failure = 1;
-	} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
+		goto out;
+	}
+	if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
 		kfree_skb(p->skb2);
 		p->skb2 = NULL;
-	} else if (sk_filter(sk, p->skb2)) {
+		goto out;
+	}
+	if (sk_filter(sk, p->skb2)) {
 		kfree_skb(p->skb2);
 		p->skb2 = NULL;
-	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
+		goto out;
+	}
+	NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
+	NETLINK_CB(p->skb2).nsid_is_set = true;
+	val = netlink_broadcast_deliver(sk, p->skb2);
+	if (val < 0) {
 		netlink_overrun(sk);
-		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
+		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
 			p->delivery_failure = 1;
 	} else {
 		p->congested |= val;
 		p->delivered = 1;
 		p->skb2 = NULL;
 	}
+out:
 	sock_put(sk);
 }
 
@@ -2058,7 +2084,7 @@
 	    !test_bit(p->group - 1, nlk->groups))
 		goto out;
 
-	if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
+	if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
 		ret = 1;
 		goto out;
 	}
@@ -2077,7 +2103,7 @@
  * @code: error code, must be negative (as usual in kernelspace)
  *
  * This function returns the number of broadcast listeners that have set the
- * NETLINK_RECV_NO_ENOBUFS socket option.
+ * NETLINK_NO_ENOBUFS socket option.
  */
 int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
 {
@@ -2137,9 +2163,9 @@
 	switch (optname) {
 	case NETLINK_PKTINFO:
 		if (val)
-			nlk->flags |= NETLINK_RECV_PKTINFO;
+			nlk->flags |= NETLINK_F_RECV_PKTINFO;
 		else
-			nlk->flags &= ~NETLINK_RECV_PKTINFO;
+			nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
 		err = 0;
 		break;
 	case NETLINK_ADD_MEMBERSHIP:
@@ -2168,18 +2194,18 @@
 	}
 	case NETLINK_BROADCAST_ERROR:
 		if (val)
-			nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
+			nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
 		else
-			nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
+			nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
 		err = 0;
 		break;
 	case NETLINK_NO_ENOBUFS:
 		if (val) {
-			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
-			clear_bit(NETLINK_CONGESTED, &nlk->state);
+			nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
+			clear_bit(NETLINK_S_CONGESTED, &nlk->state);
 			wake_up_interruptible(&nlk->wait);
 		} else {
-			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
+			nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
 		}
 		err = 0;
 		break;
@@ -2202,6 +2228,16 @@
 		break;
 	}
 #endif /* CONFIG_NETLINK_MMAP */
+	case NETLINK_LISTEN_ALL_NSID:
+		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
+			return -EPERM;
+
+		if (val)
+			nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
+		else
+			nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
+		err = 0;
+		break;
 	default:
 		err = -ENOPROTOOPT;
 	}
@@ -2228,7 +2264,7 @@
 		if (len < sizeof(int))
 			return -EINVAL;
 		len = sizeof(int);
-		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
+		val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
 		if (put_user(len, optlen) ||
 		    put_user(val, optval))
 			return -EFAULT;
@@ -2238,7 +2274,7 @@
 		if (len < sizeof(int))
 			return -EINVAL;
 		len = sizeof(int);
-		val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
+		val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
 		if (put_user(len, optlen) ||
 		    put_user(val, optval))
 			return -EFAULT;
@@ -2248,12 +2284,34 @@
 		if (len < sizeof(int))
 			return -EINVAL;
 		len = sizeof(int);
-		val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
+		val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
 		if (put_user(len, optlen) ||
 		    put_user(val, optval))
 			return -EFAULT;
 		err = 0;
 		break;
+	case NETLINK_LIST_MEMBERSHIPS: {
+		int pos, idx, shift;
+
+		err = 0;
+		netlink_table_grab();
+		for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
+			if (len - pos < sizeof(u32))
+				break;
+
+			idx = pos / sizeof(unsigned long);
+			shift = (pos % sizeof(unsigned long)) * 8;
+			if (put_user((u32)(nlk->groups[idx] >> shift),
+				     (u32 __user *)(optval + pos))) {
+				err = -EFAULT;
+				break;
+			}
+		}
+		if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
+			err = -EFAULT;
+		netlink_table_ungrab();
+		break;
+	}
 	default:
 		err = -ENOPROTOOPT;
 	}
@@ -2268,6 +2326,16 @@
 	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
 }
 
+static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
+					 struct sk_buff *skb)
+{
+	if (!NETLINK_CB(skb).nsid_is_set)
+		return;
+
+	put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
+		 &NETLINK_CB(skb).nsid);
+}
+
 static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
 	struct sock *sk = sock->sk;
@@ -2419,8 +2487,10 @@
 		msg->msg_namelen = sizeof(*addr);
 	}
 
-	if (nlk->flags & NETLINK_RECV_PKTINFO)
+	if (nlk->flags & NETLINK_F_RECV_PKTINFO)
 		netlink_cmsg_recv_pktinfo(msg, skb);
+	if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
+		netlink_cmsg_listen_all_nsid(sk, msg, skb);
 
 	memset(&scm, 0, sizeof(scm));
 	scm.creds = *NETLINK_CREDS(skb);
@@ -2474,17 +2544,10 @@
 	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
 		return NULL;
 
-	/*
-	 * We have to just have a reference on the net from sk, but don't
-	 * get_net it. Besides, we cannot get and then put the net here.
-	 * So we create one inside init_net and the move it to net.
-	 */
-
-	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
+	if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
 		goto out_sock_release_nosk;
 
 	sk = sock->sk;
-	sk_change_net(sk, net);
 
 	if (!cfg || cfg->groups < 32)
 		groups = 32;
@@ -2503,7 +2566,7 @@
 		goto out_sock_release;
 
 	nlk = nlk_sk(sk);
-	nlk->flags |= NETLINK_KERNEL_SOCKET;
+	nlk->flags |= NETLINK_F_KERNEL_SOCKET;
 
 	netlink_table_grab();
 	if (!nl_table[unit].registered) {
@@ -2540,7 +2603,10 @@
 void
 netlink_kernel_release(struct sock *sk)
 {
-	sk_release_kernel(sk);
+	if (sk == NULL || sk->sk_socket == NULL)
+		return;
+
+	sock_release(sk->sk_socket);
 }
 EXPORT_SYMBOL(netlink_kernel_release);
 
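A userspace view of the two additions above: NETLINK_LISTEN_ALL_NSID is a
SOL_NETLINK setsockopt gated by CAP_NET_BROADCAST, and NETLINK_LIST_MEMBERSHIPS
is a getsockopt that dumps the socket's multicast-group bitmap. A minimal
sketch, assuming <linux/netlink.h> carries the matching uapi definitions:

	#include <stdio.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>

	int main(void)
	{
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
		int on = 1;
		unsigned int groups[8];
		socklen_t len = sizeof(groups);

		if (fd < 0)
			return 1;

		/* Also receive broadcasts sent from peer network
		 * namespaces (requires CAP_NET_BROADCAST). */
		if (setsockopt(fd, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID,
			       &on, sizeof(on)) < 0)
			perror("NETLINK_LISTEN_ALL_NSID");

		/* Fetch the multicast-group membership bitmap. */
		if (getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS,
			       groups, &len) == 0)
			printf("bitmap is %zu words\n",
			       len / sizeof(groups[0]));
		return 0;
	}
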
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index b987fd5..ed212ff 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -433,7 +433,7 @@
 	if (sock->type != SOCK_SEQPACKET || protocol != 0)
 		return -ESOCKTNOSUPPORT;
 
-	sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto);
+	sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto, kern);
 	if (sk  == NULL)
 		return -ENOMEM;
 
@@ -476,7 +476,7 @@
 	if (osk->sk_type != SOCK_SEQPACKET)
 		return NULL;
 
-	sk = sk_alloc(sock_net(osk), PF_NETROM, GFP_ATOMIC, osk->sk_prot);
+	sk = sk_alloc(sock_net(osk), PF_NETROM, GFP_ATOMIC, osk->sk_prot, 0);
 	if (sk == NULL)
 		return NULL;
 
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 96b64d2..d72a4f1 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -31,7 +31,6 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
-#include <linux/netfilter.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <net/netrom.h>
diff --git a/net/nfc/af_nfc.c b/net/nfc/af_nfc.c
index 2277276..54e40fa 100644
--- a/net/nfc/af_nfc.c
+++ b/net/nfc/af_nfc.c
@@ -40,7 +40,7 @@
 
 	read_lock(&proto_tab_lock);
 	if (proto_tab[proto] &&	try_module_get(proto_tab[proto]->owner)) {
-		rc = proto_tab[proto]->create(net, sock, proto_tab[proto]);
+		rc = proto_tab[proto]->create(net, sock, proto_tab[proto], kern);
 		module_put(proto_tab[proto]->owner);
 	}
 	read_unlock(&proto_tab_lock);
diff --git a/net/nfc/llcp.h b/net/nfc/llcp.h
index de1789e..1f68724 100644
--- a/net/nfc/llcp.h
+++ b/net/nfc/llcp.h
@@ -225,7 +225,7 @@
 			       struct sk_buff *skb, u8 direction);
 
 /* Sock API */
-struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp);
+struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp, int kern);
 void nfc_llcp_sock_free(struct nfc_llcp_sock *sock);
 void nfc_llcp_accept_unlink(struct sock *sk);
 void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk);
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index b18f07c..9887627 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -934,7 +934,7 @@
 		sock->ssap = ssap;
 	}
 
-	new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type, GFP_ATOMIC);
+	new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type, GFP_ATOMIC, 0);
 	if (new_sk == NULL) {
 		reason = LLCP_DM_REJ;
 		release_sock(&sock->sk);
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 9578bd6..b7de0da 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -942,12 +942,12 @@
 	}
 }
 
-struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
+struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp, int kern)
 {
 	struct sock *sk;
 	struct nfc_llcp_sock *llcp_sock;
 
-	sk = sk_alloc(&init_net, PF_NFC, gfp, &llcp_sock_proto);
+	sk = sk_alloc(&init_net, PF_NFC, gfp, &llcp_sock_proto, kern);
 	if (!sk)
 		return NULL;
 
@@ -993,7 +993,7 @@
 }
 
 static int llcp_sock_create(struct net *net, struct socket *sock,
-			    const struct nfc_protocol *nfc_proto)
+			    const struct nfc_protocol *nfc_proto, int kern)
 {
 	struct sock *sk;
 
@@ -1009,7 +1009,7 @@
 	else
 		sock->ops = &llcp_sock_ops;
 
-	sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC);
+	sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC, kern);
 	if (sk == NULL)
 		return -ENOMEM;
 
diff --git a/net/nfc/nci/Kconfig b/net/nfc/nci/Kconfig
index a4f1e42..901c1dd 100644
--- a/net/nfc/nci/Kconfig
+++ b/net/nfc/nci/Kconfig
@@ -19,3 +19,10 @@
 	  an NFC Controller (NFCC) and a Device Host (DH).
 
 	  Say yes if you use an NCI driver that requires SPI link layer.
+
+config NFC_NCI_UART
+	depends on NFC_NCI && TTY
+	tristate "NCI over UART protocol support"
+	default n
+	help
+	  Say yes if you use an NCI driver that requires UART link layer.
diff --git a/net/nfc/nci/Makefile b/net/nfc/nci/Makefile
index 7ed8949..b4b85b8 100644
--- a/net/nfc/nci/Makefile
+++ b/net/nfc/nci/Makefile
@@ -7,3 +7,6 @@
 nci-objs := core.o data.o lib.o ntf.o rsp.o hci.o
 
 nci-$(CONFIG_NFC_NCI_SPI) += spi.o
+
+nci_uart-y += uart.o
+obj-$(CONFIG_NFC_NCI_UART) += nci_uart.o
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 49ff321..95af2d2 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -28,6 +28,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
 
 #include <linux/module.h>
+#include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
 #include <linux/completion.h>
@@ -73,6 +74,7 @@
 		complete(&ndev->req_completion);
 	}
 }
+EXPORT_SYMBOL(nci_req_complete);
 
 static void nci_req_cancel(struct nci_dev *ndev, int err)
 {
@@ -323,6 +325,32 @@
 		     sizeof(struct nci_rf_deactivate_cmd), &cmd);
 }
 
+struct nci_prop_cmd_param {
+	__u16 opcode;
+	size_t len;
+	__u8 *payload;
+};
+
+static void nci_prop_cmd_req(struct nci_dev *ndev, unsigned long opt)
+{
+	struct nci_prop_cmd_param *param = (struct nci_prop_cmd_param *)opt;
+
+	nci_send_cmd(ndev, param->opcode, param->len, param->payload);
+}
+
+int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload)
+{
+	struct nci_prop_cmd_param param;
+
+	param.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY, oid);
+	param.len = len;
+	param.payload = payload;
+
+	return __nci_request(ndev, nci_prop_cmd_req, (unsigned long)&param,
+			     msecs_to_jiffies(NCI_CMD_TIMEOUT));
+}
+EXPORT_SYMBOL(nci_prop_cmd);
+
 static int nci_open_device(struct nci_dev *ndev)
 {
 	int rc = 0;
@@ -343,11 +371,17 @@
 
 	set_bit(NCI_INIT, &ndev->flags);
 
-	rc = __nci_request(ndev, nci_reset_req, 0,
-			   msecs_to_jiffies(NCI_RESET_TIMEOUT));
+	if (ndev->ops->init)
+		rc = ndev->ops->init(ndev);
 
-	if (ndev->ops->setup)
-		ndev->ops->setup(ndev);
+	if (!rc) {
+		rc = __nci_request(ndev, nci_reset_req, 0,
+				   msecs_to_jiffies(NCI_RESET_TIMEOUT));
+	}
+
+	if (!rc && ndev->ops->setup) {
+		rc = ndev->ops->setup(ndev);
+	}
 
 	if (!rc) {
 		rc = __nci_request(ndev, nci_init_req, 0,
@@ -407,6 +441,12 @@
 	set_bit(NCI_INIT, &ndev->flags);
 	__nci_request(ndev, nci_reset_req, 0,
 		      msecs_to_jiffies(NCI_RESET_TIMEOUT));
+
+	/* After this point our queues are empty
+	 * and no work is scheduled.
+	 */
+	ndev->ops->close(ndev);
+
 	clear_bit(NCI_INIT, &ndev->flags);
 
 	del_timer_sync(&ndev->cmd_timer);
@@ -414,10 +454,6 @@
 	/* Flush cmd wq */
 	flush_workqueue(ndev->cmd_wq);
 
-	/* After this point our queues are empty
-	 * and no works are scheduled. */
-	ndev->ops->close(ndev);
-
 	/* Clear flags */
 	ndev->flags = 0;
 
@@ -762,7 +798,7 @@
 
 	if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) {
 		nci_request(ndev, nci_rf_deactivate_req,
-			    NCI_DEACTIVATE_TYPE_SLEEP_MODE,
+			    NCI_DEACTIVATE_TYPE_IDLE_MODE,
 			    msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
 	}
 }
@@ -961,6 +997,14 @@
 		return NULL;
 
 	ndev->ops = ops;
+
+	if (ops->n_prop_ops > NCI_MAX_PROPRIETARY_CMD) {
+		pr_err("Too many proprietary commands: %zd\n",
+		       ops->n_prop_ops);
+		ops->prop_ops = NULL;
+		ops->n_prop_ops = 0;
+	}
+
 	ndev->tx_headroom = tx_headroom;
 	ndev->tx_tailroom = tx_tailroom;
 	init_completion(&ndev->req_completion);
@@ -1165,6 +1209,49 @@
 	return 0;
 }
 
+/* Proprietary commands API */
+static struct nci_prop_ops *prop_cmd_lookup(struct nci_dev *ndev,
+					    __u16 opcode)
+{
+	size_t i;
+	struct nci_prop_ops *prop_op;
+
+	if (!ndev->ops->prop_ops || !ndev->ops->n_prop_ops)
+		return NULL;
+
+	for (i = 0; i < ndev->ops->n_prop_ops; i++) {
+		prop_op = &ndev->ops->prop_ops[i];
+		if (prop_op->opcode == opcode)
+			return prop_op;
+	}
+
+	return NULL;
+}
+
+int nci_prop_rsp_packet(struct nci_dev *ndev, __u16 rsp_opcode,
+			struct sk_buff *skb)
+{
+	struct nci_prop_ops *prop_op;
+
+	prop_op = prop_cmd_lookup(ndev, rsp_opcode);
+	if (!prop_op || !prop_op->rsp)
+		return -ENOTSUPP;
+
+	return prop_op->rsp(ndev, skb);
+}
+
+int nci_prop_ntf_packet(struct nci_dev *ndev, __u16 ntf_opcode,
+			struct sk_buff *skb)
+{
+	struct nci_prop_ops *prop_op;
+
+	prop_op = prop_cmd_lookup(ndev, ntf_opcode);
+	if (!prop_op || !prop_op->ntf)
+		return -ENOTSUPP;
+
+	return prop_op->ntf(ndev, skb);
+}
+
 /* ---- NCI TX Data worker thread ---- */
 
 static void nci_tx_work(struct work_struct *work)
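
For driver authors, the new plumbing above is consumed by pointing
nci_ops->prop_ops at a table of struct nci_prop_ops entries and issuing
commands through nci_prop_cmd(). A minimal sketch; the OID value and handler
body are hypothetical, while the types and helpers are the ones added in this
patch:

	#include <net/nfc/nci.h>
	#include <net/nfc/nci_core.h>

	#define MYDRV_OID_SET_CLOCK	0x01	/* hypothetical vendor OID */

	static int mydrv_set_clock_rsp(struct nci_dev *ndev,
				       struct sk_buff *skb)
	{
		/* Parse the proprietary response payload here. */
		return 0;
	}

	static struct nci_prop_ops mydrv_prop_ops[] = {
		{
			.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
						  MYDRV_OID_SET_CLOCK),
			.rsp	= mydrv_set_clock_rsp,
		},
	};

	/* Hook up in the driver's struct nci_ops:
	 *	.prop_ops   = mydrv_prop_ops,
	 *	.n_prop_ops = ARRAY_SIZE(mydrv_prop_ops),
	 * then send the command with:
	 *	nci_prop_cmd(ndev, MYDRV_OID_SET_CLOCK, len, payload);
	 */
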
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
index ed54ec5..af002df 100644
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -639,22 +639,19 @@
 				 ndev->hci_dev->init_data.gates[0].gate,
 				 ndev->hci_dev->init_data.gates[0].pipe);
 	if (r < 0)
-		goto exit;
+		return r;
 
 	r = nci_hci_get_param(ndev, NCI_HCI_ADMIN_GATE,
 			      NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY, &skb);
 	if (r < 0)
-		goto exit;
+		return r;
 
 	if (skb->len &&
 	    skb->len == strlen(ndev->hci_dev->init_data.session_id) &&
-	    memcmp(ndev->hci_dev->init_data.session_id,
-		   skb->data, skb->len) == 0 &&
+	    !memcmp(ndev->hci_dev->init_data.session_id, skb->data, skb->len) &&
 	    ndev->ops->hci_load_session) {
 		/* Restore gate<->pipe table from some proprietary location. */
 		r = ndev->ops->hci_load_session(ndev);
-		if (r < 0)
-			goto exit;
 	} else {
 		r = nci_hci_dev_connect_gates(ndev,
 					      ndev->hci_dev->init_data.gate_count,
@@ -667,8 +664,6 @@
 				      ndev->hci_dev->init_data.session_id,
 				      strlen(ndev->hci_dev->init_data.session_id));
 	}
-	if (r == 0)
-		goto exit;
 
 exit:
 	kfree_skb(skb);
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 3218071..5d1c2e3 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -758,6 +758,15 @@
 	/* strip the nci control header */
 	skb_pull(skb, NCI_CTRL_HDR_SIZE);
 
+	if (nci_opcode_gid(ntf_opcode) == NCI_GID_PROPRIETARY) {
+		if (nci_prop_ntf_packet(ndev, ntf_opcode, skb)) {
+			pr_err("unsupported ntf opcode 0x%x\n",
+			       ntf_opcode);
+		}
+
+		goto end;
+	}
+
 	switch (ntf_opcode) {
 	case NCI_OP_CORE_CONN_CREDITS_NTF:
 		nci_core_conn_credits_ntf_packet(ndev, skb);
@@ -796,5 +805,6 @@
 		break;
 	}
 
+end:
 	kfree_skb(skb);
 }
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
index 02486bc..408bd8f 100644
--- a/net/nfc/nci/rsp.c
+++ b/net/nfc/nci/rsp.c
@@ -296,6 +296,15 @@
 	/* strip the nci control header */
 	skb_pull(skb, NCI_CTRL_HDR_SIZE);
 
+	if (nci_opcode_gid(rsp_opcode) == NCI_GID_PROPRIETARY) {
+		if (nci_prop_rsp_packet(ndev, rsp_opcode, skb) == -ENOTSUPP) {
+			pr_err("unsupported rsp opcode 0x%x\n",
+			       rsp_opcode);
+		}
+
+		goto end;
+	}
+
 	switch (rsp_opcode) {
 	case NCI_OP_CORE_RESET_RSP:
 		nci_core_reset_rsp_packet(ndev, skb);
@@ -346,6 +355,7 @@
 		break;
 	}
 
+end:
 	kfree_skb(skb);
 
 	/* trigger the next cmd */
diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c
new file mode 100644
index 0000000..21d8875
--- /dev/null
+++ b/net/nfc/nci/uart.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright (C) 2015, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+/* Inspired (hugely) by HCI LDISC implementation in Bluetooth.
+ *
+ *  Copyright (C) 2000-2001  Qualcomm Incorporated
+ *  Copyright (C) 2002-2003  Maxim Krasnyansky <maxk@qualcomm.com>
+ *  Copyright (C) 2004-2005  Marcel Holtmann <marcel@holtmann.org>
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/poll.h>
+
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/signal.h>
+#include <linux/ioctl.h>
+#include <linux/skbuff.h>
+
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+
+/* TX states  */
+#define NCI_UART_SENDING	1
+#define NCI_UART_TX_WAKEUP	2
+
+static struct nci_uart *nci_uart_drivers[NCI_UART_DRIVER_MAX];
+
+static inline struct sk_buff *nci_uart_dequeue(struct nci_uart *nu)
+{
+	struct sk_buff *skb = nu->tx_skb;
+
+	if (!skb)
+		skb = skb_dequeue(&nu->tx_q);
+	else
+		nu->tx_skb = NULL;
+
+	return skb;
+}
+
+static inline int nci_uart_queue_empty(struct nci_uart *nu)
+{
+	if (nu->tx_skb)
+		return 0;
+
+	return skb_queue_empty(&nu->tx_q);
+}
+
+static int nci_uart_tx_wakeup(struct nci_uart *nu)
+{
+	if (test_and_set_bit(NCI_UART_SENDING, &nu->tx_state)) {
+		set_bit(NCI_UART_TX_WAKEUP, &nu->tx_state);
+		return 0;
+	}
+
+	schedule_work(&nu->write_work);
+
+	return 0;
+}
+
+static void nci_uart_write_work(struct work_struct *work)
+{
+	struct nci_uart *nu = container_of(work, struct nci_uart, write_work);
+	struct tty_struct *tty = nu->tty;
+	struct sk_buff *skb;
+
+restart:
+	clear_bit(NCI_UART_TX_WAKEUP, &nu->tx_state);
+
+	if (nu->ops.tx_start)
+		nu->ops.tx_start(nu);
+
+	while ((skb = nci_uart_dequeue(nu))) {
+		int len;
+
+		set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+		len = tty->ops->write(tty, skb->data, skb->len);
+		skb_pull(skb, len);
+		if (skb->len) {
+			nu->tx_skb = skb;
+			break;
+		}
+		kfree_skb(skb);
+	}
+
+	if (test_bit(NCI_UART_TX_WAKEUP, &nu->tx_state))
+		goto restart;
+
+	if (nu->ops.tx_done && nci_uart_queue_empty(nu))
+		nu->ops.tx_done(nu);
+
+	clear_bit(NCI_UART_SENDING, &nu->tx_state);
+}
+
+static int nci_uart_set_driver(struct tty_struct *tty, unsigned int driver)
+{
+	struct nci_uart *nu = NULL;
+	int ret;
+
+	if (driver >= NCI_UART_DRIVER_MAX)
+		return -EINVAL;
+
+	if (!nci_uart_drivers[driver])
+		return -ENOENT;
+
+	nu = kzalloc(sizeof(*nu), GFP_KERNEL);
+	if (!nu)
+		return -ENOMEM;
+
+	memcpy(nu, nci_uart_drivers[driver], sizeof(struct nci_uart));
+	nu->tty = tty;
+	tty->disc_data = nu;
+	skb_queue_head_init(&nu->tx_q);
+	INIT_WORK(&nu->write_work, nci_uart_write_work);
+	spin_lock_init(&nu->rx_lock);
+
+	ret = nu->ops.open(nu);
+	if (ret) {
+		tty->disc_data = NULL;
+		kfree(nu);
+	} else if (!try_module_get(nu->owner)) {
+		nu->ops.close(nu);
+		tty->disc_data = NULL;
+		kfree(nu);
+		return -ENOENT;
+	}
+	return ret;
+}
+
+/* ------ LDISC part ------ */
+
+/* nci_uart_tty_open
+ *
+ *     Called when line discipline changed to NCI_UART.
+ *
+ * Arguments:
+ *     tty    pointer to tty info structure
+ * Return Value:
+ *     0 if success, otherwise error code
+ */
+static int nci_uart_tty_open(struct tty_struct *tty)
+{
+	/* Error out if the tty has no write op, instead of leaving an
+	 * exploitable hole.
+	 */
+	if (!tty->ops->write)
+		return -EOPNOTSUPP;
+
+	tty->disc_data = NULL;
+	tty->receive_room = 65536;
+
+	/* Flush any pending characters in the driver and line discipline. */
+
+	/* FIXME: why is this needed? Note: don't use ldisc_ref here, as the
+	 * open path runs before the ldisc is referenceable.
+	 */
+
+	if (tty->ldisc->ops->flush_buffer)
+		tty->ldisc->ops->flush_buffer(tty);
+	tty_driver_flush_buffer(tty);
+
+	return 0;
+}
+
+/* nci_uart_tty_close()
+ *
+ *    Called when the line discipline is changed to something
+ *    else, the tty is closed, or the tty detects a hangup.
+ */
+static void nci_uart_tty_close(struct tty_struct *tty)
+{
+	struct nci_uart *nu = (void *)tty->disc_data;
+
+	/* Detach from the tty */
+	tty->disc_data = NULL;
+
+	if (!nu)
+		return;
+
+	if (nu->tx_skb)
+		kfree_skb(nu->tx_skb);
+	if (nu->rx_skb)
+		kfree_skb(nu->rx_skb);
+
+	skb_queue_purge(&nu->tx_q);
+
+	nu->ops.close(nu);
+	nu->tty = NULL;
+	module_put(nu->owner);
+
+	cancel_work_sync(&nu->write_work);
+
+	kfree(nu);
+}
+
+/* nci_uart_tty_wakeup()
+ *
+ *    Callback for transmit wakeup. Called when low level
+ *    device driver can accept more send data.
+ *
+ * Arguments:        tty    pointer to associated tty instance data
+ * Return Value:    None
+ */
+static void nci_uart_tty_wakeup(struct tty_struct *tty)
+{
+	struct nci_uart *nu = (void *)tty->disc_data;
+
+	if (!nu)
+		return;
+
+	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+
+	if (tty != nu->tty)
+		return;
+
+	nci_uart_tx_wakeup(nu);
+}
+
+/* nci_uart_tty_receive()
+ *
+ *     Called by tty low level driver when receive data is
+ *     available.
+ *
+ * Arguments:  tty          pointer to tty instance data
+ *             data         pointer to received data
+ *             flags        pointer to flags for data
+ *             count        count of received data in bytes
+ *
+ * Return Value:    None
+ */
+static void nci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
+				 char *flags, int count)
+{
+	struct nci_uart *nu = (void *)tty->disc_data;
+
+	if (!nu || tty != nu->tty)
+		return;
+
+	spin_lock(&nu->rx_lock);
+	nu->ops.recv_buf(nu, (void *)data, flags, count);
+	spin_unlock(&nu->rx_lock);
+
+	tty_unthrottle(tty);
+}
+
+/* nci_uart_tty_ioctl()
+ *
+ *    Process IOCTL system call for the tty device.
+ *
+ * Arguments:
+ *
+ *    tty        pointer to tty instance data
+ *    file       pointer to open file object for device
+ *    cmd        IOCTL command code
+ *    arg        argument for IOCTL call (cmd dependent)
+ *
+ * Return Value:    Command dependent
+ */
+static int nci_uart_tty_ioctl(struct tty_struct *tty, struct file *file,
+			      unsigned int cmd, unsigned long arg)
+{
+	struct nci_uart *nu = (void *)tty->disc_data;
+	int err = 0;
+
+	switch (cmd) {
+	case NCIUARTSETDRIVER:
+		if (!nu)
+			return nci_uart_set_driver(tty, (unsigned int)arg);
+		else
+			return -EBUSY;
+		break;
+	default:
+		err = n_tty_ioctl_helper(tty, file, cmd, arg);
+		break;
+	}
+
+	return err;
+}
+
+/* We don't provide a read/write/poll interface for user space. */
+static ssize_t nci_uart_tty_read(struct tty_struct *tty, struct file *file,
+				 unsigned char __user *buf, size_t nr)
+{
+	return 0;
+}
+
+static ssize_t nci_uart_tty_write(struct tty_struct *tty, struct file *file,
+				  const unsigned char *data, size_t count)
+{
+	return 0;
+}
+
+static unsigned int nci_uart_tty_poll(struct tty_struct *tty,
+				      struct file *filp, poll_table *wait)
+{
+	return 0;
+}
+
+static int nci_uart_send(struct nci_uart *nu, struct sk_buff *skb)
+{
+	/* Queue TX packet */
+	skb_queue_tail(&nu->tx_q, skb);
+
+	/* Try to start TX (if possible) */
+	nci_uart_tx_wakeup(nu);
+
+	return 0;
+}
+
+/* -- Default recv_buf handler --
+ *
+ * This handler assumes that NCI frames are sent over the UART link without
+ * any framing. It reads the NCI header, retrieves the packet size and, once
+ * all packet bytes have been received, passes the packet to the nci_uart
+ * driver for processing.
+ */
+static int nci_uart_default_recv_buf(struct nci_uart *nu, const u8 *data,
+				     char *flags, int count)
+{
+	int chunk_len;
+
+	if (!nu->ndev) {
+		nfc_err(nu->tty->dev,
+			"receive data from tty but no NCI dev is attached yet, drop buffer\n");
+		return 0;
+	}
+
+	/* Decode all incoming data into packets
+	 * and enqueue them for processing.
+	 */
+	while (count > 0) {
+		/* If this is the first data of a packet, allocate a buffer */
+		if (!nu->rx_skb) {
+			nu->rx_packet_len = -1;
+			nu->rx_skb = nci_skb_alloc(nu->ndev,
+						   NCI_MAX_PACKET_SIZE,
+						   GFP_KERNEL);
+			if (!nu->rx_skb)
+				return -ENOMEM;
+		}
+
+		/* Eat byte after byte till full packet header is received */
+		if (nu->rx_skb->len < NCI_CTRL_HDR_SIZE) {
+			*skb_put(nu->rx_skb, 1) = *data++;
+			--count;
+			continue;
+		}
+
+		/* Header was received but packet len was not read */
+		if (nu->rx_packet_len < 0)
+			nu->rx_packet_len = NCI_CTRL_HDR_SIZE +
+				nci_plen(nu->rx_skb->data);
+
+		/* Compute how many bytes are missing and how many bytes can
+		 * be consumed.
+		 */
+		chunk_len = nu->rx_packet_len - nu->rx_skb->len;
+		if (count < chunk_len)
+			chunk_len = count;
+		memcpy(skb_put(nu->rx_skb, chunk_len), data, chunk_len);
+		data += chunk_len;
+		count -= chunk_len;
+
+		/* Check if the packet is fully received */
+		if (nu->rx_packet_len == nu->rx_skb->len) {
+			/* Pass RX packet to driver */
+			if (nu->ops.recv(nu, nu->rx_skb) != 0)
+				nfc_err(nu->tty->dev, "corrupted RX packet\n");
+			/* Next packet will be a new one */
+			nu->rx_skb = NULL;
+		}
+	}
+
+	return 0;
+}
+
+/* -- Default recv handler -- */
+static int nci_uart_default_recv(struct nci_uart *nu, struct sk_buff *skb)
+{
+	return nci_recv_frame(nu->ndev, skb);
+}
+
+int nci_uart_register(struct nci_uart *nu)
+{
+	if (!nu || !nu->ops.open ||
+	    !nu->ops.recv || !nu->ops.close)
+		return -EINVAL;
+
+	/* Set the send callback */
+	nu->ops.send = nci_uart_send;
+
+	/* Install default handlers if not overridden */
+	if (!nu->ops.recv_buf)
+		nu->ops.recv_buf = nci_uart_default_recv_buf;
+	if (!nu->ops.recv)
+		nu->ops.recv = nci_uart_default_recv;
+
+	/* Add this driver in the driver list */
+	if (nci_uart_drivers[nu->driver]) {
+		pr_err("driver %d is already registered\n", nu->driver);
+		return -EBUSY;
+	}
+	nci_uart_drivers[nu->driver] = nu;
+
+	pr_info("NCI uart driver '%s [%d]' registered\n", nu->name, nu->driver);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nci_uart_register);
+
+void nci_uart_unregister(struct nci_uart *nu)
+{
+	pr_info("NCI uart driver '%s [%d]' unregistered\n", nu->name,
+		nu->driver);
+
+	/* Remove this driver from the driver list */
+	nci_uart_drivers[nu->driver] = NULL;
+}
+EXPORT_SYMBOL_GPL(nci_uart_unregister);
+
+void nci_uart_set_config(struct nci_uart *nu, int baudrate, int flow_ctrl)
+{
+	struct ktermios new_termios;
+
+	if (!nu->tty)
+		return;
+
+	down_read(&nu->tty->termios_rwsem);
+	new_termios = nu->tty->termios;
+	up_read(&nu->tty->termios_rwsem);
+	tty_termios_encode_baud_rate(&new_termios, baudrate, baudrate);
+
+	if (flow_ctrl)
+		new_termios.c_cflag |= CRTSCTS;
+	else
+		new_termios.c_cflag &= ~CRTSCTS;
+
+	tty_set_termios(nu->tty, &new_termios);
+}
+EXPORT_SYMBOL_GPL(nci_uart_set_config);
+
+static struct tty_ldisc_ops nci_uart_ldisc = {
+	.magic		= TTY_LDISC_MAGIC,
+	.owner		= THIS_MODULE,
+	.name		= "n_nci",
+	.open		= nci_uart_tty_open,
+	.close		= nci_uart_tty_close,
+	.read		= nci_uart_tty_read,
+	.write		= nci_uart_tty_write,
+	.poll		= nci_uart_tty_poll,
+	.receive_buf	= nci_uart_tty_receive,
+	.write_wakeup	= nci_uart_tty_wakeup,
+	.ioctl		= nci_uart_tty_ioctl,
+};
+
+static int __init nci_uart_init(void)
+{
+	memset(nci_uart_drivers, 0, sizeof(nci_uart_drivers));
+	return tty_register_ldisc(N_NCI, &nci_uart_ldisc);
+}
+
+static void __exit nci_uart_exit(void)
+{
+	tty_unregister_ldisc(N_NCI);
+}
+
+module_init(nci_uart_init);
+module_exit(nci_uart_exit);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("NFC NCI UART driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_NCI);
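
Attaching the new line discipline is done from userspace: set N_NCI on the tty
with TIOCSETD, then select the registered vendor driver with the
NCIUARTSETDRIVER ioctl. A minimal sketch; the N_NCI value, the ioctl number
(taken from the nci_core.h header in this series), the device path and the
driver index are assumptions for illustration:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/ioctl.h>

	#define NCIUARTSETDRIVER _IOW('U', 0, char *) /* assumed, from nci_core.h */

	int main(void)
	{
		int ldisc = 25;		/* assumed N_NCI value */
		unsigned long drv = 0;	/* index used in nci_uart_register() */
		int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

		if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0)
			perror("TIOCSETD");
		else if (ioctl(fd, NCIUARTSETDRIVER, drv) < 0)
			perror("NCIUARTSETDRIVER");
		/* Keep fd open for as long as the NCI device is in use. */
		pause();
		return 0;
	}
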
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 3763036..f85f37e 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -5,6 +5,12 @@
  *    Lauro Ramos Venancio <lauro.venancio@openbossa.org>
  *    Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
  *
+ * Vendor commands implementation based on net/wireless/nl80211.c
+ * which is:
+ *
+ * Copyright 2006-2010	Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014  Intel Mobile Communications GmbH
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -1489,6 +1495,50 @@
 	return nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx);
 }
 
+static int nfc_genl_vendor_cmd(struct sk_buff *skb,
+			       struct genl_info *info)
+{
+	struct nfc_dev *dev;
+	struct nfc_vendor_cmd *cmd;
+	u32 dev_idx, vid, subcmd;
+	u8 *data;
+	size_t data_len;
+	int i;
+
+	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+	    !info->attrs[NFC_ATTR_VENDOR_ID] ||
+	    !info->attrs[NFC_ATTR_VENDOR_SUBCMD])
+		return -EINVAL;
+
+	dev_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+	vid = nla_get_u32(info->attrs[NFC_ATTR_VENDOR_ID]);
+	subcmd = nla_get_u32(info->attrs[NFC_ATTR_VENDOR_SUBCMD]);
+
+	dev = nfc_get_device(dev_idx);
+	if (!dev || !dev->vendor_cmds || !dev->n_vendor_cmds)
+		return -ENODEV;
+
+	data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]);
+	if (data) {
+		data_len = nla_len(info->attrs[NFC_ATTR_VENDOR_DATA]);
+		if (data_len == 0)
+			return -EINVAL;
+	} else {
+		data_len = 0;
+	}
+
+	for (i = 0; i < dev->n_vendor_cmds; i++) {
+		cmd = &dev->vendor_cmds[i];
+
+		if (cmd->vendor_id != vid || cmd->subcmd != subcmd)
+			continue;
+
+		return cmd->doit(dev, data, data_len);
+	}
+
+	return -EOPNOTSUPP;
+}
+
 static const struct genl_ops nfc_genl_ops[] = {
 	{
 		.cmd = NFC_CMD_GET_DEVICE,
@@ -1579,6 +1629,11 @@
 		.doit = nfc_genl_activate_target,
 		.policy = nfc_genl_policy,
 	},
+	{
+		.cmd = NFC_CMD_VENDOR,
+		.doit = nfc_genl_vendor_cmd,
+		.policy = nfc_genl_policy,
+	},
 };
 
 
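On the driver side, the dispatch loop above matches on (vendor_id, subcmd)
pairs in the device's vendor command table. A minimal sketch; the vendor ID,
subcommand and handler are hypothetical, while struct nfc_vendor_cmd and the
doit() signature follow the lookup in nfc_genl_vendor_cmd():

	#include <net/nfc/nfc.h>

	#define MYVENDOR_ID		0x001234	/* hypothetical OUI */
	#define MYVENDOR_SET_MODE	0x01		/* hypothetical subcmd */

	static int myvendor_set_mode(struct nfc_dev *dev, void *data,
				     size_t data_len)
	{
		/* Act on the vendor payload here. */
		return 0;
	}

	static struct nfc_vendor_cmd myvendor_cmds[] = {
		{
			.vendor_id = MYVENDOR_ID,
			.subcmd	   = MYVENDOR_SET_MODE,
			.doit	   = myvendor_set_mode,
		},
	};

	/* The driver then points dev->vendor_cmds at this table and sets
	 * dev->n_vendor_cmds = ARRAY_SIZE(myvendor_cmds).
	 */
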
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index a8ce80b..5c93e84 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -30,7 +30,7 @@
 	struct proto *proto;
 	struct module *owner;
 	int (*create)(struct net *net, struct socket *sock,
-		      const struct nfc_protocol *nfc_proto);
+		      const struct nfc_protocol *nfc_proto, int kern);
 };
 
 struct nfc_rawsock {
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 82b4e80..e9a9148 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -334,7 +334,7 @@
 }
 
 static int rawsock_create(struct net *net, struct socket *sock,
-			  const struct nfc_protocol *nfc_proto)
+			  const struct nfc_protocol *nfc_proto, int kern)
 {
 	struct sock *sk;
 
@@ -348,7 +348,7 @@
 	else
 		sock->ops = &rawsock_ops;
 
-	sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto);
+	sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern);
 	if (!sk)
 		return -ENOMEM;
 
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index ed6b0f8..1584040 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -59,7 +59,7 @@
 config OPENVSWITCH_GENEVE
 	tristate "Open vSwitch Geneve tunneling support"
 	depends on OPENVSWITCH
-	depends on GENEVE
+	depends on GENEVE_CORE
 	default OPENVSWITCH
 	---help---
 	  If you say Y here, then Open vSwitch will be able to create geneve vports.
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index b491c1c..8a8c0b8 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -608,17 +608,16 @@
 }
 
 static int output_userspace(struct datapath *dp, struct sk_buff *skb,
-			    struct sw_flow_key *key, const struct nlattr *attr)
+			    struct sw_flow_key *key, const struct nlattr *attr,
+			    const struct nlattr *actions, int actions_len)
 {
 	struct ovs_tunnel_info info;
 	struct dp_upcall_info upcall;
 	const struct nlattr *a;
 	int rem;
 
+	memset(&upcall, 0, sizeof(upcall));
 	upcall.cmd = OVS_PACKET_CMD_ACTION;
-	upcall.userdata = NULL;
-	upcall.portid = 0;
-	upcall.egress_tun_info = NULL;
 
 	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
 		 a = nla_next(a, &rem)) {
@@ -647,6 +646,13 @@
 			break;
 		}
 
+		case OVS_USERSPACE_ATTR_ACTIONS: {
+			/* Include actions. */
+			upcall.actions = actions;
+			upcall.actions_len = actions_len;
+			break;
+		}
+
 		} /* End of switch. */
 	}
 
@@ -654,7 +660,8 @@
 }
 
 static int sample(struct datapath *dp, struct sk_buff *skb,
-		  struct sw_flow_key *key, const struct nlattr *attr)
+		  struct sw_flow_key *key, const struct nlattr *attr,
+		  const struct nlattr *actions, int actions_len)
 {
 	const struct nlattr *acts_list = NULL;
 	const struct nlattr *a;
@@ -688,7 +695,7 @@
 	 */
 	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
 		   nla_is_last(a, rem)))
-		return output_userspace(dp, skb, key, a);
+		return output_userspace(dp, skb, key, a, actions, actions_len);
 
 	skb = skb_clone(skb, GFP_ATOMIC);
 	if (!skb)
@@ -872,7 +879,7 @@
 			break;
 
 		case OVS_ACTION_ATTR_USERSPACE:
-			output_userspace(dp, skb, key, a);
+			output_userspace(dp, skb, key, a, attr, len);
 			break;
 
 		case OVS_ACTION_ATTR_HASH:
@@ -916,7 +923,7 @@
 			break;
 
 		case OVS_ACTION_ATTR_SAMPLE:
-			err = sample(dp, skb, key, a);
+			err = sample(dp, skb, key, a, attr, len);
 			break;
 		}
 
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 096c627..ff8c4a4 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -272,10 +272,9 @@
 		struct dp_upcall_info upcall;
 		int error;
 
+		memset(&upcall, 0, sizeof(upcall));
 		upcall.cmd = OVS_PACKET_CMD_MISS;
-		upcall.userdata = NULL;
 		upcall.portid = ovs_vport_find_upcall_portid(p, skb);
-		upcall.egress_tun_info = NULL;
 		error = ovs_dp_upcall(dp, skb, key, &upcall);
 		if (unlikely(error))
 			kfree_skb(skb);
@@ -397,6 +396,10 @@
 	if (upcall_info->egress_tun_info)
 		size += nla_total_size(ovs_tun_key_attr_size());
 
+	/* OVS_PACKET_ATTR_ACTIONS */
+	if (upcall_info->actions_len)
+		size += nla_total_size(upcall_info->actions_len);
+
 	return size;
 }
 
@@ -478,6 +481,17 @@
 		nla_nest_end(user_skb, nla);
 	}
 
+	if (upcall_info->actions_len) {
+		nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
+		err = ovs_nla_put_actions(upcall_info->actions,
+					  upcall_info->actions_len,
+					  user_skb);
+		if (!err)
+			nla_nest_end(user_skb, nla);
+		else
+			nla_nest_cancel(user_skb, nla);
+	}
+
 	/* Only reserve room for attribute header, packet data is added
 	 * in skb_zerocopy() */
 	if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
@@ -545,7 +559,7 @@
 	/* Normally, setting the skb 'protocol' field would be handled by a
 	 * call to eth_type_trans(), but it assumes there's a sending
 	 * device, which we may not have. */
-	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
+	if (eth_proto_is_802_3(eth->h_proto))
 		packet->protocol = eth->h_proto;
 	else
 		packet->protocol = htons(ETH_P_802_2);
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 4ec4a48..cd691e9 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -116,6 +116,8 @@
 struct dp_upcall_info {
 	const struct ovs_tunnel_info *egress_tun_info;
 	const struct nlattr *userdata;
+	const struct nlattr *actions;
+	int actions_len;
 	u32 portid;
 	u8 cmd;
 };
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 2dacc7b..bc7b0ab 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -332,7 +332,7 @@
 	proto = *(__be16 *) skb->data;
 	__skb_pull(skb, sizeof(__be16));
 
-	if (ntohs(proto) >= ETH_P_802_3_MIN)
+	if (eth_proto_is_802_3(proto))
 		return proto;
 
 	if (skb->len < sizeof(struct llc_snap_hdr))
@@ -349,7 +349,7 @@
 
 	__skb_pull(skb, sizeof(struct llc_snap_hdr));
 
-	if (ntohs(llc->ethertype) >= ETH_P_802_3_MIN)
+	if (eth_proto_is_802_3(llc->ethertype))
 		return llc->ethertype;
 
 	return htons(ETH_P_802_2);
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index c691b1a..624e41c 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -816,7 +816,7 @@
 		if (is_mask) {
 			/* Always exact match EtherType. */
 			eth_type = htons(0xffff);
-		} else if (ntohs(eth_type) < ETH_P_802_3_MIN) {
+		} else if (!eth_proto_is_802_3(eth_type)) {
 			OVS_NLERR(log, "EtherType %x is less than min %x",
 				  ntohs(eth_type), ETH_P_802_3_MIN);
 			return -EINVAL;
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index bf02fd5..208c576 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -46,11 +46,6 @@
 	return vport_priv(vport);
 }
 
-static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
-{
-	return (struct genevehdr *)(udp_hdr(skb) + 1);
-}
-
 /* Convert 64 bit tunnel ID to 24 bit VNI. */
 static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
 {
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index b5989c6..c9e8741 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -543,15 +543,11 @@
 	pkc->retire_blk_timer.expires = jiffies;
 }
 
-static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
+static void prb_setup_retire_blk_timer(struct packet_sock *po)
 {
 	struct tpacket_kbdq_core *pkc;
 
-	if (tx_ring)
-		BUG();
-
-	pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
-			GET_PBDQC_FROM_RB(&po->rx_ring);
+	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
 }
 
@@ -607,7 +603,7 @@
 static void init_prb_bdqc(struct packet_sock *po,
 			struct packet_ring_buffer *rb,
 			struct pgv *pg_vec,
-			union tpacket_req_u *req_u, int tx_ring)
+			union tpacket_req_u *req_u)
 {
 	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
 	struct tpacket_block_desc *pbd;
@@ -634,7 +630,7 @@
 
 	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
 	prb_init_ft_ops(p1, req_u);
-	prb_setup_retire_blk_timer(po, tx_ring);
+	prb_setup_retire_blk_timer(po);
 	prb_open_block(p1, pbd);
 }
 
@@ -1234,27 +1230,81 @@
 	free_percpu(po->tx_ring.pending_refcnt);
 }
 
-static bool packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
+#define ROOM_POW_OFF	2
+#define ROOM_NONE	0x0
+#define ROOM_LOW	0x1
+#define ROOM_NORMAL	0x2
+
+static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
+{
+	int idx, len;
+
+	len = po->rx_ring.frame_max + 1;
+	idx = po->rx_ring.head;
+	if (pow_off)
+		idx += len >> pow_off;
+	if (idx >= len)
+		idx -= len;
+	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
+}
+
+static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
+{
+	int idx, len;
+
+	len = po->rx_ring.prb_bdqc.knum_blocks;
+	idx = po->rx_ring.prb_bdqc.kactive_blk_num;
+	if (pow_off)
+		idx += len >> pow_off;
+	if (idx >= len)
+		idx -= len;
+	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
+}
+
+static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
 {
 	struct sock *sk = &po->sk;
+	int ret = ROOM_NONE;
+
+	if (po->prot_hook.func != tpacket_rcv) {
+		int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
+					  - (skb ? skb->truesize : 0);
+		if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
+			return ROOM_NORMAL;
+		else if (avail > 0)
+			return ROOM_LOW;
+		else
+			return ROOM_NONE;
+	}
+
+	if (po->tp_version == TPACKET_V3) {
+		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
+			ret = ROOM_NORMAL;
+		else if (__tpacket_v3_has_room(po, 0))
+			ret = ROOM_LOW;
+	} else {
+		if (__tpacket_has_room(po, ROOM_POW_OFF))
+			ret = ROOM_NORMAL;
+		else if (__tpacket_has_room(po, 0))
+			ret = ROOM_LOW;
+	}
+
+	return ret;
+}
+
+static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
+{
+	int ret;
 	bool has_room;
 
-	if (po->prot_hook.func != tpacket_rcv)
-		return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize)
-			<= sk->sk_rcvbuf;
+	spin_lock_bh(&po->sk.sk_receive_queue.lock);
+	ret = __packet_rcv_has_room(po, skb);
+	has_room = ret == ROOM_NORMAL;
+	if (po->pressure == has_room)
+		po->pressure = !has_room;
+	spin_unlock_bh(&po->sk.sk_receive_queue.lock);
 
-	spin_lock(&sk->sk_receive_queue.lock);
-	if (po->tp_version == TPACKET_V3)
-		has_room = prb_lookup_block(po, &po->rx_ring,
-					    po->rx_ring.prb_bdqc.kactive_blk_num,
-					    TP_STATUS_KERNEL);
-	else
-		has_room = packet_lookup_frame(po, &po->rx_ring,
-					       po->rx_ring.head,
-					       TP_STATUS_KERNEL);
-	spin_unlock(&sk->sk_receive_queue.lock);
-
-	return has_room;
+	return ret;
 }
 
 static void packet_sock_destruct(struct sock *sk)
@@ -1272,14 +1322,18 @@
 	sk_refcnt_debug_dec(sk);
 }
 
-static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
+static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
 {
-	int x = atomic_read(&f->rr_cur) + 1;
+	u32 rxhash;
+	int i, count = 0;
 
-	if (x >= num)
-		x = 0;
+	rxhash = skb_get_hash(skb);
+	for (i = 0; i < ROLLOVER_HLEN; i++)
+		if (po->rollover->history[i] == rxhash)
+			count++;
 
-	return x;
+	po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
+	return count > (ROLLOVER_HLEN >> 1);
 }
 
 static unsigned int fanout_demux_hash(struct packet_fanout *f,
@@ -1293,13 +1347,9 @@
 				    struct sk_buff *skb,
 				    unsigned int num)
 {
-	int cur, old;
+	unsigned int val = atomic_inc_return(&f->rr_cur);
 
-	cur = atomic_read(&f->rr_cur);
-	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
-				     fanout_rr_next(f, num))) != cur)
-		cur = old;
-	return cur;
+	return val % num;
 }
 
 static unsigned int fanout_demux_cpu(struct packet_fanout *f,
@@ -1318,22 +1368,40 @@
 
 static unsigned int fanout_demux_rollover(struct packet_fanout *f,
 					  struct sk_buff *skb,
-					  unsigned int idx, unsigned int skip,
+					  unsigned int idx, bool try_self,
 					  unsigned int num)
 {
-	unsigned int i, j;
+	struct packet_sock *po, *po_next, *po_skip = NULL;
+	unsigned int i, j, room = ROOM_NONE;
 
-	i = j = min_t(int, f->next[idx], num - 1);
+	po = pkt_sk(f->arr[idx]);
+
+	if (try_self) {
+		room = packet_rcv_has_room(po, skb);
+		if (room == ROOM_NORMAL ||
+		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
+			return idx;
+		po_skip = po;
+	}
+
+	i = j = min_t(int, po->rollover->sock, num - 1);
 	do {
-		if (i != skip && packet_rcv_has_room(pkt_sk(f->arr[i]), skb)) {
+		po_next = pkt_sk(f->arr[i]);
+		if (po_next != po_skip && !po_next->pressure &&
+		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
 			if (i != j)
-				f->next[idx] = i;
+				po->rollover->sock = i;
+			atomic_long_inc(&po->rollover->num);
+			if (room == ROOM_LOW)
+				atomic_long_inc(&po->rollover->num_huge);
 			return i;
 		}
+
 		if (++i == num)
 			i = 0;
 	} while (i != j);
 
+	atomic_long_inc(&po->rollover->num_failed);
 	return idx;
 }
 
@@ -1353,7 +1421,7 @@
 			     struct packet_type *pt, struct net_device *orig_dev)
 {
 	struct packet_fanout *f = pt->af_packet_priv;
-	unsigned int num = f->num_members;
+	unsigned int num = READ_ONCE(f->num_members);
 	struct packet_sock *po;
 	unsigned int idx;
 
@@ -1386,17 +1454,14 @@
 		idx = fanout_demux_qm(f, skb, num);
 		break;
 	case PACKET_FANOUT_ROLLOVER:
-		idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num);
+		idx = fanout_demux_rollover(f, skb, 0, false, num);
 		break;
 	}
 
-	po = pkt_sk(f->arr[idx]);
-	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER) &&
-	    unlikely(!packet_rcv_has_room(po, skb))) {
-		idx = fanout_demux_rollover(f, skb, idx, idx, num);
-		po = pkt_sk(f->arr[idx]);
-	}
+	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
+		idx = fanout_demux_rollover(f, skb, idx, true, num);
 
+	po = pkt_sk(f->arr[idx]);
 	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
 }
 
@@ -1467,6 +1532,16 @@
 	if (po->fanout)
 		return -EALREADY;
 
+	if (type == PACKET_FANOUT_ROLLOVER ||
+	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
+		po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
+		if (!po->rollover)
+			return -ENOMEM;
+		atomic_long_set(&po->rollover->num, 0);
+		atomic_long_set(&po->rollover->num_huge, 0);
+		atomic_long_set(&po->rollover->num_failed, 0);
+	}
+
 	mutex_lock(&fanout_mutex);
 	match = NULL;
 	list_for_each_entry(f, &fanout_list, list) {
@@ -1515,6 +1590,10 @@
 	}
 out:
 	mutex_unlock(&fanout_mutex);
+	if (err) {
+		kfree(po->rollover);
+		po->rollover = NULL;
+	}
 	return err;
 }
 
@@ -1536,6 +1615,9 @@
 		kfree(f);
 	}
 	mutex_unlock(&fanout_mutex);
+
+	if (po->rollover)
+		kfree_rcu(po->rollover, rcu);
 }
 
 static const struct proto_ops packet_ops;
@@ -2835,7 +2917,7 @@
 	sock->state = SS_UNCONNECTED;
 
 	err = -ENOBUFS;
-	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
+	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
 	if (sk == NULL)
 		goto out;
 
@@ -2865,6 +2947,7 @@
 
 	spin_lock_init(&po->bind_lock);
 	mutex_init(&po->pg_vec_lock);
+	po->rollover = NULL;
 	po->prot_hook.func = packet_rcv;
 
 	if (sock->type == SOCK_PACKET)
@@ -2942,6 +3025,9 @@
 	if (skb == NULL)
 		goto out;
 
+	if (pkt_sk(sk)->pressure)
+		packet_rcv_has_room(pkt_sk(sk), NULL);
+
 	if (pkt_sk(sk)->has_vnet_hdr) {
 		struct virtio_net_hdr vnet_hdr = { 0 };
 
@@ -3485,6 +3571,7 @@
 	struct packet_sock *po = pkt_sk(sk);
 	void *data = &val;
 	union tpacket_stats_u st;
+	struct tpacket_rollover_stats rstats;
 
 	if (level != SOL_PACKET)
 		return -ENOPROTOOPT;
@@ -3560,6 +3647,15 @@
 			((u32)po->fanout->flags << 24)) :
 		       0);
 		break;
+	case PACKET_ROLLOVER_STATS:
+		if (!po->rollover)
+			return -EINVAL;
+		rstats.tp_all = atomic_long_read(&po->rollover->num);
+		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
+		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
+		data = &rstats;
+		lv = sizeof(rstats);
+		break;
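
    The new getsockopt is read-only and cheap: three atomic counters record
    how often rollover ran at all, how often it ran because a huge flow hit a
    low-room socket, and how often every socket in the group was full. A
    hedged userspace sketch of reading them, assuming headers that carry this
    series:

    /* Sketch only: dump the rollover counters of a PF_PACKET socket
     * that is in a fanout group using PACKET_FANOUT_FLAG_ROLLOVER.
     */
    #include <linux/if_packet.h>
    #include <stdio.h>
    #include <sys/socket.h>

    static void print_rollover_stats(int fd)
    {
    	struct tpacket_rollover_stats rs;
    	socklen_t len = sizeof(rs);

    	if (getsockopt(fd, SOL_PACKET, PACKET_ROLLOVER_STATS,
    		       &rs, &len) == 0)
    		printf("all=%llu huge=%llu failed=%llu\n",
    		       (unsigned long long)rs.tp_all,
    		       (unsigned long long)rs.tp_huge,
    		       (unsigned long long)rs.tp_failed);
    }
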
 	case PACKET_TX_HAS_OFF:
 		val = po->tp_tx_has_off;
 		break;
@@ -3697,6 +3793,8 @@
 			TP_STATUS_KERNEL))
 			mask |= POLLIN | POLLRDNORM;
 	}
+	if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
+		po->pressure = 0;
 	spin_unlock_bh(&sk->sk_receive_queue.lock);
 	spin_lock_bh(&sk->sk_write_queue.lock);
 	if (po->tx_ring.pg_vec) {
@@ -3886,7 +3984,7 @@
 		 * it above but just being paranoid
 		 */
 			if (!tx_ring)
-				init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
+				init_prb_bdqc(po, rb, pg_vec, req_u);
 			break;
 		default:
 			break;
diff --git a/net/packet/internal.h b/net/packet/internal.h
index fe6e20c..e20b3e8 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -82,12 +82,21 @@
 	atomic_t		rr_cur;
 	struct list_head	list;
 	struct sock		*arr[PACKET_FANOUT_MAX];
-	int			next[PACKET_FANOUT_MAX];
 	spinlock_t		lock;
 	atomic_t		sk_ref;
 	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
 };
 
+struct packet_rollover {
+	int			sock;
+	struct rcu_head		rcu;
+	atomic_long_t		num;
+	atomic_long_t		num_huge;
+	atomic_long_t		num_failed;
+#define ROLLOVER_HLEN	(L1_CACHE_BYTES / sizeof(u32))
+	u32			history[ROLLOVER_HLEN] ____cacheline_aligned;
+} ____cacheline_aligned_in_smp;
+
 struct packet_sock {
 	/* struct sock has to be the first member of packet_sock */
 	struct sock		sk;
@@ -102,8 +111,10 @@
 				auxdata:1,
 				origdev:1,
 				has_vnet_hdr:1;
+	int			pressure;
 	int			ifindex;	/* bound device		*/
 	__be16			num;
+	struct packet_rollover	*rollover;
 	struct packet_mclist	*mclist;
 	atomic_t		mapped;
 	enum tpacket_versions	tp_version;
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index 32ab87d..10d42f3 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -97,7 +97,7 @@
 		goto out;
 	}
 
-	sk = sk_alloc(net, PF_PHONET, GFP_KERNEL, pnp->prot);
+	sk = sk_alloc(net, PF_PHONET, GFP_KERNEL, pnp->prot, kern);
 	if (sk == NULL) {
 		err = -ENOMEM;
 		goto out;
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 6de2aeb..850a86c 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -845,7 +845,7 @@
 	}
 
 	/* Create a new to-be-accepted sock */
-	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot);
+	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot, 0);
 	if (!newsk) {
 		pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
 		err = -ENOBUFS;
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 11b623c..896834c 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -261,6 +261,28 @@
 	return ret;
 }
 
+static int rds_set_transport(struct rds_sock *rs, char __user *optval,
+			     int optlen)
+{
+	int t_type;
+
+	if (rs->rs_transport)
+		return -EOPNOTSUPP; /* previously attached to transport */
+
+	if (optlen != sizeof(int))
+		return -EINVAL;
+
+	if (copy_from_user(&t_type, (int __user *)optval, sizeof(t_type)))
+		return -EFAULT;
+
+	if (t_type < 0 || t_type >= RDS_TRANS_COUNT)
+		return -EINVAL;
+
+	rs->rs_transport = rds_trans_get(t_type);
+
+	return rs->rs_transport ? 0 : -ENOPROTOOPT;
+}
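
    SO_RDS_TRANSPORT is a set-once knob: a socket that already has a
    transport attached gets -EOPNOTSUPP, and rds_bind() (changed below) skips
    transport selection for such sockets. A hedged userspace sketch, assuming
    the RDS_TRANS_* constants are visible from the uapi header as this series
    arranges:

    /* Sketch only: pin a fresh RDS socket to the TCP transport
     * before bind(). A second call on the same socket fails.
     */
    #include <linux/rds.h>
    #include <sys/socket.h>

    static int rds_pin_tcp(int fd)
    {
    	int t = RDS_TRANS_TCP;

    	return setsockopt(fd, SOL_RDS, SO_RDS_TRANSPORT,
    			  &t, sizeof(t));
    }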
+
 static int rds_setsockopt(struct socket *sock, int level, int optname,
 			  char __user *optval, unsigned int optlen)
 {
@@ -291,6 +313,11 @@
 	case RDS_CONG_MONITOR:
 		ret = rds_cong_monitor(rs, optval, optlen);
 		break;
+	case SO_RDS_TRANSPORT:
+		lock_sock(sock->sk);
+		ret = rds_set_transport(rs, optval, optlen);
+		release_sock(sock->sk);
+		break;
 	default:
 		ret = -ENOPROTOOPT;
 	}
@@ -303,6 +330,7 @@
 {
 	struct rds_sock *rs = rds_sk_to_rs(sock->sk);
 	int ret = -ENOPROTOOPT, len;
+	int trans;
 
 	if (level != SOL_RDS)
 		goto out;
@@ -328,6 +356,19 @@
 		else
 			ret = 0;
 		break;
+	case SO_RDS_TRANSPORT:
+		if (len < sizeof(int)) {
+			ret = -EINVAL;
+			break;
+		}
+		trans = (rs->rs_transport ? rs->rs_transport->t_type :
+			 RDS_TRANS_NONE); /* unbound */
+		if (put_user(trans, (int __user *)optval) ||
+		    put_user(sizeof(int), optlen))
+			ret = -EFAULT;
+		else
+			ret = 0;
+		break;
 	default:
 		break;
 	}
@@ -431,7 +472,7 @@
 	if (sock->type != SOCK_SEQPACKET || protocol)
 		return -ESOCKTNOSUPPORT;
 
-	sk = sk_alloc(net, AF_RDS, GFP_ATOMIC, &rds_proto);
+	sk = sk_alloc(net, AF_RDS, GFP_ATOMIC, &rds_proto, kern);
 	if (!sk)
 		return -ENOMEM;
 
diff --git a/net/rds/bind.c b/net/rds/bind.c
index a2e6562..4ebd29c 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -181,6 +181,10 @@
 	if (ret)
 		goto out;
 
+	if (rs->rs_transport) { /* previously bound */
+		ret = 0;
+		goto out;
+	}
 	trans = rds_trans_get_preferred(sin->sin_addr.s_addr);
 	if (!trans) {
 		ret = -EADDRNOTAVAIL;
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 333611d..86d88ec 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -235,28 +235,34 @@
  * doesn't define it.
  */
 static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
-		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
+					      struct scatterlist *sglist,
+					      unsigned int sg_dma_len,
+					      int direction)
 {
+	struct scatterlist *sg;
 	unsigned int i;
 
-	for (i = 0; i < sg_dma_len; ++i) {
+	for_each_sg(sglist, sg, sg_dma_len, i) {
 		ib_dma_sync_single_for_cpu(dev,
-				ib_sg_dma_address(dev, &sg[i]),
-				ib_sg_dma_len(dev, &sg[i]),
+				ib_sg_dma_address(dev, sg),
+				ib_sg_dma_len(dev, sg),
 				direction);
 	}
 }
 #define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu
 
 static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
-		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
+						 struct scatterlist *sglist,
+						 unsigned int sg_dma_len,
+						 int direction)
 {
+	struct scatterlist *sg;
 	unsigned int i;
 
-	for (i = 0; i < sg_dma_len; ++i) {
+	for_each_sg(sglist, sg, sg_dma_len, i) {
 		ib_dma_sync_single_for_device(dev,
-				ib_sg_dma_address(dev, &sg[i]),
-				ib_sg_dma_len(dev, &sg[i]),
+				ib_sg_dma_address(dev, sg),
+				ib_sg_dma_len(dev, sg),
 				direction);
 	}
 }
diff --git a/net/rds/rds.h b/net/rds/rds.h
index cc54985..2260c1e4 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -410,11 +410,6 @@
  *                 should try hard not to block.
  */
 
-#define RDS_TRANS_IB	0
-#define RDS_TRANS_IWARP	1
-#define RDS_TRANS_TCP	2
-#define RDS_TRANS_COUNT	3
-
 struct rds_transport {
 	char			t_name[TRANSNAMSIZ];
 	struct list_head	t_item;
@@ -804,6 +799,7 @@
 void rds_trans_put(struct rds_transport *trans);
 unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
 				       unsigned int avail);
+struct rds_transport *rds_trans_get(int t_type);
 int rds_trans_init(void);
 void rds_trans_exit(void);
 
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 7f2ac4f..8b4a6cd 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -101,6 +101,27 @@
 	return ret;
 }
 
+struct rds_transport *rds_trans_get(int t_type)
+{
+	struct rds_transport *ret = NULL;
+	struct rds_transport *trans;
+	unsigned int i;
+
+	down_read(&rds_trans_sem);
+	for (i = 0; i < RDS_TRANS_COUNT; i++) {
+		trans = transports[i];
+
+		if (trans && trans->t_type == t_type &&
+		    (!trans->t_owner || try_module_get(trans->t_owner))) {
+			ret = trans;
+			break;
+		}
+	}
+	up_read(&rds_trans_sem);
+
+	return ret;
+}
+
 /*
  * This returns the number of stats entries in the snapshot and only
  * copies them using the iter if there is enough space for them.  The
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index fa7cd79..f12149a 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -794,7 +794,8 @@
 }
 EXPORT_SYMBOL(rfkill_resume_polling);
 
-static int rfkill_suspend(struct device *dev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int rfkill_suspend(struct device *dev)
 {
 	struct rfkill *rfkill = to_rfkill(dev);
 
@@ -818,13 +819,18 @@
 	return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume);
+#define RFKILL_PM_OPS (&rfkill_pm_ops)
+#else
+#define RFKILL_PM_OPS NULL
+#endif
+
 static struct class rfkill_class = {
 	.name		= "rfkill",
 	.dev_release	= rfkill_release,
 	.dev_groups	= rfkill_dev_groups,
 	.dev_uevent	= rfkill_dev_uevent,
-	.suspend	= rfkill_suspend,
-	.resume		= rfkill_resume,
+	.pm		= RFKILL_PM_OPS,
 };
 
 bool rfkill_blocked(struct rfkill *rfkill)
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index d978f2f..d5d58d9 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -112,21 +112,17 @@
 
 	rfkill->clk = devm_clk_get(&pdev->dev, NULL);
 
-	gpio = devm_gpiod_get(&pdev->dev, "reset");
-	if (!IS_ERR(gpio)) {
-		ret = gpiod_direction_output(gpio, 0);
-		if (ret)
-			return ret;
-		rfkill->reset_gpio = gpio;
-	}
+	gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(gpio))
+		return PTR_ERR(gpio);
 
-	gpio = devm_gpiod_get(&pdev->dev, "shutdown");
-	if (!IS_ERR(gpio)) {
-		ret = gpiod_direction_output(gpio, 0);
-		if (ret)
-			return ret;
-		rfkill->shutdown_gpio = gpio;
-	}
+	rfkill->reset_gpio = gpio;
+
+	gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_OUT_LOW);
+	if (IS_ERR(gpio))
+		return PTR_ERR(gpio);
+
+	rfkill->shutdown_gpio = gpio;
 
 	/* Make sure at least one of the GPIOs is defined and that
 	 * a name is specified for this instance
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 8ae6030..129d357 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -192,7 +192,8 @@
 
 		if (rose->device == dev) {
 			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
-			rose->neighbour->use--;
+			if (rose->neighbour)
+				rose->neighbour->use--;
 			rose->device = NULL;
 		}
 	}
@@ -520,7 +521,7 @@
 	if (sock->type != SOCK_SEQPACKET || protocol != 0)
 		return -ESOCKTNOSUPPORT;
 
-	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
+	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto, kern);
 	if (sk == NULL)
 		return -ENOMEM;
 
@@ -559,7 +560,7 @@
 	if (osk->sk_type != SOCK_SEQPACKET)
 		return NULL;
 
-	sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto);
+	sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto, 0);
 	if (sk == NULL)
 		return NULL;
 
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
index e873d7d..c76638c 100644
--- a/net/rose/rose_link.c
+++ b/net/rose/rose_link.c
@@ -25,7 +25,6 @@
 #include <linux/fcntl.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
-#include <linux/netfilter.h>
 #include <net/rose.h>
 
 static void rose_ftimer_expiry(unsigned long);
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 40148932..0fc76d8 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -31,7 +31,6 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
-#include <linux/netfilter.h>
 #include <linux/init.h>
 #include <net/rose.h>
 #include <linux/seq_file.h>
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 0095b9a..25d60ed 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -632,7 +632,7 @@
 	sock->ops = &rxrpc_rpc_ops;
 	sock->state = SS_UNCONNECTED;
 
-	sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto);
+	sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, kern);
 	if (!sk)
 		return -ENOMEM;
 
diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
index ca904ed..78483b4 100644
--- a/net/rxrpc/ar-local.c
+++ b/net/rxrpc/ar-local.c
@@ -73,8 +73,8 @@
 	_enter("%p{%d}", local, local->srx.transport_type);
 
 	/* create a socket to represent the local endpoint */
-	ret = sock_create_kern(PF_INET, local->srx.transport_type, IPPROTO_UDP,
-			       &local->socket);
+	ret = sock_create_kern(&init_net, PF_INET, local->srx.transport_type,
+			       IPPROTO_UDP, &local->socket);
 	if (ret < 0) {
 		_leave(" = %d [socket]", ret);
 		return ret;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 2274e72..daa3343 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -312,6 +312,7 @@
 config NET_SCH_INGRESS
 	tristate "Ingress Qdisc"
 	depends on NET_CLS_ACT
+	select NET_INGRESS
 	---help---
 	  Say Y here if you want to use classifiers for incoming packets.
 	  If unsure, say Y.
@@ -477,6 +478,16 @@
 	  To compile this code as a module, choose M here: the module will
 	  be called cls_bpf.
 
+config NET_CLS_FLOWER
+	tristate "Flower classifier"
+	select NET_CLS
+	---help---
+	  If you say Y here, you will be able to classify packets based on
+	  a configurable combination of packet keys and masks.
+
+	  To compile this code as a module, choose M here: the module will
+	  be called cls_flower.
+
 config NET_EMATCH
 	bool "Extended Matches"
 	select NET_CLS
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 7ca7f4c..690c1689 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -56,6 +56,7 @@
 obj-$(CONFIG_NET_CLS_FLOW)	+= cls_flow.o
 obj-$(CONFIG_NET_CLS_CGROUP)	+= cls_cgroup.o
 obj-$(CONFIG_NET_CLS_BPF)	+= cls_bpf.o
+obj-$(CONFIG_NET_CLS_FLOWER)	+= cls_flower.o
 obj-$(CONFIG_NET_EMATCH)	+= ematch.o
 obj-$(CONFIG_NET_EMATCH_CMP)	+= em_cmp.o
 obj-$(CONFIG_NET_EMATCH_NBYTE)	+= em_nbyte.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 3d43e49..af427a3 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -392,11 +392,6 @@
 	list_for_each_entry(a, actions, list) {
 repeat:
 		ret = a->ops->act(skb, a, res);
-		if (TC_MUNGED & skb->tc_verd) {
-			/* copied already, allow trampling */
-			skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
-			skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
-		}
 		if (ret == TC_ACT_REPEAT)
 			goto repeat;	/* we need a ttl - JHS */
 		if (ret != TC_ACT_PIPE)
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index dc6a2d3..1d56903 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -37,6 +37,7 @@
 {
 	struct tcf_bpf *prog = act->priv;
 	int action, filter_res;
+	bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;
 
 	if (unlikely(!skb_mac_header_was_set(skb)))
 		return TC_ACT_UNSPEC;
@@ -48,7 +49,13 @@
 
 	/* Needed here for accessing maps. */
 	rcu_read_lock();
-	filter_res = BPF_PROG_RUN(prog->filter, skb);
+	if (at_ingress) {
+		__skb_push(skb, skb->mac_len);
+		filter_res = BPF_PROG_RUN(prog->filter, skb);
+		__skb_pull(skb, skb->mac_len);
+	} else {
+		filter_res = BPF_PROG_RUN(prog->filter, skb);
+	}
 	rcu_read_unlock();
 
 	/* A BPF program may overwrite the default action opcode.
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 3f63cea..a42a3b2 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -151,7 +151,7 @@
 	}
 
 	at = G_TC_AT(skb->tc_verd);
-	skb2 = skb_act_clone(skb, GFP_ATOMIC, m->tcf_action);
+	skb2 = skb_clone(skb, GFP_ATOMIC);
 	if (skb2 == NULL)
 		goto out;
 
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 59649d5..17e6d66 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -108,7 +108,7 @@
 		     struct tcf_result *res)
 {
 	struct tcf_pedit *p = a->priv;
-	int i, munged = 0;
+	int i;
 	unsigned int off;
 
 	if (skb_unclone(skb, GFP_ATOMIC))
@@ -156,11 +156,8 @@
 			*ptr = ((*ptr & tkey->mask) ^ tkey->val);
 			if (ptr == &_data)
 				skb_store_bits(skb, off + offset, ptr, 4);
-			munged++;
 		}
 
-		if (munged)
-			skb->tc_verd = SET_TC_MUNGED(skb->tc_verd);
 		goto done;
 	} else
 		WARN(1, "pedit BUG: index %d\n", p->tcf_index);
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 91bd9c1..c79ecfd 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -64,6 +64,11 @@
 {
 	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
 	struct cls_bpf_prog *prog;
+#ifdef CONFIG_NET_CLS_ACT
+	bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;
+#else
+	bool at_ingress = false;
+#endif
 	int ret = -1;
 
 	if (unlikely(!skb_mac_header_was_set(skb)))
@@ -72,7 +77,16 @@
 	/* Needed here for accessing maps. */
 	rcu_read_lock();
 	list_for_each_entry_rcu(prog, &head->plist, link) {
-		int filter_res = BPF_PROG_RUN(prog->filter, skb);
+		int filter_res;
+
+		if (at_ingress) {
+			/* It is safe to push/pull even if skb_shared() */
+			__skb_push(skb, skb->mac_len);
+			filter_res = BPF_PROG_RUN(prog->filter, skb);
+			__skb_pull(skb, skb->mac_len);
+		} else {
+			filter_res = BPF_PROG_RUN(prog->filter, skb);
+		}
 
 		if (filter_res == 0)
 			continue;
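
    Both the act_bpf and cls_bpf hunks apply the same fix-up: at ingress,
    skb->data points at the network header, but classifier BPF programs are
    written against the mac header, so the mac header is pushed for the
    duration of the program run and pulled again afterwards. A toy sketch of
    the pointer discipline, with illustrative names:

    /* Sketch only: the push/pull pair brackets the program run and
     * leaves the data pointer exactly where it started.
     */
    #include <stdio.h>

    struct toy_skb {
    	unsigned char	*data;
    	unsigned int	mac_len;
    };

    static void run_at_ingress(struct toy_skb *skb)
    {
    	skb->data -= skb->mac_len;	/* __skb_push(): expose mac header */
    	/* ... run the BPF program against skb->data ... */
    	skb->data += skb->mac_len;	/* __skb_pull(): undo the push */
    }

    int main(void)
    {
    	unsigned char frame[64];
    	struct toy_skb skb = { .data = frame + 14, .mac_len = 14 };

    	run_at_ingress(&skb);
    	printf("offset restored: %td\n", skb.data - frame);	/* 14 */
    	return 0;
    }
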
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index a620c4e..76bc3a2 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -26,7 +26,7 @@
 #include <net/pkt_cls.h>
 #include <net/ip.h>
 #include <net/route.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netfilter/nf_conntrack.h>
@@ -68,35 +68,41 @@
 
 static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-	if (flow->src)
-		return ntohl(flow->src);
+	__be32 src = flow_get_u32_src(flow);
+
+	if (src)
+		return ntohl(src);
+
 	return addr_fold(skb->sk);
 }
 
 static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-	if (flow->dst)
-		return ntohl(flow->dst);
+	__be32 dst = flow_get_u32_dst(flow);
+
+	if (dst)
+		return ntohl(dst);
+
 	return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
 }
 
 static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-	return flow->ip_proto;
+	return flow->basic.ip_proto;
 }
 
 static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-	if (flow->ports)
-		return ntohs(flow->port16[0]);
+	if (flow->ports.ports)
+		return ntohs(flow->ports.src);
 
 	return addr_fold(skb->sk);
 }
 
 static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-	if (flow->ports)
-		return ntohs(flow->port16[1]);
+	if (flow->ports.ports)
+		return ntohs(flow->ports.dst);
 
 	return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
 }
@@ -295,7 +301,7 @@
 
 		keymask = f->keymask;
 		if (keymask & FLOW_KEYS_NEEDED)
-			skb_flow_dissect(skb, &flow_keys);
+			skb_flow_dissect_flow_keys(skb, &flow_keys);
 
 		for (n = 0; n < f->nkeys; n++) {
 			key = ffs(keymask) - 1;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
new file mode 100644
index 0000000..b92d3f4
--- /dev/null
+++ b/net/sched/cls_flower.c
@@ -0,0 +1,691 @@
+/*
+ * net/sched/cls_flower.c		Flower classifier
+ *
+ * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/rhashtable.h>
+
+#include <linux/if_ether.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+
+#include <net/sch_generic.h>
+#include <net/pkt_cls.h>
+#include <net/ip.h>
+#include <net/flow_dissector.h>
+
+struct fl_flow_key {
+	int	indev_ifindex;
+	struct flow_dissector_key_control control;
+	struct flow_dissector_key_basic basic;
+	struct flow_dissector_key_eth_addrs eth;
+	struct flow_dissector_key_addrs ipaddrs;
+	union {
+		struct flow_dissector_key_ipv4_addrs ipv4;
+		struct flow_dissector_key_ipv6_addrs ipv6;
+	};
+	struct flow_dissector_key_ports tp;
+} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
+
+struct fl_flow_mask_range {
+	unsigned short int start;
+	unsigned short int end;
+};
+
+struct fl_flow_mask {
+	struct fl_flow_key key;
+	struct fl_flow_mask_range range;
+	struct rcu_head	rcu;
+};
+
+struct cls_fl_head {
+	struct rhashtable ht;
+	struct fl_flow_mask mask;
+	struct flow_dissector dissector;
+	u32 hgen;
+	bool mask_assigned;
+	struct list_head filters;
+	struct rhashtable_params ht_params;
+	struct rcu_head rcu;
+};
+
+struct cls_fl_filter {
+	struct rhash_head ht_node;
+	struct fl_flow_key mkey;
+	struct tcf_exts exts;
+	struct tcf_result res;
+	struct fl_flow_key key;
+	struct list_head list;
+	u32 handle;
+	struct rcu_head	rcu;
+};
+
+static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
+{
+	return mask->range.end - mask->range.start;
+}
+
+static void fl_mask_update_range(struct fl_flow_mask *mask)
+{
+	const u8 *bytes = (const u8 *) &mask->key;
+	size_t size = sizeof(mask->key);
+	size_t i, first = 0, last = size - 1;
+
+	for (i = 0; i < sizeof(mask->key); i++) {
+		if (bytes[i]) {
+			if (!first && i)
+				first = i;
+			last = i;
+		}
+	}
+	mask->range.start = rounddown(first, sizeof(long));
+	mask->range.end = roundup(last + 1, sizeof(long));
+}
+
+static void *fl_key_get_start(struct fl_flow_key *key,
+			      const struct fl_flow_mask *mask)
+{
+	return (u8 *) key + mask->range.start;
+}
+
+static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
+			      struct fl_flow_mask *mask)
+{
+	const long *lkey = fl_key_get_start(key, mask);
+	const long *lmask = fl_key_get_start(&mask->key, mask);
+	long *lmkey = fl_key_get_start(mkey, mask);
+	int i;
+
+	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
+		*lmkey++ = *lkey++ & *lmask++;
+}
+
+static void fl_clear_masked_range(struct fl_flow_key *key,
+				  struct fl_flow_mask *mask)
+{
+	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
+}
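
    These helpers are what make a single shared mask cheap: the update pass
    finds the first and last nonzero mask byte and rounds the window out to
    long boundaries, so masking, hashing and comparison all run word-at-a-time
    over just that window. A standalone sketch of the range computation on a
    toy key (names illustrative):

    /* Sketch only: compute the byte window that masking and lookup
     * have to touch. "first" can stay 0 because a nonzero byte at
     * offset 0 needs no update.
     */
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define ROUNDDOWN(x, a) ((x) / (a) * (a))
    #define ROUNDUP(x, a)   (((x) + (a) - 1) / (a) * (a))

    struct toy_key { unsigned char b[64]; };

    static void mask_range(const struct toy_key *mask,
    		       size_t *start, size_t *end)
    {
    	size_t i, first = 0, last = sizeof(mask->b) - 1;

    	for (i = 0; i < sizeof(mask->b); i++) {
    		if (mask->b[i]) {
    			if (!first && i)
    				first = i;
    			last = i;
    		}
    	}
    	*start = ROUNDDOWN(first, sizeof(long));
    	*end = ROUNDUP(last + 1, sizeof(long));
    }

    int main(void)
    {
    	struct toy_key mask = { { 0 } };
    	size_t start, end;

    	memset(&mask.b[10], 0xff, 4);	/* mask covers bytes 10..13 */
    	mask_range(&mask, &start, &end);
    	printf("compare bytes [%zu, %zu)\n", start, end); /* [8, 16) on LP64 */
    	return 0;
    }

    fl_init_hashtable() further down then narrows the rhashtable key to
    exactly this window by bumping key_offset by range.start and setting
    key_len to the window length, so lookups hash only bytes the mask can
    distinguish.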
+
+static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+		       struct tcf_result *res)
+{
+	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
+	struct cls_fl_filter *f;
+	struct fl_flow_key skb_key;
+	struct fl_flow_key skb_mkey;
+
+	fl_clear_masked_range(&skb_key, &head->mask);
+	skb_key.indev_ifindex = skb->skb_iif;
+	/* skb_flow_dissect() does not set n_proto in case of an unknown
+	 * protocol, so set it here instead.
+	 */
+	skb_key.basic.n_proto = skb->protocol;
+	skb_flow_dissect(skb, &head->dissector, &skb_key);
+
+	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);
+
+	f = rhashtable_lookup_fast(&head->ht,
+				   fl_key_get_start(&skb_mkey, &head->mask),
+				   head->ht_params);
+	if (f) {
+		*res = f->res;
+		return tcf_exts_exec(skb, &f->exts, res);
+	}
+	return -1;
+}
+
+static int fl_init(struct tcf_proto *tp)
+{
+	struct cls_fl_head *head;
+
+	head = kzalloc(sizeof(*head), GFP_KERNEL);
+	if (!head)
+		return -ENOBUFS;
+
+	INIT_LIST_HEAD_RCU(&head->filters);
+	rcu_assign_pointer(tp->root, head);
+
+	return 0;
+}
+
+static void fl_destroy_filter(struct rcu_head *head)
+{
+	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
+
+	tcf_exts_destroy(&f->exts);
+	kfree(f);
+}
+
+static bool fl_destroy(struct tcf_proto *tp, bool force)
+{
+	struct cls_fl_head *head = rtnl_dereference(tp->root);
+	struct cls_fl_filter *f, *next;
+
+	if (!force && !list_empty(&head->filters))
+		return false;
+
+	list_for_each_entry_safe(f, next, &head->filters, list) {
+		list_del_rcu(&f->list);
+		call_rcu(&f->rcu, fl_destroy_filter);
+	}
+	RCU_INIT_POINTER(tp->root, NULL);
+	if (head->mask_assigned)
+		rhashtable_destroy(&head->ht);
+	kfree_rcu(head, rcu);
+	return true;
+}
+
+static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
+{
+	struct cls_fl_head *head = rtnl_dereference(tp->root);
+	struct cls_fl_filter *f;
+
+	list_for_each_entry(f, &head->filters, list)
+		if (f->handle == handle)
+			return (unsigned long) f;
+	return 0;
+}
+
+static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
+	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
+	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
+	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
+					    .len = IFNAMSIZ },
+	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
+	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
+	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
+	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
+	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
+	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
+	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
+	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
+	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
+	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
+	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
+	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
+	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
+	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
+	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
+	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
+	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
+	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
+};
+
+static void fl_set_key_val(struct nlattr **tb,
+			   void *val, int val_type,
+			   void *mask, int mask_type, int len)
+{
+	if (!tb[val_type])
+		return;
+	memcpy(val, nla_data(tb[val_type]), len);
+	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
+		memset(mask, 0xff, len);
+	else
+		memcpy(mask, nla_data(tb[mask_type]), len);
+}
+
+static int fl_set_key(struct net *net, struct nlattr **tb,
+		      struct fl_flow_key *key, struct fl_flow_key *mask)
+{
+#ifdef CONFIG_NET_CLS_IND
+	if (tb[TCA_FLOWER_INDEV]) {
+		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
+		if (err < 0)
+			return err;
+		key->indev_ifindex = err;
+		mask->indev_ifindex = 0xffffffff;
+	}
+#endif
+
+	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
+		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
+		       sizeof(key->eth.dst));
+	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
+		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
+		       sizeof(key->eth.src));
+	fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
+		       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
+		       sizeof(key->basic.n_proto));
+	if (key->basic.n_proto == htons(ETH_P_IP) ||
+	    key->basic.n_proto == htons(ETH_P_IPV6)) {
+		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
+			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
+			       sizeof(key->basic.ip_proto));
+	}
+	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
+			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
+			       sizeof(key->ipv4.src));
+		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
+			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
+			       sizeof(key->ipv4.dst));
+	} else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
+			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
+			       sizeof(key->ipv6.src));
+		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
+			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
+			       sizeof(key->ipv6.dst));
+	}
+	if (key->basic.ip_proto == IPPROTO_TCP) {
+		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
+			       &mask->tp.src, TCA_FLOWER_UNSPEC,
+			       sizeof(key->tp.src));
+		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
+			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
+			       sizeof(key->tp.dst));
+	} else if (key->basic.ip_proto == IPPROTO_UDP) {
+		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
+			       &mask->tp.src, TCA_FLOWER_UNSPEC,
+			       sizeof(key->tp.src));
+		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
+			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
+			       sizeof(key->tp.dst));
+	}
+
+	return 0;
+}
+
+static bool fl_mask_eq(struct fl_flow_mask *mask1,
+		       struct fl_flow_mask *mask2)
+{
+	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
+	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);
+
+	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
+	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
+}
+
+static const struct rhashtable_params fl_ht_params = {
+	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
+	.head_offset = offsetof(struct cls_fl_filter, ht_node),
+	.automatic_shrinking = true,
+};
+
+static int fl_init_hashtable(struct cls_fl_head *head,
+			     struct fl_flow_mask *mask)
+{
+	head->ht_params = fl_ht_params;
+	head->ht_params.key_len = fl_mask_range(mask);
+	head->ht_params.key_offset += mask->range.start;
+
+	return rhashtable_init(&head->ht, &head->ht_params);
+}
+
+#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
+#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
+#define FL_KEY_MEMBER_END_OFFSET(member)					\
+	(FL_KEY_MEMBER_OFFSET(member) + FL_KEY_MEMBER_SIZE(member))
+
+#define FL_KEY_IN_RANGE(mask, member)						\
+        (FL_KEY_MEMBER_OFFSET(member) <= (mask)->range.end &&			\
+         FL_KEY_MEMBER_END_OFFSET(member) >= (mask)->range.start)
+
+#define FL_KEY_SET(keys, cnt, id, member)					\
+	do {									\
+		keys[cnt].key_id = id;						\
+		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
+		cnt++;								\
+	} while (0)
+
+#define FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, id, member)			\
+	do {									\
+		if (FL_KEY_IN_RANGE(mask, member))				\
+			FL_KEY_SET(keys, cnt, id, member);			\
+	} while (0)
+
+static void fl_init_dissector(struct cls_fl_head *head,
+			      struct fl_flow_mask *mask)
+{
+	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
+	size_t cnt = 0;
+
+	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
+	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
+	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
+			       FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
+	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
+			       FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
+	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
+			       FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
+	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
+			       FLOW_DISSECTOR_KEY_PORTS, tp);
+
+	skb_flow_dissector_init(&head->dissector, keys, cnt);
+}
+
+static int fl_check_assign_mask(struct cls_fl_head *head,
+				struct fl_flow_mask *mask)
+{
+	int err;
+
+	if (head->mask_assigned) {
+		if (!fl_mask_eq(&head->mask, mask))
+			return -EINVAL;
+		else
+			return 0;
+	}
+
+	/* No mask has been assigned yet, so assign this one and
+	 * initialize the hashtable to match it.
+	 */
+	err = fl_init_hashtable(head, mask);
+	if (err)
+		return err;
+	memcpy(&head->mask, mask, sizeof(head->mask));
+	head->mask_assigned = true;
+
+	fl_init_dissector(head, mask);
+
+	return 0;
+}
+
+static int fl_set_parms(struct net *net, struct tcf_proto *tp,
+			struct cls_fl_filter *f, struct fl_flow_mask *mask,
+			unsigned long base, struct nlattr **tb,
+			struct nlattr *est, bool ovr)
+{
+	struct tcf_exts e;
+	int err;
+
+	tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
+	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
+	if (err < 0)
+		return err;
+
+	if (tb[TCA_FLOWER_CLASSID]) {
+		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
+		tcf_bind_filter(tp, &f->res, base);
+	}
+
+	err = fl_set_key(net, tb, &f->key, &mask->key);
+	if (err)
+		goto errout;
+
+	fl_mask_update_range(mask);
+	fl_set_masked_key(&f->mkey, &f->key, mask);
+
+	tcf_exts_change(tp, &f->exts, &e);
+
+	return 0;
+errout:
+	tcf_exts_destroy(&e);
+	return err;
+}
+
+static u32 fl_grab_new_handle(struct tcf_proto *tp,
+			      struct cls_fl_head *head)
+{
+	unsigned int i = 0x80000000;
+	u32 handle;
+
+	do {
+		if (++head->hgen == 0x7FFFFFFF)
+			head->hgen = 1;
+	} while (--i > 0 && fl_get(tp, head->hgen));
+
+	if (unlikely(i == 0)) {
+		pr_err("Insufficient number of handles\n");
+		handle = 0;
+	} else {
+		handle = head->hgen;
+	}
+
+	return handle;
+}
+
+static int fl_change(struct net *net, struct sk_buff *in_skb,
+		     struct tcf_proto *tp, unsigned long base,
+		     u32 handle, struct nlattr **tca,
+		     unsigned long *arg, bool ovr)
+{
+	struct cls_fl_head *head = rtnl_dereference(tp->root);
+	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
+	struct cls_fl_filter *fnew;
+	struct nlattr *tb[TCA_FLOWER_MAX + 1];
+	struct fl_flow_mask mask = {};
+	int err;
+
+	if (!tca[TCA_OPTIONS])
+		return -EINVAL;
+
+	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
+	if (err < 0)
+		return err;
+
+	if (fold && handle && fold->handle != handle)
+		return -EINVAL;
+
+	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
+	if (!fnew)
+		return -ENOBUFS;
+
+	tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
+
+	if (!handle) {
+		handle = fl_grab_new_handle(tp, head);
+		if (!handle) {
+			err = -EINVAL;
+			goto errout;
+		}
+	}
+	fnew->handle = handle;
+
+	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
+	if (err)
+		goto errout;
+
+	err = fl_check_assign_mask(head, &mask);
+	if (err)
+		goto errout;
+
+	err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
+				     head->ht_params);
+	if (err)
+		goto errout;
+	if (fold)
+		rhashtable_remove_fast(&head->ht, &fold->ht_node,
+				       head->ht_params);
+
+	*arg = (unsigned long) fnew;
+
+	if (fold) {
+		list_replace_rcu(&fold->list, &fnew->list);
+		tcf_unbind_filter(tp, &fold->res);
+		call_rcu(&fold->rcu, fl_destroy_filter);
+	} else {
+		list_add_tail_rcu(&fnew->list, &head->filters);
+	}
+
+	return 0;
+
+errout:
+	kfree(fnew);
+	return err;
+}
+
+static int fl_delete(struct tcf_proto *tp, unsigned long arg)
+{
+	struct cls_fl_head *head = rtnl_dereference(tp->root);
+	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;
+
+	rhashtable_remove_fast(&head->ht, &f->ht_node,
+			       head->ht_params);
+	list_del_rcu(&f->list);
+	tcf_unbind_filter(tp, &f->res);
+	call_rcu(&f->rcu, fl_destroy_filter);
+	return 0;
+}
+
+static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+{
+	struct cls_fl_head *head = rtnl_dereference(tp->root);
+	struct cls_fl_filter *f;
+
+	list_for_each_entry_rcu(f, &head->filters, list) {
+		if (arg->count < arg->skip)
+			goto skip;
+		if (arg->fn(tp, (unsigned long) f, arg) < 0) {
+			arg->stop = 1;
+			break;
+		}
+skip:
+		arg->count++;
+	}
+}
+
+static int fl_dump_key_val(struct sk_buff *skb,
+			   void *val, int val_type,
+			   void *mask, int mask_type, int len)
+{
+	int err;
+
+	if (!memchr_inv(mask, 0, len))
+		return 0;
+	err = nla_put(skb, val_type, len, val);
+	if (err)
+		return err;
+	if (mask_type != TCA_FLOWER_UNSPEC) {
+		err = nla_put(skb, mask_type, len, mask);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
+		   struct sk_buff *skb, struct tcmsg *t)
+{
+	struct cls_fl_head *head = rtnl_dereference(tp->root);
+	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
+	struct nlattr *nest;
+	struct fl_flow_key *key, *mask;
+
+	if (!f)
+		return skb->len;
+
+	t->tcm_handle = f->handle;
+
+	nest = nla_nest_start(skb, TCA_OPTIONS);
+	if (!nest)
+		goto nla_put_failure;
+
+	if (f->res.classid &&
+	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
+		goto nla_put_failure;
+
+	key = &f->key;
+	mask = &head->mask.key;
+
+	if (mask->indev_ifindex) {
+		struct net_device *dev;
+
+		dev = __dev_get_by_index(net, key->indev_ifindex);
+		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
+			goto nla_put_failure;
+	}
+
+	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
+			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
+			    sizeof(key->eth.dst)) ||
+	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
+			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
+			    sizeof(key->eth.src)) ||
+	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
+			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
+			    sizeof(key->basic.n_proto)))
+		goto nla_put_failure;
+	if ((key->basic.n_proto == htons(ETH_P_IP) ||
+	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
+	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
+			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
+			    sizeof(key->basic.ip_proto)))
+		goto nla_put_failure;
+
+	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
+	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
+			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
+			     sizeof(key->ipv4.src)) ||
+	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
+			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
+			     sizeof(key->ipv4.dst))))
+		goto nla_put_failure;
+	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
+		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
+				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
+				  sizeof(key->ipv6.src)) ||
+		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
+				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
+				  sizeof(key->ipv6.dst))))
+		goto nla_put_failure;
+
+	if (key->basic.ip_proto == IPPROTO_TCP &&
+	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
+			     &mask->tp.src, TCA_FLOWER_UNSPEC,
+			     sizeof(key->tp.src)) ||
+	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
+			     &mask->tp.dst, TCA_FLOWER_UNSPEC,
+			     sizeof(key->tp.dst))))
+		goto nla_put_failure;
+	else if (key->basic.ip_proto == IPPROTO_UDP &&
+		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
+				  &mask->tp.src, TCA_FLOWER_UNSPEC,
+				  sizeof(key->tp.src)) ||
+		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
+				  &mask->tp.dst, TCA_FLOWER_UNSPEC,
+				  sizeof(key->tp.dst))))
+		goto nla_put_failure;
+
+	if (tcf_exts_dump(skb, &f->exts))
+		goto nla_put_failure;
+
+	nla_nest_end(skb, nest);
+
+	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
+		goto nla_put_failure;
+
+	return skb->len;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nest);
+	return -1;
+}
+
+static struct tcf_proto_ops cls_fl_ops __read_mostly = {
+	.kind		= "flower",
+	.classify	= fl_classify,
+	.init		= fl_init,
+	.destroy	= fl_destroy,
+	.get		= fl_get,
+	.change		= fl_change,
+	.delete		= fl_delete,
+	.walk		= fl_walk,
+	.dump		= fl_dump,
+	.owner		= THIS_MODULE,
+};
+
+static int __init cls_fl_init(void)
+{
+	return register_tcf_proto_ops(&cls_fl_ops);
+}
+
+static void __exit cls_fl_exit(void)
+{
+	unregister_tcf_proto_ops(&cls_fl_ops);
+}
+
+module_init(cls_fl_init);
+module_exit(cls_fl_exit);
+
+MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
+MODULE_DESCRIPTION("Flower classifier");
+MODULE_LICENSE("GPL v2");
diff --git a/net/sched/em_ipset.c b/net/sched/em_ipset.c
index a3d79c8..df0328b 100644
--- a/net/sched/em_ipset.c
+++ b/net/sched/em_ipset.c
@@ -92,8 +92,8 @@
 
 	rcu_read_lock();
 
-	if (dev && skb->skb_iif)
-		indev = dev_get_by_index_rcu(dev_net(dev), skb->skb_iif);
+	if (skb->skb_iif)
+		indev = dev_get_by_index_rcu(em->net, skb->skb_iif);
 
 	acpar.in      = indev ? indev : dev;
 	acpar.out     = dev;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 73a123d..f06aa01 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1818,13 +1818,8 @@
 			continue;
 		err = tp->classify(skb, tp, res);
 
-		if (err >= 0) {
-#ifdef CONFIG_NET_CLS_ACT
-			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
-				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
-#endif
+		if (err >= 0)
 			return err;
-		}
 	}
 	return -1;
 }
@@ -1836,23 +1831,22 @@
 	int err = 0;
 #ifdef CONFIG_NET_CLS_ACT
 	const struct tcf_proto *otp = tp;
+	int limit = 0;
 reclassify:
 #endif
 
 	err = tc_classify_compat(skb, tp, res);
 #ifdef CONFIG_NET_CLS_ACT
 	if (err == TC_ACT_RECLASSIFY) {
-		u32 verd = G_TC_VERD(skb->tc_verd);
 		tp = otp;
 
-		if (verd++ >= MAX_REC_LOOP) {
+		if (unlikely(limit++ >= MAX_REC_LOOP)) {
 			net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
 					       tp->q->ops->id,
 					       tp->prio & 0xffff,
 					       ntohs(tp->protocol));
 			return TC_ACT_SHOT;
 		}
-		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
 		goto reclassify;
 	}
 #endif
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index c009eb9..93d5742 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -18,7 +18,7 @@
 #include <net/pkt_sched.h>
 #include <net/inet_ecn.h>
 #include <net/red.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
 
 /*
    CHOKe stateless AQM for fair bandwidth allocation
@@ -133,16 +133,10 @@
 	--sch->q.qlen;
 }
 
-/* private part of skb->cb[] that a qdisc is allowed to use
- * is limited to QDISC_CB_PRIV_LEN bytes.
- * As a flow key might be too large, we store a part of it only.
- */
-#define CHOKE_K_LEN min_t(u32, sizeof(struct flow_keys), QDISC_CB_PRIV_LEN - 3)
-
 struct choke_skb_cb {
 	u16			classid;
 	u8			keys_valid;
-	u8			keys[QDISC_CB_PRIV_LEN - 3];
+	struct			flow_keys_digest keys;
 };
 
 static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
@@ -176,19 +170,19 @@
 
 	if (!choke_skb_cb(skb1)->keys_valid) {
 		choke_skb_cb(skb1)->keys_valid = 1;
-		skb_flow_dissect(skb1, &temp);
-		memcpy(&choke_skb_cb(skb1)->keys, &temp, CHOKE_K_LEN);
+		skb_flow_dissect_flow_keys(skb1, &temp);
+		make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
 	}
 
 	if (!choke_skb_cb(skb2)->keys_valid) {
 		choke_skb_cb(skb2)->keys_valid = 1;
-		skb_flow_dissect(skb2, &temp);
-		memcpy(&choke_skb_cb(skb2)->keys, &temp, CHOKE_K_LEN);
+		skb_flow_dissect_flow_keys(skb2, &temp);
+		make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
 	}
 
 	return !memcmp(&choke_skb_cb(skb1)->keys,
 		       &choke_skb_cb(skb2)->keys,
-		       CHOKE_K_LEN);
+		       sizeof(choke_skb_cb(skb1)->keys));
 }
 
 /*
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 7a0bdb1..535007d 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -6,7 +6,7 @@
  *
  *  Implemented on linux by :
  *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
- *  Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -109,6 +109,7 @@
 	[TCA_CODEL_LIMIT]	= { .type = NLA_U32 },
 	[TCA_CODEL_INTERVAL]	= { .type = NLA_U32 },
 	[TCA_CODEL_ECN]		= { .type = NLA_U32 },
+	[TCA_CODEL_CE_THRESHOLD]= { .type = NLA_U32 },
 };
 
 static int codel_change(struct Qdisc *sch, struct nlattr *opt)
@@ -133,6 +134,12 @@
 		q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
 	}
 
+	if (tb[TCA_CODEL_CE_THRESHOLD]) {
+		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);
+
+		q->params.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
+	}
+
 	if (tb[TCA_CODEL_INTERVAL]) {
 		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
 
@@ -201,7 +208,10 @@
 	    nla_put_u32(skb, TCA_CODEL_ECN,
 			q->params.ecn))
 		goto nla_put_failure;
-
+	if (q->params.ce_threshold != CODEL_DISABLED_THRESHOLD &&
+	    nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
+			codel_time_to_us(q->params.ce_threshold)))
+		goto nla_put_failure;
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
@@ -220,6 +230,7 @@
 		.ldelay		= codel_time_to_us(q->vars.ldelay),
 		.dropping	= q->vars.dropping,
 		.ecn_mark	= q->stats.ecn_mark,
+		.ce_mark	= q->stats.ce_mark,
 	};
 
 	if (q->vars.dropping) {
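
    The CE threshold plumbing in sch_codel (mirrored in fq_codel below)
    stores the userspace value, given in microseconds, in CoDel's internal
    clock, which ticks in units of 1 << CODEL_SHIFT (1024) nanoseconds. A
    worked sketch of the conversion:

    /* Sketch only: the microsecond value is scaled to nanoseconds
     * and shifted down into CoDel's 1024 ns time base.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define CODEL_SHIFT   10
    #define NSEC_PER_USEC 1000ULL

    int main(void)
    {
    	uint32_t us = 1000;	/* a 1 ms CE threshold */
    	uint64_t codel_time = (us * NSEC_PER_USEC) >> CODEL_SHIFT;

    	printf("%llu\n", (unsigned long long)codel_time); /* 976 ticks */
    	return 0;
    }
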
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index c244c45b..d75993f 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -6,7 +6,7 @@
  *	as published by the Free Software Foundation; either version
  *	2 of the License, or (at your option) any later version.
  *
- *  Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
  */
 
 #include <linux/module.h>
@@ -23,7 +23,6 @@
 #include <linux/vmalloc.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
-#include <net/flow_keys.h>
 #include <net/codel.h>
 
 /*	Fair Queue CoDel.
@@ -68,15 +67,9 @@
 };
 
 static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
-				  const struct sk_buff *skb)
+				  struct sk_buff *skb)
 {
-	struct flow_keys keys;
-	unsigned int hash;
-
-	skb_flow_dissect(skb, &keys);
-	hash = jhash_3words((__force u32)keys.dst,
-			    (__force u32)keys.src ^ keys.ip_proto,
-			    (__force u32)keys.ports, q->perturbation);
+	u32 hash = skb_get_hash_perturb(skb, q->perturbation);
 
 	return reciprocal_scale(hash, q->flows_cnt);
 }
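
    fq_codel (like sfq, sfb, hhf and choke in this series) now leans on
    skb_get_hash_perturb() for a per-qdisc perturbed flow hash and on
    reciprocal_scale() to map it onto the flow table. The latter replaces a
    modulo with a multiply-and-shift; a hedged sketch:

    /* Sketch only: map a 32-bit hash uniformly onto [0, n) with one
     * multiply and one shift, avoiding a division on the fast path.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t reciprocal_scale(uint32_t val, uint32_t n)
    {
    	return (uint32_t)(((uint64_t)val * n) >> 32);
    }

    int main(void)
    {
    	printf("%u\n", reciprocal_scale(0x80000000u, 1024)); /* 512 */
    	return 0;
    }
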
@@ -299,6 +292,7 @@
 	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
 	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
 	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
+	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
 };
 
 static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
@@ -329,6 +323,12 @@
 		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
 	}
 
+	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
+		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);
+
+		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
+	}
+
 	if (tb[TCA_FQ_CODEL_INTERVAL]) {
 		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
 
@@ -448,6 +448,11 @@
 			q->flows_cnt))
 		goto nla_put_failure;
 
+	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
+	    nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
+			codel_time_to_us(q->cparams.ce_threshold)))
+		goto nla_put_failure;
+
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
@@ -466,6 +471,7 @@
 	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
 	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
 	st.qdisc_stats.new_flow_count = q->new_flow_count;
+	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
 
 	list_for_each(pos, &q->new_flows)
 		st.qdisc_stats.new_flows_len++;
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 634529e..abb9f2f 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -165,7 +165,8 @@
 			 * if no default DP has been configured. This
 			 * allows for DP flows to be left untouched.
 			 */
-			if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
+			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
+					sch->limit))
 				return qdisc_enqueue_tail(skb, sch);
 			else
 				goto drop;
@@ -397,7 +398,10 @@
 
 	q->DP = dp;
 	q->prio = prio;
-	q->limit = ctl->limit;
+	if (ctl->limit > sch->limit)
+		q->limit = sch->limit;
+	else
+		q->limit = ctl->limit;
 
 	if (q->backlog == 0)
 		red_end_of_idle_period(&q->vars);
@@ -414,6 +418,7 @@
 	[TCA_GRED_STAB]		= { .len = 256 },
 	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
 	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
+	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
 };
 
 static int gred_change(struct Qdisc *sch, struct nlattr *opt)
@@ -433,11 +438,15 @@
 	if (err < 0)
 		return err;
 
-	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL)
+	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
+		if (tb[TCA_GRED_LIMIT] != NULL)
+			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
 		return gred_change_table_def(sch, opt);
+	}
 
 	if (tb[TCA_GRED_PARMS] == NULL ||
-	    tb[TCA_GRED_STAB] == NULL)
+	    tb[TCA_GRED_STAB] == NULL ||
+	    tb[TCA_GRED_LIMIT] != NULL)
 		return -EINVAL;
 
 	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
@@ -501,6 +510,14 @@
 	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
 		return -EINVAL;
 
+	if (tb[TCA_GRED_LIMIT])
+		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
+	else {
+		u32 qlen = qdisc_dev(sch)->tx_queue_len ? : 1;
+
+		sch->limit = qlen * psched_mtu(qdisc_dev(sch));
+	}
+
 	return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
 }
 
@@ -531,6 +548,9 @@
 	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
 		goto nla_put_failure;
 
+	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
+		goto nla_put_failure;
+
 	parms = nla_nest_start(skb, TCA_GRED_PARMS);
 	if (parms == NULL)
 		goto nla_put_failure;
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 15d3aab..9d15cb6 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -9,7 +9,6 @@
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>
-#include <net/flow_keys.h>
 #include <net/pkt_sched.h>
 #include <net/sock.h>
 
@@ -176,22 +175,6 @@
 	return jiffies;
 }
 
-static unsigned int skb_hash(const struct hhf_sched_data *q,
-			     const struct sk_buff *skb)
-{
-	struct flow_keys keys;
-	unsigned int hash;
-
-	if (skb->sk && skb->sk->sk_hash)
-		return skb->sk->sk_hash;
-
-	skb_flow_dissect(skb, &keys);
-	hash = jhash_3words((__force u32)keys.dst,
-			    (__force u32)keys.src ^ keys.ip_proto,
-			    (__force u32)keys.ports, q->perturbation);
-	return hash;
-}
-
 /* Looks up a heavy-hitter flow in a chaining list of table T. */
 static struct hh_flow_state *seek_list(const u32 hash,
 				       struct list_head *head,
@@ -280,7 +263,7 @@
 	}
 
 	/* Get hashed flow-id of the skb. */
-	hash = skb_hash(q, skb);
+	hash = skb_get_hash_perturb(skb, q->perturbation);
 
 	/* Check if this packet belongs to an already established HH flow. */
 	flow_pos = hash & HHF_BIT_MASK;
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 4cdbfb8..e7c648f 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -12,16 +12,10 @@
 #include <linux/list.h>
 #include <linux/skbuff.h>
 #include <linux/rtnetlink.h>
+
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 
-
-struct ingress_qdisc_data {
-	struct tcf_proto __rcu	*filter_list;
-};
-
-/* ------------------------- Class/flow operations ------------------------- */
-
 static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
 {
 	return NULL;
@@ -49,57 +43,24 @@
 static struct tcf_proto __rcu **ingress_find_tcf(struct Qdisc *sch,
 						 unsigned long cl)
 {
-	struct ingress_qdisc_data *p = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
 
-	return &p->filter_list;
+	return &dev->ingress_cl_list;
 }
 
-/* --------------------------- Qdisc operations ---------------------------- */
-
-static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
-{
-	struct ingress_qdisc_data *p = qdisc_priv(sch);
-	struct tcf_result res;
-	struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
-	int result;
-
-	result = tc_classify(skb, fl, &res);
-
-	qdisc_bstats_update(sch, skb);
-	switch (result) {
-	case TC_ACT_SHOT:
-		result = TC_ACT_SHOT;
-		qdisc_qstats_drop(sch);
-		break;
-	case TC_ACT_STOLEN:
-	case TC_ACT_QUEUED:
-		result = TC_ACT_STOLEN;
-		break;
-	case TC_ACT_RECLASSIFY:
-	case TC_ACT_OK:
-		skb->tc_index = TC_H_MIN(res.classid);
-	default:
-		result = TC_ACT_OK;
-		break;
-	}
-
-	return result;
-}
-
-/* ------------------------------------------------------------- */
-
 static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
 {
 	net_inc_ingress_queue();
+	sch->flags |= TCQ_F_CPUSTATS;
 
 	return 0;
 }
 
 static void ingress_destroy(struct Qdisc *sch)
 {
-	struct ingress_qdisc_data *p = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
 
-	tcf_destroy_chain(&p->filter_list);
+	tcf_destroy_chain(&dev->ingress_cl_list);
 	net_dec_ingress_queue();
 }
 
@@ -110,6 +71,7 @@
 	nest = nla_nest_start(skb, TCA_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;
+
 	return nla_nest_end(skb, nest);
 
 nla_put_failure:
@@ -130,8 +92,6 @@
 static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
 	.cl_ops		=	&ingress_class_ops,
 	.id		=	"ingress",
-	.priv_size	=	sizeof(struct ingress_qdisc_data),
-	.enqueue	=	ingress_enqueue,
 	.init		=	ingress_init,
 	.destroy	=	ingress_destroy,
 	.dump		=	ingress_dump,
@@ -148,6 +108,7 @@
 	unregister_qdisc(&ingress_qdisc_ops);
 }
 
-module_init(ingress_module_init)
-module_exit(ingress_module_exit)
+module_init(ingress_module_init);
+module_exit(ingress_module_exit);
+
 MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 956ead2..5abd1d9 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -440,9 +440,9 @@
 	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
 		struct Qdisc *rootq = qdisc_root(sch);
 		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
-		q->duplicate = 0;
 
-		qdisc_enqueue_root(skb2, rootq);
+		q->duplicate = 0;
+		rootq->enqueue(skb2, rootq);
 		q->duplicate = dupsave;
 	}
 
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 3ec7e88..b8d73bc 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -339,8 +339,7 @@
 
 static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
 {
-	if (!hlist_unhashed(&agg->nonfull_next))
-		hlist_del_init(&agg->nonfull_next);
+	hlist_del_init(&agg->nonfull_next);
 	q->wsum -= agg->class_weight;
 	if (q->wsum != 0)
 		q->iwsum = ONE_FP / q->wsum;
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 5819dd8..4b81519 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -26,7 +26,6 @@
 #include <net/ip.h>
 #include <net/pkt_sched.h>
 #include <net/inet_ecn.h>
-#include <net/flow_keys.h>
 
 /*
  * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
@@ -285,9 +284,9 @@
 	int i;
 	u32 p_min = ~0;
 	u32 minqlen = ~0;
-	u32 r, slot, salt, sfbhash;
+	u32 r, sfbhash;
+	u32 slot = q->slot;
 	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-	struct flow_keys keys;
 
 	if (unlikely(sch->q.qlen >= q->limit)) {
 		qdisc_qstats_overlimit(sch);
@@ -309,22 +308,17 @@
 
 	fl = rcu_dereference_bh(q->filter_list);
 	if (fl) {
+		u32 salt;
+
 		/* If using external classifiers, get result and record it. */
 		if (!sfb_classify(skb, fl, &ret, &salt))
 			goto other_drop;
-		keys.src = salt;
-		keys.dst = 0;
-		keys.ports = 0;
+		sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
 	} else {
-		skb_flow_dissect(skb, &keys);
+		sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
 	}
 
-	slot = q->slot;
 
-	sfbhash = jhash_3words((__force u32)keys.dst,
-			       (__force u32)keys.src,
-			       (__force u32)keys.ports,
-			       q->bins[slot].perturbation);
 	if (!sfbhash)
 		sfbhash = 1;
 	sfb_skb_cb(skb)->hashes[slot] = sfbhash;
@@ -356,10 +350,8 @@
 	if (unlikely(p_min >= SFB_MAX_PROB)) {
 		/* Inelastic flow */
 		if (q->double_buffering) {
-			sfbhash = jhash_3words((__force u32)keys.dst,
-					       (__force u32)keys.src,
-					       (__force u32)keys.ports,
-					       q->bins[slot].perturbation);
+			sfbhash = skb_get_hash_perturb(skb,
+			    q->bins[slot].perturbation);
 			if (!sfbhash)
 				sfbhash = 1;
 			sfb_skb_cb(skb)->hashes[slot] = sfbhash;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index b877140..7d14926 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -23,7 +23,6 @@
 #include <linux/vmalloc.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
-#include <net/flow_keys.h>
 #include <net/red.h>
 
 
@@ -156,30 +155,10 @@
 	return &q->dep[val - SFQ_MAX_FLOWS];
 }
 
-/*
- * In order to be able to quickly rehash our queue when timer changes
- * q->perturbation, we store flow_keys in skb->cb[]
- */
-struct sfq_skb_cb {
-       struct flow_keys        keys;
-};
-
-static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
-{
-	qdisc_cb_private_validate(skb, sizeof(struct sfq_skb_cb));
-	return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
-}
-
 static unsigned int sfq_hash(const struct sfq_sched_data *q,
 			     const struct sk_buff *skb)
 {
-	const struct flow_keys *keys = &sfq_skb_cb(skb)->keys;
-	unsigned int hash;
-
-	hash = jhash_3words((__force u32)keys->dst,
-			    (__force u32)keys->src ^ keys->ip_proto,
-			    (__force u32)keys->ports, q->perturbation);
-	return hash & (q->divisor - 1);
+	return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
 }
 
 static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
@@ -196,10 +175,8 @@
 		return TC_H_MIN(skb->priority);
 
 	fl = rcu_dereference_bh(q->filter_list);
-	if (!fl) {
-		skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys);
+	if (!fl)
 		return sfq_hash(q, skb) + 1;
-	}
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	result = tc_classify(skb, fl, &res);
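
With flow_keys gone, both SFB and SFQ reduce to "one perturbable flow hash, masked by the bucket count", and a periodic rehash is just a seed change. A toy illustration; the mixer below is an arbitrary stand-in for jhash/skb_get_hash_perturb(), chosen only for dispersion:

    #include <stdint.h>
    #include <stdio.h>

    #define SFQ_DIVISOR 1024u   /* bucket count, power of two */

    /* Seeded mixer standing in for skb_get_hash_perturb(). */
    static uint32_t flow_hash_perturb(uint32_t flow, uint32_t perturbation)
    {
        uint32_t h = flow ^ perturbation;

        h *= 0x9e3779b1u;       /* golden-ratio multiplicative mix */
        h ^= h >> 16;
        return h;
    }

    int main(void)
    {
        uint32_t flow = 0xc0a80001u;    /* e.g. a folded 4-tuple */

        /* The same flow usually lands in different buckets under
         * different perturbation epochs, which is exactly what the
         * SFQ/SFB rehashing relies on. */
        printf("epoch1: bucket %u\n",
               flow_hash_perturb(flow, 0x12345678u) & (SFQ_DIVISOR - 1));
        printf("epoch2: bucket %u\n",
               flow_hash_perturb(flow, 0x9abcdef0u) & (SFQ_DIVISOR - 1));
        return 0;
    }
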
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 0e4198e..e917d27 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -331,8 +331,9 @@
 
 		rt = (struct rt6_info *)dst;
 		t->dst = dst;
-		t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
-		pr_debug("rt6_dst:%pI6 rt6_src:%pI6\n", &rt->rt6i_dst.addr,
+		t->dst_cookie = rt6_get_cookie(rt);
+		pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n",
+			 &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
 			 &fl6->saddr);
 	} else {
 		t->dst = NULL;
@@ -635,7 +636,7 @@
 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
 	struct sctp6_sock *newsctp6sk;
 
-	newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot);
+	newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0);
 	if (!newsk)
 		goto out;
 
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 53b7acd..59e8035 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -550,7 +550,7 @@
 					     struct sctp_association *asoc)
 {
 	struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,
-			sk->sk_prot);
+			sk->sk_prot, 0);
 	struct inet_sock *newinet;
 
 	if (!newsk)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f09de7f..5f6c4e6 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1528,8 +1528,10 @@
 
 	/* Supposedly, no process has access to the socket, but
 	 * the net layers still may.
+	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
+	 * held, and that lock must be taken before the socket lock.
 	 */
-	local_bh_disable();
+	spin_lock_bh(&net->sctp.addr_wq_lock);
 	bh_lock_sock(sk);
 
 	/* Hold the sock, since sk_common_release() will put sock_put()
@@ -1539,7 +1541,7 @@
 	sk_common_release(sk);
 
 	bh_unlock_sock(sk);
-	local_bh_enable();
+	spin_unlock_bh(&net->sctp.addr_wq_lock);
 
 	sock_put(sk);
 
@@ -3580,6 +3582,7 @@
 	if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
 		return 0;
 
+	spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
 	if (val == 0 && sp->do_auto_asconf) {
 		list_del(&sp->auto_asconf_list);
 		sp->do_auto_asconf = 0;
@@ -3588,6 +3591,7 @@
 		    &sock_net(sk)->sctp.auto_asconf_splist);
 		sp->do_auto_asconf = 1;
 	}
+	spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
 	return 0;
 }
 
@@ -4121,18 +4125,28 @@
 	local_bh_disable();
 	percpu_counter_inc(&sctp_sockets_allocated);
 	sock_prot_inuse_add(net, sk->sk_prot, 1);
+
+	/* Nothing can fail after this block; otherwise
+	 * sctp_destroy_sock() would be called without addr_wq_lock held
+	 */
 	if (net->sctp.default_auto_asconf) {
+		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
 		list_add_tail(&sp->auto_asconf_list,
 		    &net->sctp.auto_asconf_splist);
 		sp->do_auto_asconf = 1;
-	} else
+		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
+	} else {
 		sp->do_auto_asconf = 0;
+	}
+
 	local_bh_enable();
 
 	return 0;
 }
 
-/* Cleanup any SCTP per socket resources.  */
+/* Cleanup any SCTP per socket resources. Must be called with
+ * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
+ */
 static void sctp_destroy_sock(struct sock *sk)
 {
 	struct sctp_sock *sp;
@@ -7195,6 +7209,19 @@
 	newinet->mc_list = NULL;
 }
 
+static inline void sctp_copy_descendant(struct sock *sk_to,
+					const struct sock *sk_from)
+{
+	int ancestor_size = sizeof(struct inet_sock) +
+			    sizeof(struct sctp_sock) -
+			    offsetof(struct sctp_sock, auto_asconf_list);
+
+	if (sk_from->sk_family == PF_INET6)
+		ancestor_size += sizeof(struct ipv6_pinfo);
+
+	__inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
+}
+
 /* Populate the fields of the newsk from the oldsk and migrate the assoc
  * and its messages to the newsk.
  */
@@ -7209,7 +7236,6 @@
 	struct sk_buff *skb, *tmp;
 	struct sctp_ulpevent *event;
 	struct sctp_bind_hashbucket *head;
-	struct list_head tmplist;
 
 	/* Migrate socket buffer sizes and all the socket level options to the
 	 * new socket.
@@ -7217,12 +7243,7 @@
 	newsk->sk_sndbuf = oldsk->sk_sndbuf;
 	newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
 	/* Brute force copy old sctp opt. */
-	if (oldsp->do_auto_asconf) {
-		memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
-		inet_sk_copy_descendant(newsk, oldsk);
-		memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
-	} else
-		inet_sk_copy_descendant(newsk, oldsk);
+	sctp_copy_descendant(newsk, oldsk);
 
 	/* Restore the ep value that was overwritten with the above structure
 	 * copy.
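
The new sctp_copy_descendant() works because the ancestor_size arithmetic implies auto_asconf_list and do_auto_asconf sit at the tail of struct sctp_sock: subtracting their offset from the struct size makes the byte copy stop right before them, so the new socket's list linkage is never clobbered and the old save/copy/restore dance goes away. A userspace sketch of the same offsetof() arithmetic on toy types:

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    struct list_head { struct list_head *next, *prev; };

    /* Toy layout: an "ancestor" part at the top, copyable options in
     * the middle, and the list linkage deliberately at the end. */
    struct toy_sock {
        int              ancestor;          /* never copied */
        int              option_a;          /* copied */
        int              option_b;          /* copied */
        struct list_head auto_asconf_list;  /* preserved in the target */
        int              do_auto_asconf;    /* preserved in the target */
    };

    /* Copy only [option_a, auto_asconf_list), mirroring how the
     * offsetof() math in sctp_copy_descendant() stops short of the
     * list head. */
    static void copy_descendant(struct toy_sock *to,
                                const struct toy_sock *from)
    {
        size_t start = offsetof(struct toy_sock, option_a);
        size_t len = offsetof(struct toy_sock, auto_asconf_list) - start;

        memcpy((char *)to + start, (const char *)from + start, len);
    }

    int main(void)
    {
        struct toy_sock from = { 1, 10, 20, { NULL, NULL }, 1 };
        struct toy_sock to = { 2, 0, 0, { &to.auto_asconf_list,
                                          &to.auto_asconf_list }, 0 };

        copy_descendant(&to, &from);
        assert(to.option_a == 10 && to.option_b == 20);
        /* list linkage and flag survived the brute-force copy */
        assert(to.auto_asconf_list.next == &to.auto_asconf_list);
        assert(to.do_auto_asconf == 0);
        return 0;
    }
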
diff --git a/net/socket.c b/net/socket.c
index 884e329..9963a0b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -576,9 +576,6 @@
 	if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
 		pr_err("%s: fasync list not empty!\n", __func__);
 
-	if (test_bit(SOCK_EXTERNALLY_ALLOCATED, &sock->flags))
-		return;
-
 	this_cpu_sub(sockets_in_use, 1);
 	if (!sock->file) {
 		iput(SOCK_INODE(sock));
@@ -1213,9 +1210,9 @@
 }
 EXPORT_SYMBOL(sock_create);
 
-int sock_create_kern(int family, int type, int protocol, struct socket **res)
+int sock_create_kern(struct net *net, int family, int type, int protocol, struct socket **res)
 {
-	return __sock_create(&init_net, family, type, protocol, res, 1);
+	return __sock_create(net, family, type, protocol, res, 1);
 }
 EXPORT_SYMBOL(sock_create_kern);
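
The sock_create_kern() signature change makes kernel-socket creation namespace-aware: each call site now passes the struct net it actually lives in instead of being silently pinned to init_net. A caller-side sketch against the updated API (the open_udp_sock() helper is hypothetical):

    #include <linux/in.h>
    #include <linux/net.h>
    #include <linux/socket.h>
    #include <net/net_namespace.h>

    /* Hypothetical helper: open a kernel UDP socket inside the given
     * namespace rather than the implicit init_net of the old API. */
    static int open_udp_sock(struct net *net, struct socket **sock)
    {
        /* old: sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, sock) */
        return sock_create_kern(net, AF_INET, SOCK_DGRAM,
                                IPPROTO_UDP, sock);
    }
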
 
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 055453d48..84f77a0 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -15,97 +15,362 @@
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/netdevice.h>
+#include <linux/if_bridge.h>
 #include <net/ip_fib.h>
 #include <net/switchdev.h>
 
 /**
- *	netdev_switch_parent_id_get - Get ID of a switch
- *	@dev: port device
- *	@psid: switch ID
+ *	switchdev_port_attr_get - Get port attribute
  *
- *	Get ID of a switch this port is part of.
- */
-int netdev_switch_parent_id_get(struct net_device *dev,
-				struct netdev_phys_item_id *psid)
-{
-	const struct swdev_ops *ops = dev->swdev_ops;
-
-	if (!ops || !ops->swdev_parent_id_get)
-		return -EOPNOTSUPP;
-	return ops->swdev_parent_id_get(dev, psid);
-}
-EXPORT_SYMBOL_GPL(netdev_switch_parent_id_get);
-
-/**
- *	netdev_switch_port_stp_update - Notify switch device port of STP
- *					state change
  *	@dev: port device
- *	@state: port STP state
- *
- *	Notify switch device port of bridge port STP state change.
+ *	@attr: attribute to get
  */
-int netdev_switch_port_stp_update(struct net_device *dev, u8 state)
+int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
 {
-	const struct swdev_ops *ops = dev->swdev_ops;
+	const struct switchdev_ops *ops = dev->switchdev_ops;
 	struct net_device *lower_dev;
 	struct list_head *iter;
+	struct switchdev_attr first = {
+		.id = SWITCHDEV_ATTR_UNDEFINED
+	};
 	int err = -EOPNOTSUPP;
 
-	if (ops && ops->swdev_port_stp_update)
-		return ops->swdev_port_stp_update(dev, state);
+	if (ops && ops->switchdev_port_attr_get)
+		return ops->switchdev_port_attr_get(dev, attr);
+
+	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
+		return err;
+
+	/* Switch device port(s) may be stacked under
+	 * bond/team/vlan dev, so recurse down to get attr on
+	 * each port.  Return -ENODATA if attr values don't
+	 * match across ports.
+	 */
 
 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		err = netdev_switch_port_stp_update(lower_dev, state);
-		if (err && err != -EOPNOTSUPP)
-			return err;
+		err = switchdev_port_attr_get(lower_dev, attr);
+		if (err)
+			break;
+		if (first.id == SWITCHDEV_ATTR_UNDEFINED)
+			first = *attr;
+		else if (memcmp(&first, attr, sizeof(*attr)))
+			return -ENODATA;
 	}
 
 	return err;
 }
-EXPORT_SYMBOL_GPL(netdev_switch_port_stp_update);
+EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
 
-static DEFINE_MUTEX(netdev_switch_mutex);
-static RAW_NOTIFIER_HEAD(netdev_switch_notif_chain);
+static int __switchdev_port_attr_set(struct net_device *dev,
+				     struct switchdev_attr *attr)
+{
+	const struct switchdev_ops *ops = dev->switchdev_ops;
+	struct net_device *lower_dev;
+	struct list_head *iter;
+	int err = -EOPNOTSUPP;
+
+	if (ops && ops->switchdev_port_attr_set)
+		return ops->switchdev_port_attr_set(dev, attr);
+
+	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
+		return err;
+
+	/* Switch device port(s) may be stacked under
+	 * bond/team/vlan dev, so recurse down to set attr on
+	 * each port.
+	 */
+
+	netdev_for_each_lower_dev(dev, lower_dev, iter) {
+		err = __switchdev_port_attr_set(lower_dev, attr);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
+struct switchdev_attr_set_work {
+	struct work_struct work;
+	struct net_device *dev;
+	struct switchdev_attr attr;
+};
+
+static void switchdev_port_attr_set_work(struct work_struct *work)
+{
+	struct switchdev_attr_set_work *asw =
+		container_of(work, struct switchdev_attr_set_work, work);
+	int err;
+
+	rtnl_lock();
+	err = switchdev_port_attr_set(asw->dev, &asw->attr);
+	if (err && err != -EOPNOTSUPP)
+		netdev_err(asw->dev, "failed (err=%d) to set attribute (id=%d)\n",
+			   err, asw->attr.id);
+	rtnl_unlock();
+
+	dev_put(asw->dev);
+	kfree(work);
+}
+
+static int switchdev_port_attr_set_defer(struct net_device *dev,
+					 struct switchdev_attr *attr)
+{
+	struct switchdev_attr_set_work *asw;
+
+	asw = kmalloc(sizeof(*asw), GFP_ATOMIC);
+	if (!asw)
+		return -ENOMEM;
+
+	INIT_WORK(&asw->work, switchdev_port_attr_set_work);
+
+	dev_hold(dev);
+	asw->dev = dev;
+	memcpy(&asw->attr, attr, sizeof(asw->attr));
+
+	schedule_work(&asw->work);
+
+	return 0;
+}
 
 /**
- *	register_netdev_switch_notifier - Register notifier
+ *	switchdev_port_attr_set - Set port attribute
+ *
+ *	@dev: port device
+ *	@attr: attribute to set
+ *
+ *	Use a 2-phase prepare-commit transaction model to ensure
+ *	system is not left in a partially updated state due to
+ *	failure from driver/device.
+ */
+int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
+{
+	int err;
+
+	if (!rtnl_is_locked()) {
+		/* Running prepare-commit transaction across stacked
+		 * devices requires that nothing moves, so if rtnl_lock is
+		 * not held, schedule a worker thread to hold rtnl_lock
+		 * while setting attr.
+		 */
+
+		return switchdev_port_attr_set_defer(dev, attr);
+	}
+
+	/* Phase I: prepare for attr set. Driver/device should fail
+	 * here if there are going to be issues in the commit phase,
+	 * such as lack of resources or support.  The driver/device
+	 * should reserve resources needed for the commit phase here,
+	 * but should not commit the attr.
+	 */
+
+	attr->trans = SWITCHDEV_TRANS_PREPARE;
+	err = __switchdev_port_attr_set(dev, attr);
+	if (err) {
+		/* Prepare phase failed: abort the transaction.  Any
+		 * resources reserved in the prepare phase are
+		 * released.
+		 */
+
+		attr->trans = SWITCHDEV_TRANS_ABORT;
+		__switchdev_port_attr_set(dev, attr);
+
+		return err;
+	}
+
+	/* Phase II: commit attr set.  This cannot fail as a fault
+	 * of driver/device.  If it does, it's a bug in the driver/device
+	 * because the driver said everything was OK in phase I.
+	 */
+
+	attr->trans = SWITCHDEV_TRANS_COMMIT;
+	err = __switchdev_port_attr_set(dev, attr);
+	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
+	     dev->name, attr->id);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
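
For drivers, the contract behind the prepare-commit model is that every allocation belongs in the prepare phase; commit may only consume what prepare reserved, and abort only releases it. A self-contained userspace sketch of the pattern (the port_attr_set() hook and its one malloc are illustrative, not the switchdev API):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    enum trans_phase { TRANS_PREPARE, TRANS_COMMIT, TRANS_ABORT };

    struct port { void *reserved; int applied; };

    /* All allocation happens in PREPARE, so COMMIT has nothing left
     * that can fail, and ABORT only releases what PREPARE reserved. */
    static int port_attr_set(struct port *p, int val, enum trans_phase ph)
    {
        switch (ph) {
        case TRANS_PREPARE:
            p->reserved = malloc(64);   /* e.g. a hw table entry */
            return p->reserved ? 0 : -ENOMEM;
        case TRANS_ABORT:
            free(p->reserved);
            p->reserved = NULL;
            return 0;
        case TRANS_COMMIT:
            p->applied = val;       /* uses only pre-reserved resources */
            free(p->reserved);      /* sketch: reservation consumed */
            p->reserved = NULL;
            return 0;
        }
        return -EINVAL;
    }

    int main(void)
    {
        struct port p = { NULL, 0 };
        int err = port_attr_set(&p, 1, TRANS_PREPARE);

        if (err) {
            port_attr_set(&p, 1, TRANS_ABORT);  /* roll back */
            return 1;
        }
        err = port_attr_set(&p, 1, TRANS_COMMIT);   /* must not fail */
        printf("applied=%d err=%d\n", p.applied, err);
        return err != 0;
    }
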
+
+static int __switchdev_port_obj_add(struct net_device *dev,
+				    struct switchdev_obj *obj)
+{
+	const struct switchdev_ops *ops = dev->switchdev_ops;
+	struct net_device *lower_dev;
+	struct list_head *iter;
+	int err = -EOPNOTSUPP;
+
+	if (ops && ops->switchdev_port_obj_add)
+		return ops->switchdev_port_obj_add(dev, obj);
+
+	/* Switch device port(s) may be stacked under
+	 * bond/team/vlan dev, so recurse down to add object on
+	 * each port.
+	 */
+
+	netdev_for_each_lower_dev(dev, lower_dev, iter) {
+		err = __switchdev_port_obj_add(lower_dev, obj);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
+/**
+ *	switchdev_port_obj_add - Add port object
+ *
+ *	@dev: port device
+ *	@obj: object to add
+ *
+ *	Use a 2-phase prepare-commit transaction model to ensure
+ *	system is not left in a partially updated state due to
+ *	failure from driver/device.
+ *
+ *	rtnl_lock must be held.
+ */
+int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
+{
+	int err;
+
+	ASSERT_RTNL();
+
+	/* Phase I: prepare for obj add. Driver/device should fail
+	 * here if there are going to be issues in the commit phase,
+	 * such as lack of resources or support.  The driver/device
+	 * should reserve resources needed for the commit phase here,
+	 * but should not commit the obj.
+	 */
+
+	obj->trans = SWITCHDEV_TRANS_PREPARE;
+	err = __switchdev_port_obj_add(dev, obj);
+	if (err) {
+		/* Prepare phase failed: abort the transaction.  Any
+		 * resources reserved in the prepare phase are
+		 * released.
+		 */
+
+		obj->trans = SWITCHDEV_TRANS_ABORT;
+		__switchdev_port_obj_add(dev, obj);
+
+		return err;
+	}
+
+	/* Phase II: commit obj add.  This cannot fail as a fault
+	 * of driver/device.  If it does, it's a bug in the driver/device
+	 * because the driver said everything was OK in phase I.
+	 */
+
+	obj->trans = SWITCHDEV_TRANS_COMMIT;
+	err = __switchdev_port_obj_add(dev, obj);
+	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
+
+/**
+ *	switchdev_port_obj_del - Delete port object
+ *
+ *	@dev: port device
+ *	@obj: object to delete
+ */
+int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj)
+{
+	const struct switchdev_ops *ops = dev->switchdev_ops;
+	struct net_device *lower_dev;
+	struct list_head *iter;
+	int err = -EOPNOTSUPP;
+
+	if (ops && ops->switchdev_port_obj_del)
+		return ops->switchdev_port_obj_del(dev, obj);
+
+	/* Switch device port(s) may be stacked under
+	 * bond/team/vlan dev, so recurse down to delete object on
+	 * each port.
+	 */
+
+	netdev_for_each_lower_dev(dev, lower_dev, iter) {
+		err = switchdev_port_obj_del(lower_dev, obj);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
+
+/**
+ *	switchdev_port_obj_dump - Dump port objects
+ *
+ *	@dev: port device
+ *	@obj: object to dump
+ */
+int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj)
+{
+	const struct switchdev_ops *ops = dev->switchdev_ops;
+	struct net_device *lower_dev;
+	struct list_head *iter;
+	int err = -EOPNOTSUPP;
+
+	if (ops && ops->switchdev_port_obj_dump)
+		return ops->switchdev_port_obj_dump(dev, obj);
+
+	/* Switch device port(s) may be stacked under
+	 * bond/team/vlan dev, so recurse down to dump objects on
+	 * first port at bottom of stack.
+	 */
+
+	netdev_for_each_lower_dev(dev, lower_dev, iter) {
+		err = switchdev_port_obj_dump(lower_dev, obj);
+		break;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
+
+static DEFINE_MUTEX(switchdev_mutex);
+static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
+
+/**
+ *	register_switchdev_notifier - Register notifier
  *	@nb: notifier_block
  *
  *	Register switch device notifier. This should be used by code
  *	which needs to monitor events happening in particular device.
  *	Return values are same as for atomic_notifier_chain_register().
  */
-int register_netdev_switch_notifier(struct notifier_block *nb)
+int register_switchdev_notifier(struct notifier_block *nb)
 {
 	int err;
 
-	mutex_lock(&netdev_switch_mutex);
-	err = raw_notifier_chain_register(&netdev_switch_notif_chain, nb);
-	mutex_unlock(&netdev_switch_mutex);
+	mutex_lock(&switchdev_mutex);
+	err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
+	mutex_unlock(&switchdev_mutex);
 	return err;
 }
-EXPORT_SYMBOL_GPL(register_netdev_switch_notifier);
+EXPORT_SYMBOL_GPL(register_switchdev_notifier);
 
 /**
- *	unregister_netdev_switch_notifier - Unregister notifier
+ *	unregister_switchdev_notifier - Unregister notifier
  *	@nb: notifier_block
  *
  *	Unregister switch device notifier.
  *	Return values are same as for atomic_notifier_chain_unregister().
  */
-int unregister_netdev_switch_notifier(struct notifier_block *nb)
+int unregister_switchdev_notifier(struct notifier_block *nb)
 {
 	int err;
 
-	mutex_lock(&netdev_switch_mutex);
-	err = raw_notifier_chain_unregister(&netdev_switch_notif_chain, nb);
-	mutex_unlock(&netdev_switch_mutex);
+	mutex_lock(&switchdev_mutex);
+	err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
+	mutex_unlock(&switchdev_mutex);
 	return err;
 }
-EXPORT_SYMBOL_GPL(unregister_netdev_switch_notifier);
+EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
 
 /**
- *	call_netdev_switch_notifiers - Call notifiers
+ *	call_switchdev_notifiers - Call notifiers
  *	@val: value passed unmodified to notifier function
  *	@dev: port device
  *	@info: notifier information data
@@ -114,146 +379,502 @@
  *	when it needs to propagate hardware event.
  *	Return values are same as for atomic_notifier_call_chain().
  */
-int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
-				 struct netdev_switch_notifier_info *info)
+int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
+			     struct switchdev_notifier_info *info)
 {
 	int err;
 
 	info->dev = dev;
-	mutex_lock(&netdev_switch_mutex);
-	err = raw_notifier_call_chain(&netdev_switch_notif_chain, val, info);
-	mutex_unlock(&netdev_switch_mutex);
+	mutex_lock(&switchdev_mutex);
+	err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
+	mutex_unlock(&switchdev_mutex);
 	return err;
 }
-EXPORT_SYMBOL_GPL(call_netdev_switch_notifiers);
+EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
 
-/**
- *	netdev_switch_port_bridge_setlink - Notify switch device port of bridge
- *	port attributes
- *
- *	@dev: port device
- *	@nlh: netlink msg with bridge port attributes
- *	@flags: bridge setlink flags
- *
- *	Notify switch device port of bridge port attributes
- */
-int netdev_switch_port_bridge_setlink(struct net_device *dev,
-				      struct nlmsghdr *nlh, u16 flags)
+struct switchdev_vlan_dump {
+	struct switchdev_obj obj;
+	struct sk_buff *skb;
+	u32 filter_mask;
+	u16 flags;
+	u16 begin;
+	u16 end;
+};
+
+static int switchdev_port_vlan_dump_put(struct net_device *dev,
+					struct switchdev_vlan_dump *dump)
 {
-	const struct net_device_ops *ops = dev->netdev_ops;
+	struct bridge_vlan_info vinfo;
 
-	if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
+	vinfo.flags = dump->flags;
+
+	if (dump->begin == 0 && dump->end == 0) {
 		return 0;
-
-	if (!ops->ndo_bridge_setlink)
-		return -EOPNOTSUPP;
-
-	return ops->ndo_bridge_setlink(dev, nlh, flags);
-}
-EXPORT_SYMBOL_GPL(netdev_switch_port_bridge_setlink);
-
-/**
- *	netdev_switch_port_bridge_dellink - Notify switch device port of bridge
- *	port attribute delete
- *
- *	@dev: port device
- *	@nlh: netlink msg with bridge port attributes
- *	@flags: bridge setlink flags
- *
- *	Notify switch device port of bridge port attribute delete
- */
-int netdev_switch_port_bridge_dellink(struct net_device *dev,
-				      struct nlmsghdr *nlh, u16 flags)
-{
-	const struct net_device_ops *ops = dev->netdev_ops;
-
-	if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
-		return 0;
-
-	if (!ops->ndo_bridge_dellink)
-		return -EOPNOTSUPP;
-
-	return ops->ndo_bridge_dellink(dev, nlh, flags);
-}
-EXPORT_SYMBOL_GPL(netdev_switch_port_bridge_dellink);
-
-/**
- *	ndo_dflt_netdev_switch_port_bridge_setlink - default ndo bridge setlink
- *						     op for master devices
- *
- *	@dev: port device
- *	@nlh: netlink msg with bridge port attributes
- *	@flags: bridge setlink flags
- *
- *	Notify master device slaves of bridge port attributes
- */
-int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
-					       struct nlmsghdr *nlh, u16 flags)
-{
-	struct net_device *lower_dev;
-	struct list_head *iter;
-	int ret = 0, err = 0;
-
-	if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
-		return ret;
-
-	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		err = netdev_switch_port_bridge_setlink(lower_dev, nlh, flags);
-		if (err && err != -EOPNOTSUPP)
-			ret = err;
+	} else if (dump->begin == dump->end) {
+		vinfo.vid = dump->begin;
+		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
+			    sizeof(vinfo), &vinfo))
+			return -EMSGSIZE;
+	} else {
+		vinfo.vid = dump->begin;
+		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
+		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
+			    sizeof(vinfo), &vinfo))
+			return -EMSGSIZE;
+		vinfo.vid = dump->end;
+		vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
+		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_END;
+		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
+			    sizeof(vinfo), &vinfo))
+			return -EMSGSIZE;
 	}
 
-	return ret;
+	return 0;
 }
-EXPORT_SYMBOL_GPL(ndo_dflt_netdev_switch_port_bridge_setlink);
 
-/**
- *	ndo_dflt_netdev_switch_port_bridge_dellink - default ndo bridge dellink
- *						     op for master devices
- *
- *	@dev: port device
- *	@nlh: netlink msg with bridge port attributes
- *	@flags: bridge dellink flags
- *
- *	Notify master device slaves of bridge port attribute deletes
- */
-int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
-					       struct nlmsghdr *nlh, u16 flags)
+static int switchdev_port_vlan_dump_cb(struct net_device *dev,
+				       struct switchdev_obj *obj)
 {
-	struct net_device *lower_dev;
-	struct list_head *iter;
-	int ret = 0, err = 0;
+	struct switchdev_vlan_dump *dump =
+		container_of(obj, struct switchdev_vlan_dump, obj);
+	struct switchdev_obj_vlan *vlan = &dump->obj.u.vlan;
+	int err = 0;
 
-	if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
-		return ret;
+	if (vlan->vid_begin > vlan->vid_end)
+		return -EINVAL;
 
-	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		err = netdev_switch_port_bridge_dellink(lower_dev, nlh, flags);
-		if (err && err != -EOPNOTSUPP)
-			ret = err;
+	if (dump->filter_mask & RTEXT_FILTER_BRVLAN) {
+		dump->flags = vlan->flags;
+		for (dump->begin = dump->end = vlan->vid_begin;
+		     dump->begin <= vlan->vid_end;
+		     dump->begin++, dump->end++) {
+			err = switchdev_port_vlan_dump_put(dev, dump);
+			if (err)
+				return err;
+		}
+	} else if (dump->filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) {
+		if (dump->begin > vlan->vid_begin &&
+		    dump->begin >= vlan->vid_end) {
+			if ((dump->begin - 1) == vlan->vid_end &&
+			    dump->flags == vlan->flags) {
+				/* prepend */
+				dump->begin = vlan->vid_begin;
+			} else {
+				err = switchdev_port_vlan_dump_put(dev, dump);
+				dump->flags = vlan->flags;
+				dump->begin = vlan->vid_begin;
+				dump->end = vlan->vid_end;
+			}
+		} else if (dump->end <= vlan->vid_begin &&
+		           dump->end < vlan->vid_end) {
+			if ((dump->end  + 1) == vlan->vid_begin &&
+			    dump->flags == vlan->flags) {
+				/* append */
+				dump->end = vlan->vid_end;
+			} else {
+				err = switchdev_port_vlan_dump_put(dev, dump);
+				dump->flags = vlan->flags;
+				dump->begin = vlan->vid_begin;
+				dump->end = vlan->vid_end;
+			}
+		} else {
+			err = -EINVAL;
+		}
 	}
 
-	return ret;
+	return err;
 }
-EXPORT_SYMBOL_GPL(ndo_dflt_netdev_switch_port_bridge_dellink);
 
-static struct net_device *netdev_switch_get_lowest_dev(struct net_device *dev)
+static int switchdev_port_vlan_fill(struct sk_buff *skb, struct net_device *dev,
+				    u32 filter_mask)
 {
-	const struct swdev_ops *ops = dev->swdev_ops;
+	struct switchdev_vlan_dump dump = {
+		.obj = {
+			.id = SWITCHDEV_OBJ_PORT_VLAN,
+			.cb = switchdev_port_vlan_dump_cb,
+		},
+		.skb = skb,
+		.filter_mask = filter_mask,
+	};
+	int err = 0;
+
+	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
+	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
+		err = switchdev_port_obj_dump(dev, &dump.obj);
+		if (err)
+			goto err_out;
+		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
+			/* last one */
+			err = switchdev_port_vlan_dump_put(dev, &dump);
+	}
+
+err_out:
+	return err == -EOPNOTSUPP ? 0 : err;
+}
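
The compressed dump path above folds per-VLAN entries into [begin, end] ranges whenever the next VID is contiguous and the flags match, flushing the open range otherwise, with the final range emitted separately ("last one"). The same coalescing logic in a standalone sketch over toy input:

    #include <stdio.h>

    struct vrange { unsigned int begin, end, flags; };

    static void emit(const struct vrange *r)
    {
        if (r->begin == r->end)
            printf("vid %u flags %#x\n", r->begin, r->flags);
        else
            printf("vid %u-%u flags %#x\n", r->begin, r->end, r->flags);
    }

    int main(void)
    {
        /* per-VLAN entries as a driver's dump callback might report */
        struct { unsigned int vid, flags; } in[] = {
            { 10, 0 }, { 11, 0 }, { 12, 0 }, { 20, 0 }, { 21, 4 },
        };
        unsigned int i, n = sizeof(in) / sizeof(in[0]);
        struct vrange r = { in[0].vid, in[0].vid, in[0].flags };

        for (i = 1; i < n; i++) {
            if (r.end + 1 == in[i].vid && r.flags == in[i].flags) {
                r.end = in[i].vid;  /* append to the open range */
            } else {
                emit(&r);           /* flush, then start a new range */
                r.begin = r.end = in[i].vid;
                r.flags = in[i].flags;
            }
        }
        emit(&r);   /* the "last one", as in switchdev_port_vlan_fill() */
        return 0;
    }
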
+
+/**
+ *	switchdev_port_bridge_getlink - Get bridge port attributes
+ *
+ *	@dev: port device
+ *
+ *	Called for SELF on rtnl_bridge_getlink to get bridge port
+ *	attributes.
+ */
+int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				  struct net_device *dev, u32 filter_mask,
+				  int nlflags)
+{
+	struct switchdev_attr attr = {
+		.id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
+	};
+	u16 mode = BRIDGE_MODE_UNDEF;
+	u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
+	int err;
+
+	err = switchdev_port_attr_get(dev, &attr);
+	if (err && err != -EOPNOTSUPP)
+		return err;
+
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
+				       attr.u.brport_flags, mask, nlflags,
+				       filter_mask, switchdev_port_vlan_fill);
+}
+EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink);
+
+static int switchdev_port_br_setflag(struct net_device *dev,
+				     struct nlattr *nlattr,
+				     unsigned long brport_flag)
+{
+	struct switchdev_attr attr = {
+		.id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
+	};
+	u8 flag = nla_get_u8(nlattr);
+	int err;
+
+	err = switchdev_port_attr_get(dev, &attr);
+	if (err)
+		return err;
+
+	if (flag)
+		attr.u.brport_flags |= brport_flag;
+	else
+		attr.u.brport_flags &= ~brport_flag;
+
+	return switchdev_port_attr_set(dev, &attr);
+}
+
+static const struct nla_policy
+switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
+	[IFLA_BRPORT_STATE]		= { .type = NLA_U8 },
+	[IFLA_BRPORT_COST]		= { .type = NLA_U32 },
+	[IFLA_BRPORT_PRIORITY]		= { .type = NLA_U16 },
+	[IFLA_BRPORT_MODE]		= { .type = NLA_U8 },
+	[IFLA_BRPORT_GUARD]		= { .type = NLA_U8 },
+	[IFLA_BRPORT_PROTECT]		= { .type = NLA_U8 },
+	[IFLA_BRPORT_FAST_LEAVE]	= { .type = NLA_U8 },
+	[IFLA_BRPORT_LEARNING]		= { .type = NLA_U8 },
+	[IFLA_BRPORT_LEARNING_SYNC]	= { .type = NLA_U8 },
+	[IFLA_BRPORT_UNICAST_FLOOD]	= { .type = NLA_U8 },
+};
+
+static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
+					      struct nlattr *protinfo)
+{
+	struct nlattr *attr;
+	int rem;
+	int err;
+
+	err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
+				  switchdev_port_bridge_policy);
+	if (err)
+		return err;
+
+	nla_for_each_nested(attr, protinfo, rem) {
+		switch (nla_type(attr)) {
+		case IFLA_BRPORT_LEARNING:
+			err = switchdev_port_br_setflag(dev, attr,
+							BR_LEARNING);
+			break;
+		case IFLA_BRPORT_LEARNING_SYNC:
+			err = switchdev_port_br_setflag(dev, attr,
+							BR_LEARNING_SYNC);
+			break;
+		default:
+			err = -EOPNOTSUPP;
+			break;
+		}
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int switchdev_port_br_afspec(struct net_device *dev,
+				    struct nlattr *afspec,
+				    int (*f)(struct net_device *dev,
+					     struct switchdev_obj *obj))
+{
+	struct nlattr *attr;
+	struct bridge_vlan_info *vinfo;
+	struct switchdev_obj obj = {
+		.id = SWITCHDEV_OBJ_PORT_VLAN,
+	};
+	struct switchdev_obj_vlan *vlan = &obj.u.vlan;
+	int rem;
+	int err;
+
+	nla_for_each_nested(attr, afspec, rem) {
+		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
+			continue;
+		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
+			return -EINVAL;
+		vinfo = nla_data(attr);
+		vlan->flags = vinfo->flags;
+		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
+			if (vlan->vid_begin)
+				return -EINVAL;
+			vlan->vid_begin = vinfo->vid;
+		} else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
+			if (!vlan->vid_begin)
+				return -EINVAL;
+			vlan->vid_end = vinfo->vid;
+			if (vlan->vid_end <= vlan->vid_begin)
+				return -EINVAL;
+			err = f(dev, &obj);
+			if (err)
+				return err;
+			memset(vlan, 0, sizeof(*vlan));
+		} else {
+			if (vlan->vid_begin)
+				return -EINVAL;
+			vlan->vid_begin = vinfo->vid;
+			vlan->vid_end = vinfo->vid;
+			err = f(dev, &obj);
+			if (err)
+				return err;
+			memset(vlan, 0, sizeof(*vlan));
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *	switchdev_port_bridge_setlink - Set bridge port attributes
+ *
+ *	@dev: port device
+ *	@nlh: netlink header
+ *	@flags: netlink flags
+ *
+ *	Called for SELF on rtnl_bridge_setlink to set bridge port
+ *	attributes.
+ */
+int switchdev_port_bridge_setlink(struct net_device *dev,
+				  struct nlmsghdr *nlh, u16 flags)
+{
+	struct nlattr *protinfo;
+	struct nlattr *afspec;
+	int err = 0;
+
+	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
+				   IFLA_PROTINFO);
+	if (protinfo) {
+		err = switchdev_port_br_setlink_protinfo(dev, protinfo);
+		if (err)
+			return err;
+	}
+
+	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
+				 IFLA_AF_SPEC);
+	if (afspec)
+		err = switchdev_port_br_afspec(dev, afspec,
+					       switchdev_port_obj_add);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);
+
+/**
+ *	switchdev_port_bridge_dellink - Delete bridge port attributes
+ *
+ *	@dev: port device
+ *	@nlh: netlink header
+ *	@flags: netlink flags
+ *
+ *	Called for SELF on rtnl_bridge_dellink to delete bridge port
+ *	attributes.
+ */
+int switchdev_port_bridge_dellink(struct net_device *dev,
+				  struct nlmsghdr *nlh, u16 flags)
+{
+	struct nlattr *afspec;
+
+	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
+				 IFLA_AF_SPEC);
+	if (afspec)
+		return switchdev_port_br_afspec(dev, afspec,
+						switchdev_port_obj_del);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);
+
+/**
+ *	switchdev_port_fdb_add - Add FDB (MAC/VLAN) entry to port
+ *
+ *	@ndm: netlink header
+ *	@tb: netlink attributes
+ *	@dev: port device
+ *	@addr: MAC address to add
+ *	@vid: VLAN to add
+ *	@nlm_flags: netlink flags passed in (NLM_F_*)
+ *
+ *	Add FDB entry to switch device.
+ */
+int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			   struct net_device *dev, const unsigned char *addr,
+			   u16 vid, u16 nlm_flags)
+{
+	struct switchdev_obj obj = {
+		.id = SWITCHDEV_OBJ_PORT_FDB,
+		.u.fdb = {
+			.addr = addr,
+			.vid = vid,
+		},
+	};
+
+	return switchdev_port_obj_add(dev, &obj);
+}
+EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);
+
+/**
+ *	switchdev_port_fdb_del - Delete FDB (MAC/VLAN) entry from port
+ *
+ *	@ndm: netlink header
+ *	@tb: netlink attributes
+ *	@dev: port device
+ *	@addr: MAC address to delete
+ *	@vid: VLAN to delete
+ *
+ *	Delete FDB entry from switch device.
+ */
+int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+			   struct net_device *dev, const unsigned char *addr,
+			   u16 vid)
+{
+	struct switchdev_obj obj = {
+		.id = SWITCHDEV_OBJ_PORT_FDB,
+		.u.fdb = {
+			.addr = addr,
+			.vid = vid,
+		},
+	};
+
+	return switchdev_port_obj_del(dev, &obj);
+}
+EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);
+
+struct switchdev_fdb_dump {
+	struct switchdev_obj obj;
+	struct sk_buff *skb;
+	struct netlink_callback *cb;
+	int idx;
+};
+
+static int switchdev_port_fdb_dump_cb(struct net_device *dev,
+				      struct switchdev_obj *obj)
+{
+	struct switchdev_fdb_dump *dump =
+		container_of(obj, struct switchdev_fdb_dump, obj);
+	u32 portid = NETLINK_CB(dump->cb->skb).portid;
+	u32 seq = dump->cb->nlh->nlmsg_seq;
+	struct nlmsghdr *nlh;
+	struct ndmsg *ndm;
+
+	if (dump->idx < dump->cb->args[0])
+		goto skip;
+
+	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
+			sizeof(*ndm), NLM_F_MULTI);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	ndm = nlmsg_data(nlh);
+	ndm->ndm_family  = AF_BRIDGE;
+	ndm->ndm_pad1    = 0;
+	ndm->ndm_pad2    = 0;
+	ndm->ndm_flags   = NTF_SELF;
+	ndm->ndm_type    = 0;
+	ndm->ndm_ifindex = dev->ifindex;
+	ndm->ndm_state   = NUD_REACHABLE;
+
+	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, obj->u.fdb.addr))
+		goto nla_put_failure;
+
+	if (obj->u.fdb.vid && nla_put_u16(dump->skb, NDA_VLAN, obj->u.fdb.vid))
+		goto nla_put_failure;
+
+	nlmsg_end(dump->skb, nlh);
+
+skip:
+	dump->idx++;
+	return 0;
+
+nla_put_failure:
+	nlmsg_cancel(dump->skb, nlh);
+	return -EMSGSIZE;
+}
+
+/**
+ *	switchdev_port_fdb_dump - Dump port FDB (MAC/VLAN) entries
+ *
+ *	@skb: netlink skb
+ *	@cb: netlink callback
+ *	@dev: port device
+ *	@filter_dev: filter device
+ *	@idx: index at which to resume the dump
+ *
+ *	Dump FDB (MAC/VLAN) entries from switch device.
+ */
+int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+			    struct net_device *dev,
+			    struct net_device *filter_dev, int idx)
+{
+	struct switchdev_fdb_dump dump = {
+		.obj = {
+			.id = SWITCHDEV_OBJ_PORT_FDB,
+			.cb = switchdev_port_fdb_dump_cb,
+		},
+		.skb = skb,
+		.cb = cb,
+		.idx = idx,
+	};
+	int err;
+
+	err = switchdev_port_obj_dump(dev, &dump.obj);
+	if (err)
+		return err;
+
+	return dump.idx;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
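
The idx bookkeeping above is the standard resumable-dump contract: entries below cb->args[0] are skipped, and the returned index tells the netlink core where to resume once the current message fills up. A userspace model of that loop (PER_SKB is an arbitrary stand-in for the real "skb full" condition):

    #include <stdio.h>

    #define PER_SKB 3   /* entries that fit in one message, for the sketch */

    /* Walk all entries, skipping those below 'start' and stopping when
     * the current message is full; the returned index is stored by the
     * core and passed back in as 'start' on the next invocation. */
    static int dump(int start, int total)
    {
        int idx;

        for (idx = 0; idx < total; idx++) {
            if (idx < start)
                continue;           /* already sent last round */
            if (idx - start == PER_SKB)
                break;              /* message full: resume later */
            printf("entry %d\n", idx);
        }
        return idx;
    }

    int main(void)
    {
        int next = 0, total = 8;

        while (next < total)
            next = dump(next, total);   /* 0 -> 3 -> 6 -> 8 */
        return 0;
    }
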
+
+static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
+{
+	const struct switchdev_ops *ops = dev->switchdev_ops;
 	struct net_device *lower_dev;
 	struct net_device *port_dev;
 	struct list_head *iter;
 
 	/* Recursively search down until we find a sw port dev.
-	 * (A sw port dev supports swdev_parent_id_get).
+	 * (A sw port dev supports switchdev_port_attr_get).
 	 */
 
-	if (dev->features & NETIF_F_HW_SWITCH_OFFLOAD &&
-	    ops && ops->swdev_parent_id_get)
+	if (ops && ops->switchdev_port_attr_get)
 		return dev;
 
 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		port_dev = netdev_switch_get_lowest_dev(lower_dev);
+		port_dev = switchdev_get_lowest_dev(lower_dev);
 		if (port_dev)
 			return port_dev;
 	}
@@ -261,10 +882,12 @@
 	return NULL;
 }
 
-static struct net_device *netdev_switch_get_dev_by_nhs(struct fib_info *fi)
+static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
 {
-	struct netdev_phys_item_id psid;
-	struct netdev_phys_item_id prev_psid;
+	struct switchdev_attr attr = {
+		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+	};
+	struct switchdev_attr prev_attr;
 	struct net_device *dev = NULL;
 	int nhsel;
 
@@ -276,28 +899,29 @@
 		if (!nh->nh_dev)
 			return NULL;
 
-		dev = netdev_switch_get_lowest_dev(nh->nh_dev);
+		dev = switchdev_get_lowest_dev(nh->nh_dev);
 		if (!dev)
 			return NULL;
 
-		if (netdev_switch_parent_id_get(dev, &psid))
+		if (switchdev_port_attr_get(dev, &attr))
 			return NULL;
 
 		if (nhsel > 0) {
-			if (prev_psid.id_len != psid.id_len)
+			if (prev_attr.u.ppid.id_len != attr.u.ppid.id_len)
 				return NULL;
-			if (memcmp(prev_psid.id, psid.id, psid.id_len))
+			if (memcmp(prev_attr.u.ppid.id, attr.u.ppid.id,
+				   attr.u.ppid.id_len))
 				return NULL;
 		}
 
-		prev_psid = psid;
+		prev_attr = attr;
 	}
 
 	return dev;
 }
 
 /**
- *	netdev_switch_fib_ipv4_add - Add IPv4 route entry to switch
+ *	switchdev_fib_ipv4_add - Add/modify switch IPv4 route entry
  *
  *	@dst: route's IPv4 destination address
  *	@dst_len: destination address length (prefix length)
@@ -307,13 +931,24 @@
  *	@nlflags: netlink flags passed in (NLM_F_*)
  *	@tb_id: route table ID
  *
- *	Add IPv4 route entry to switch device.
+ *	Add/modify switch IPv4 route entry.
  */
-int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
-			       u8 tos, u8 type, u32 nlflags, u32 tb_id)
+int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
+			   u8 tos, u8 type, u32 nlflags, u32 tb_id)
 {
+	struct switchdev_obj fib_obj = {
+		.id = SWITCHDEV_OBJ_IPV4_FIB,
+		.u.ipv4_fib = {
+			.dst = dst,
+			.dst_len = dst_len,
+			.fi = fi,
+			.tos = tos,
+			.type = type,
+			.nlflags = nlflags,
+			.tb_id = tb_id,
+		},
+	};
 	struct net_device *dev;
-	const struct swdev_ops *ops;
 	int err = 0;
 
 	/* Don't offload route if using custom ip rules or if
@@ -328,25 +963,20 @@
 	if (fi->fib_net->ipv4.fib_offload_disabled)
 		return 0;
 
-	dev = netdev_switch_get_dev_by_nhs(fi);
+	dev = switchdev_get_dev_by_nhs(fi);
 	if (!dev)
 		return 0;
-	ops = dev->swdev_ops;
 
-	if (ops->swdev_fib_ipv4_add) {
-		err = ops->swdev_fib_ipv4_add(dev, htonl(dst), dst_len,
-					      fi, tos, type, nlflags,
-					      tb_id);
-		if (!err)
-			fi->fib_flags |= RTNH_F_OFFLOAD;
-	}
+	err = switchdev_port_obj_add(dev, &fib_obj);
+	if (!err)
+		fi->fib_flags |= RTNH_F_OFFLOAD;
 
-	return err;
+	return err == -EOPNOTSUPP ? 0 : err;
 }
-EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_add);
+EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);
 
 /**
- *	netdev_switch_fib_ipv4_del - Delete IPv4 route entry from switch
+ *	switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
  *
  *	@dst: route's IPv4 destination address
  *	@dst_len: destination address length (prefix length)
@@ -357,38 +987,45 @@
  *
  *	Delete IPv4 route entry from switch device.
  */
-int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
-			       u8 tos, u8 type, u32 tb_id)
+int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
+			   u8 tos, u8 type, u32 tb_id)
 {
+	struct switchdev_obj fib_obj = {
+		.id = SWITCHDEV_OBJ_IPV4_FIB,
+		.u.ipv4_fib = {
+			.dst = dst,
+			.dst_len = dst_len,
+			.fi = fi,
+			.tos = tos,
+			.type = type,
+			.nlflags = 0,
+			.tb_id = tb_id,
+		},
+	};
 	struct net_device *dev;
-	const struct swdev_ops *ops;
 	int err = 0;
 
 	if (!(fi->fib_flags & RTNH_F_OFFLOAD))
 		return 0;
 
-	dev = netdev_switch_get_dev_by_nhs(fi);
+	dev = switchdev_get_dev_by_nhs(fi);
 	if (!dev)
 		return 0;
-	ops = dev->swdev_ops;
 
-	if (ops->swdev_fib_ipv4_del) {
-		err = ops->swdev_fib_ipv4_del(dev, htonl(dst), dst_len,
-					      fi, tos, type, tb_id);
-		if (!err)
-			fi->fib_flags &= ~RTNH_F_OFFLOAD;
-	}
+	err = switchdev_port_obj_del(dev, &fib_obj);
+	if (!err)
+		fi->fib_flags &= ~RTNH_F_OFFLOAD;
 
-	return err;
+	return err == -EOPNOTSUPP ? 0 : err;
 }
-EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_del);
+EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);
 
 /**
- *	netdev_switch_fib_ipv4_abort - Abort an IPv4 FIB operation
+ *	switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
  *
  *	@fi: route FIB info structure
  */
-void netdev_switch_fib_ipv4_abort(struct fib_info *fi)
+void switchdev_fib_ipv4_abort(struct fib_info *fi)
 {
 	/* There was a problem installing this route to the offload
 	 * device.  For now, until we come up with more refined
@@ -401,4 +1038,4 @@
 	fib_flush_external(fi->fib_net);
 	fi->fib_net->ipv4.fib_offload_disabled = true;
 }
-EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_abort);
+EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);
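
Note the error policy in both FIB paths: only a successful hardware add/del toggles RTNH_F_OFFLOAD, and -EOPNOTSUPP is folded into success because the software FIB stays authoritative when no port underneath can offload the route. A compact sketch of that policy (hw_route_add() stands in for switchdev_port_obj_add()):

    #include <errno.h>
    #include <stdio.h>

    #define RTNH_F_OFFLOAD 0x8  /* mirrors the kernel flag value */

    struct route { unsigned int flags; };

    /* Flip 'supported' to model a stack with no offload-capable port. */
    static int hw_route_add(int supported)
    {
        return supported ? 0 : -EOPNOTSUPP;
    }

    static int fib_add(struct route *r, int hw_supported)
    {
        int err = hw_route_add(hw_supported);

        if (!err)
            r->flags |= RTNH_F_OFFLOAD;  /* mark as hardware-backed */

        /* Lack of hardware support is not an error: the route still
         * lives in the software FIB. */
        return err == -EOPNOTSUPP ? 0 : err;
    }

    int main(void)
    {
        struct route r = { 0 };

        printf("add=%d offloaded=%d\n", fib_add(&r, 1),
               !!(r.flags & RTNH_F_OFFLOAD));
        return 0;
    }
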
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index ba7daa8..48fd3b5 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -38,13 +38,6 @@
 #include "addr.h"
 #include "core.h"
 
-u32 tipc_own_addr(struct net *net)
-{
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-	return tn->own_addr;
-}
-
 /**
  * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
  */
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index 7ba6d5c..93f7c98 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -41,10 +41,18 @@
 #include <linux/tipc.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
+#include "core.h"
 
 #define TIPC_ZONE_MASK		0xff000000u
 #define TIPC_CLUSTER_MASK	0xfffff000u
 
+static inline u32 tipc_own_addr(struct net *net)
+{
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+	return tn->own_addr;
+}
+
 static inline u32 tipc_zone_mask(u32 addr)
 {
 	return addr & TIPC_ZONE_MASK;
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index c5cbdcb..4906ca3 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -115,19 +115,15 @@
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	struct tipc_link *bcl = tn->bcl;
-	struct sk_buff *skb = skb_peek(&bcl->backlogq);
 
-	if (skb)
-		bcl->fsm_msg_cnt = mod(buf_seqno(skb) - 1);
-	else
-		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
+	bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
 }
 
 u32 tipc_bclink_get_last_sent(struct net *net)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-	return tn->bcl->fsm_msg_cnt;
+	return tn->bcl->silent_intv_cnt;
 }
 
 static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
@@ -212,16 +208,16 @@
 		 * or both sent and unsent messages (otherwise)
 		 */
 		if (tn->bclink->bcast_nodes.count)
-			acked = tn->bcl->fsm_msg_cnt;
+			acked = tn->bcl->silent_intv_cnt;
 		else
-			acked = tn->bcl->next_out_no;
+			acked = tn->bcl->snd_nxt;
 	} else {
 		/*
 		 * Bail out if specified sequence number does not correspond
 		 * to a message that has been sent and not yet acknowledged
 		 */
 		if (less(acked, buf_seqno(skb)) ||
-		    less(tn->bcl->fsm_msg_cnt, acked) ||
+		    less(tn->bcl->silent_intv_cnt, acked) ||
 		    less_eq(acked, n_ptr->bclink.acked))
 			goto exit;
 	}
@@ -803,9 +799,9 @@
 		goto attr_msg_full;
 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
 		goto attr_msg_full;
-	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->next_in_no))
+	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
 		goto attr_msg_full;
-	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->next_out_no))
+	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
 		goto attr_msg_full;
 
 	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
@@ -866,6 +862,27 @@
 	return 0;
 }
 
+int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
+{
+	int err;
+	u32 win;
+	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+
+	if (!attrs[TIPC_NLA_LINK_PROP])
+		return -EINVAL;
+
+	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
+	if (err)
+		return err;
+
+	if (!props[TIPC_NLA_PROP_WIN])
+		return -EOPNOTSUPP;
+
+	win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+
+	return tipc_bclink_set_queue_limits(net, win);
+}
+
 int tipc_bclink_init(struct net *net)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
@@ -893,7 +910,7 @@
 	__skb_queue_head_init(&bcl->backlogq);
 	__skb_queue_head_init(&bcl->deferdq);
 	skb_queue_head_init(&bcl->wakeupq);
-	bcl->next_out_no = 1;
+	bcl->snd_nxt = 1;
 	spin_lock_init(&bclink->node.lock);
 	__skb_queue_head_init(&bclink->arrvq);
 	skb_queue_head_init(&bclink->inputq);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 4bdc122..3c290a48 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -131,6 +131,7 @@
 int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list);
 void tipc_bclink_wakeup_users(struct net *net);
 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
+int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]);
 void tipc_bclink_input(struct net *net);
 
 #endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 70e3dac..00bc0e6 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -71,8 +71,7 @@
 	[TIPC_NLA_MEDIA_PROP]		= { .type = NLA_NESTED }
 };
 
-static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
-			   bool shutting_down);
+static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr);
 
 /**
  * tipc_media_find - locates specified media object by name
@@ -324,7 +323,7 @@
 
 	res = tipc_disc_create(net, b_ptr, &b_ptr->bcast_addr);
 	if (res) {
-		bearer_disable(net, b_ptr, false);
+		bearer_disable(net, b_ptr);
 		pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
 			name);
 		return -EINVAL;
@@ -344,7 +343,7 @@
 static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
 {
 	pr_info("Resetting bearer <%s>\n", b_ptr->name);
-	tipc_link_reset_list(net, b_ptr->identity);
+	tipc_link_delete_list(net, b_ptr->identity);
 	tipc_disc_reset(net, b_ptr);
 	return 0;
 }
@@ -354,8 +353,7 @@
  *
  * Note: This routine assumes caller holds RTNL lock.
  */
-static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
-			   bool shutting_down)
+static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	u32 i;
@@ -363,7 +361,7 @@
 	pr_info("Disabling bearer <%s>\n", b_ptr->name);
 	b_ptr->media->disable_media(b_ptr);
 
-	tipc_link_delete_list(net, b_ptr->identity, shutting_down);
+	tipc_link_delete_list(net, b_ptr->identity);
 	if (b_ptr->link_req)
 		tipc_disc_delete(b_ptr->link_req);
 
@@ -541,7 +539,7 @@
 		break;
 	case NETDEV_UNREGISTER:
 	case NETDEV_CHANGENAME:
-		bearer_disable(dev_net(dev), b_ptr, false);
+		bearer_disable(dev_net(dev), b_ptr);
 		break;
 	}
 	return NOTIFY_OK;
@@ -583,7 +581,7 @@
 	for (i = 0; i < MAX_BEARERS; i++) {
 		b_ptr = rtnl_dereference(tn->bearer_list[i]);
 		if (b_ptr) {
-			bearer_disable(net, b_ptr, true);
+			bearer_disable(net, b_ptr);
 			tn->bearer_list[i] = NULL;
 		}
 	}
@@ -747,7 +745,7 @@
 		return -EINVAL;
 	}
 
-	bearer_disable(net, bearer, false);
+	bearer_disable(net, bearer);
 	rtnl_unlock();
 
 	return 0;
@@ -812,7 +810,7 @@
 	char *name;
 	struct tipc_bearer *b;
 	struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
-	struct net *net = genl_info_net(info);
+	struct net *net = sock_net(skb->sk);
 
 	if (!info->attrs[TIPC_NLA_BEARER])
 		return -EINVAL;
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 5cad243..dc714d9 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -38,9 +38,9 @@
 #define _TIPC_BEARER_H
 
 #include "netlink.h"
+#include "core.h"
 #include <net/genetlink.h>
 
-#define MAX_BEARERS	2
 #define MAX_MEDIA	3
 #define MAX_NODES	4096
 #define WSIZE		32
diff --git a/net/tipc/core.c b/net/tipc/core.c
index be1c9fa..005ba5e 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -68,7 +68,7 @@
 	if (err)
 		goto out_nametbl;
 
-	err = tipc_subscr_start(net);
+	err = tipc_topsrv_start(net);
 	if (err)
 		goto out_subscr;
 	return 0;
@@ -83,7 +83,7 @@
 
 static void __net_exit tipc_exit_net(struct net *net)
 {
-	tipc_subscr_stop(net);
+	tipc_topsrv_stop(net);
 	tipc_net_stop(net);
 	tipc_nametbl_stop(net);
 	tipc_sk_rht_destroy(net);
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 3dc68c7..0fcf133 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -60,16 +60,19 @@
 #include <net/netns/generic.h>
 #include <linux/rhashtable.h>
 
-#include "node.h"
-#include "bearer.h"
-#include "bcast.h"
-#include "netlink.h"
-#include "link.h"
-#include "node.h"
-#include "msg.h"
+struct tipc_node;
+struct tipc_bearer;
+struct tipc_bcbearer;
+struct tipc_bclink;
+struct tipc_link;
+struct tipc_name_table;
+struct tipc_server;
 
 #define TIPC_MOD_VER "2.0.0"
 
+#define NODE_HTABLE_SIZE   512
+#define MAX_BEARERS	   3
+
 extern int tipc_net_id __read_mostly;
 extern int sysctl_tipc_rmem[3] __read_mostly;
 extern int sysctl_tipc_named_timeout __read_mostly;
@@ -106,6 +109,26 @@
 	atomic_t subscription_count;
 };
 
+static inline u16 mod(u16 x)
+{
+	return x & 0xffffu;
+}
+
+static inline int less_eq(u16 left, u16 right)
+{
+	return mod(right - left) < 32768u;
+}
+
+static inline int more(u16 left, u16 right)
+{
+	return !less_eq(left, right);
+}
+
+static inline int less(u16 left, u16 right)
+{
+	return less_eq(left, right) && (mod(right) != mod(left));
+}
+
 #ifdef CONFIG_SYSCTL
 int tipc_register_sysctl(void);
 void tipc_unregister_sysctl(void);
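
The comparison helpers moved into core.h implement wraparound-safe ordering for the new 16-bit sequence numbers: left is "before" right when the forward distance from left to right is under half the sequence space. A standalone version with the boundary cases asserted:

    #include <assert.h>
    #include <stdint.h>

    /* Serial-number comparison: a <= b iff the forward distance from
     * a to b is under half the 16-bit space. */
    static int less_eq(uint16_t left, uint16_t right)
    {
        return (uint16_t)(right - left) < 32768u;
    }

    static int less(uint16_t left, uint16_t right)
    {
        return less_eq(left, right) && left != right;
    }

    int main(void)
    {
        assert(less(65535, 0));     /* 0 comes "after" 65535 (wrap) */
        assert(!less(0, 65535));    /* and not the other way around */
        assert(less_eq(100, 100));
        assert(less(100, 30000));
        return 0;
    }
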
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 43a515d..ca8b8e0 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -86,7 +86,7 @@
  */
 #define  STARTING_EVT    856384768	/* link processing trigger */
 #define  TRAFFIC_MSG_EVT 560815u	/* rx'd ??? */
-#define  TIMEOUT_EVT     560817u	/* link timer expired */
+#define  SILENCE_EVT     560817u	/* timer discovered silence from peer */
 
 /*
  * State value stored in 'failover_pkts'
@@ -106,6 +106,7 @@
 static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
 static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
+static void link_set_timer(struct tipc_link *link, unsigned long time);
 /*
  *  Simple link routines
  */
@@ -197,11 +198,12 @@
 	}
 
 	/* do all other link processing performed on a periodic basis */
-	link_state_event(l_ptr, TIMEOUT_EVT);
-
+	if (l_ptr->silent_intv_cnt || tipc_bclink_acks_missing(l_ptr->owner))
+		link_state_event(l_ptr, SILENCE_EVT);
+	l_ptr->silent_intv_cnt++;
 	if (skb_queue_len(&l_ptr->backlogq))
 		tipc_link_push_packets(l_ptr);
-
+	link_set_timer(l_ptr, l_ptr->keepalive_intv);
 	tipc_node_unlock(l_ptr->owner);
 	tipc_link_put(l_ptr);
 }
@@ -233,8 +235,8 @@
 
 	if (n_ptr->link_cnt >= MAX_BEARERS) {
 		tipc_addr_string_fill(addr_string, n_ptr->addr);
-		pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
-			n_ptr->link_cnt, addr_string, MAX_BEARERS);
+		pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
+		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
 		return NULL;
 	}
 
@@ -261,7 +263,6 @@
 		/* note: peer i/f name is updated by reset/activate message */
 	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
 	l_ptr->owner = n_ptr;
-	l_ptr->checkpoint = 1;
 	l_ptr->peer_session = INVALID_SESSION;
 	l_ptr->bearer_id = b_ptr->identity;
 	link_set_supervision_props(l_ptr, b_ptr->tolerance);
@@ -280,7 +281,7 @@
 	l_ptr->mtu = l_ptr->advertised_mtu;
 	l_ptr->priority = b_ptr->priority;
 	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
-	l_ptr->next_out_no = 1;
+	l_ptr->snd_nxt = 1;
 	__skb_queue_head_init(&l_ptr->transmq);
 	__skb_queue_head_init(&l_ptr->backlogq);
 	__skb_queue_head_init(&l_ptr->deferdq);
@@ -311,8 +312,7 @@
 	tipc_link_put(l);
 }
 
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
-			   bool shutting_down)
+void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	struct tipc_link *link;
@@ -451,9 +451,9 @@
 
 	if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
 		l_ptr->flags |= LINK_FAILINGOVER;
-		l_ptr->failover_checkpt = l_ptr->next_in_no;
+		l_ptr->failover_checkpt = l_ptr->rcv_nxt;
 		pl->failover_pkts = FIRST_FAILOVER;
-		pl->failover_checkpt = l_ptr->next_in_no;
+		pl->failover_checkpt = l_ptr->rcv_nxt;
 		pl->failover_skb = l_ptr->reasm_buf;
 	} else {
 		kfree_skb(l_ptr->reasm_buf);
@@ -469,36 +469,19 @@
 	tipc_link_purge_backlog(l_ptr);
 	l_ptr->reasm_buf = NULL;
 	l_ptr->rcv_unacked = 0;
-	l_ptr->checkpoint = 1;
-	l_ptr->next_out_no = 1;
-	l_ptr->fsm_msg_cnt = 0;
+	l_ptr->snd_nxt = 1;
+	l_ptr->silent_intv_cnt = 0;
 	l_ptr->stale_count = 0;
 	link_reset_statistics(l_ptr);
 }
 
-void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
-{
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct tipc_link *l_ptr;
-	struct tipc_node *n_ptr;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
-		tipc_node_lock(n_ptr);
-		l_ptr = n_ptr->links[bearer_id];
-		if (l_ptr)
-			tipc_link_reset(l_ptr);
-		tipc_node_unlock(n_ptr);
-	}
-	rcu_read_unlock();
-}
-
 static void link_activate(struct tipc_link *link)
 {
 	struct tipc_node *node = link->owner;
 
-	link->next_in_no = 1;
+	link->rcv_nxt = 1;
 	link->stats.recv_info = 1;
+	link->silent_intv_cnt = 0;
 	tipc_node_link_up(node, link);
 	tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
 }
@@ -511,7 +494,7 @@
 static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 {
 	struct tipc_link *other;
-	unsigned long cont_intv = l_ptr->cont_intv;
+	unsigned long timer_intv = l_ptr->keepalive_intv;
 
 	if (l_ptr->flags & LINK_STOPPED)
 		return;
@@ -519,45 +502,33 @@
 	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
 		return;		/* Not yet. */
 
-	if (l_ptr->flags & LINK_FAILINGOVER) {
-		if (event == TIMEOUT_EVT)
-			link_set_timer(l_ptr, cont_intv);
+	if (l_ptr->flags & LINK_FAILINGOVER)
 		return;
-	}
 
 	switch (l_ptr->state) {
 	case WORKING_WORKING:
 		switch (event) {
 		case TRAFFIC_MSG_EVT:
 		case ACTIVATE_MSG:
+			l_ptr->silent_intv_cnt = 0;
 			break;
-		case TIMEOUT_EVT:
-			if (l_ptr->next_in_no != l_ptr->checkpoint) {
-				l_ptr->checkpoint = l_ptr->next_in_no;
-				if (tipc_bclink_acks_missing(l_ptr->owner)) {
+		case SILENCE_EVT:
+			if (!l_ptr->silent_intv_cnt) {
+				if (tipc_bclink_acks_missing(l_ptr->owner))
 					tipc_link_proto_xmit(l_ptr, STATE_MSG,
 							     0, 0, 0, 0);
-					l_ptr->fsm_msg_cnt++;
-				}
-				link_set_timer(l_ptr, cont_intv);
 				break;
 			}
 			l_ptr->state = WORKING_UNKNOWN;
-			l_ptr->fsm_msg_cnt = 0;
 			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-			l_ptr->fsm_msg_cnt++;
-			link_set_timer(l_ptr, cont_intv / 4);
 			break;
 		case RESET_MSG:
 			pr_debug("%s<%s>, requested by peer\n",
 				 link_rst_msg, l_ptr->name);
 			tipc_link_reset(l_ptr);
 			l_ptr->state = RESET_RESET;
-			l_ptr->fsm_msg_cnt = 0;
 			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
 					     0, 0, 0, 0);
-			l_ptr->fsm_msg_cnt++;
-			link_set_timer(l_ptr, cont_intv);
 			break;
 		default:
 			pr_debug("%s%u in WW state\n", link_unk_evt, event);
@@ -568,46 +539,33 @@
 		case TRAFFIC_MSG_EVT:
 		case ACTIVATE_MSG:
 			l_ptr->state = WORKING_WORKING;
-			l_ptr->fsm_msg_cnt = 0;
-			link_set_timer(l_ptr, cont_intv);
+			l_ptr->silent_intv_cnt = 0;
 			break;
 		case RESET_MSG:
 			pr_debug("%s<%s>, requested by peer while probing\n",
 				 link_rst_msg, l_ptr->name);
 			tipc_link_reset(l_ptr);
 			l_ptr->state = RESET_RESET;
-			l_ptr->fsm_msg_cnt = 0;
 			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
 					     0, 0, 0, 0);
-			l_ptr->fsm_msg_cnt++;
-			link_set_timer(l_ptr, cont_intv);
 			break;
-		case TIMEOUT_EVT:
-			if (l_ptr->next_in_no != l_ptr->checkpoint) {
+		case SILENCE_EVT:
+			if (!l_ptr->silent_intv_cnt) {
 				l_ptr->state = WORKING_WORKING;
-				l_ptr->fsm_msg_cnt = 0;
-				l_ptr->checkpoint = l_ptr->next_in_no;
-				if (tipc_bclink_acks_missing(l_ptr->owner)) {
+				if (tipc_bclink_acks_missing(l_ptr->owner))
 					tipc_link_proto_xmit(l_ptr, STATE_MSG,
 							     0, 0, 0, 0);
-					l_ptr->fsm_msg_cnt++;
-				}
-				link_set_timer(l_ptr, cont_intv);
-			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
+			} else if (l_ptr->silent_intv_cnt <
+				   l_ptr->abort_limit) {
 				tipc_link_proto_xmit(l_ptr, STATE_MSG,
 						     1, 0, 0, 0);
-				l_ptr->fsm_msg_cnt++;
-				link_set_timer(l_ptr, cont_intv / 4);
 			} else {	/* Link has failed */
 				pr_debug("%s<%s>, peer not responding\n",
 					 link_rst_msg, l_ptr->name);
 				tipc_link_reset(l_ptr);
 				l_ptr->state = RESET_UNKNOWN;
-				l_ptr->fsm_msg_cnt = 0;
 				tipc_link_proto_xmit(l_ptr, RESET_MSG,
 						     0, 0, 0, 0);
-				l_ptr->fsm_msg_cnt++;
-				link_set_timer(l_ptr, cont_intv);
 			}
 			break;
 		default:
@@ -623,31 +581,22 @@
 			if (other && link_working_unknown(other))
 				break;
 			l_ptr->state = WORKING_WORKING;
-			l_ptr->fsm_msg_cnt = 0;
 			link_activate(l_ptr);
 			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-			l_ptr->fsm_msg_cnt++;
 			if (l_ptr->owner->working_links == 1)
 				tipc_link_sync_xmit(l_ptr);
-			link_set_timer(l_ptr, cont_intv);
 			break;
 		case RESET_MSG:
 			l_ptr->state = RESET_RESET;
-			l_ptr->fsm_msg_cnt = 0;
 			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
 					     1, 0, 0, 0);
-			l_ptr->fsm_msg_cnt++;
-			link_set_timer(l_ptr, cont_intv);
 			break;
 		case STARTING_EVT:
 			l_ptr->flags |= LINK_STARTED;
-			l_ptr->fsm_msg_cnt++;
-			link_set_timer(l_ptr, cont_intv);
+			link_set_timer(l_ptr, timer_intv);
 			break;
-		case TIMEOUT_EVT:
+		case SILENCE_EVT:
 			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
-			l_ptr->fsm_msg_cnt++;
-			link_set_timer(l_ptr, cont_intv);
 			break;
 		default:
 			pr_err("%s%u in RU state\n", link_unk_evt, event);
@@ -661,21 +610,16 @@
 			if (other && link_working_unknown(other))
 				break;
 			l_ptr->state = WORKING_WORKING;
-			l_ptr->fsm_msg_cnt = 0;
 			link_activate(l_ptr);
 			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-			l_ptr->fsm_msg_cnt++;
 			if (l_ptr->owner->working_links == 1)
 				tipc_link_sync_xmit(l_ptr);
-			link_set_timer(l_ptr, cont_intv);
 			break;
 		case RESET_MSG:
 			break;
-		case TIMEOUT_EVT:
+		case SILENCE_EVT:
 			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
 					     0, 0, 0, 0);
-			l_ptr->fsm_msg_cnt++;
-			link_set_timer(l_ptr, cont_intv);
 			break;
 		default:
 			pr_err("%s%u in RR state\n", link_unk_evt, event);
@@ -701,53 +645,58 @@
 {
 	struct tipc_msg *msg = buf_msg(skb_peek(list));
 	unsigned int maxwin = link->window;
-	unsigned int imp = msg_importance(msg);
+	unsigned int i, imp = msg_importance(msg);
 	uint mtu = link->mtu;
-	uint ack = mod(link->next_in_no - 1);
-	uint seqno = link->next_out_no;
-	uint bc_last_in = link->owner->bclink.last_in;
+	u16 ack = mod(link->rcv_nxt - 1);
+	u16 seqno = link->snd_nxt;
+	u16 bc_last_in = link->owner->bclink.last_in;
 	struct tipc_media_addr *addr = &link->media_addr;
 	struct sk_buff_head *transmq = &link->transmq;
 	struct sk_buff_head *backlogq = &link->backlogq;
-	struct sk_buff *skb, *tmp;
+	struct sk_buff *skb, *bskb;
 
-	/* Match backlog limit against msg importance: */
-	if (unlikely(link->backlog[imp].len >= link->backlog[imp].limit))
-		return link_schedule_user(link, list);
-
+	/* Match msg importance against this and all higher backlog limits: */
+	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
+		if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
+			return link_schedule_user(link, list);
+	}
 	if (unlikely(msg_size(msg) > mtu)) {
 		__skb_queue_purge(list);
 		return -EMSGSIZE;
 	}
 	/* Prepare each packet for sending, and add to relevant queue: */
-	skb_queue_walk_safe(list, skb, tmp) {
-		__skb_unlink(skb, list);
+	while (skb_queue_len(list)) {
+		skb = skb_peek(list);
 		msg = buf_msg(skb);
 		msg_set_seqno(msg, seqno);
 		msg_set_ack(msg, ack);
 		msg_set_bcast_ack(msg, bc_last_in);
 
 		if (likely(skb_queue_len(transmq) < maxwin)) {
+			__skb_dequeue(list);
 			__skb_queue_tail(transmq, skb);
 			tipc_bearer_send(net, link->bearer_id, skb, addr);
 			link->rcv_unacked = 0;
 			seqno++;
 			continue;
 		}
-		if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) {
+		if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
+			kfree_skb(__skb_dequeue(list));
 			link->stats.sent_bundled++;
 			continue;
 		}
-		if (tipc_msg_make_bundle(&skb, mtu, link->addr)) {
+		if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
+			kfree_skb(__skb_dequeue(list));
+			__skb_queue_tail(backlogq, bskb);
+			link->backlog[msg_importance(buf_msg(bskb))].len++;
 			link->stats.sent_bundled++;
 			link->stats.sent_bundles++;
-			imp = msg_importance(buf_msg(skb));
+			continue;
 		}
-		__skb_queue_tail(backlogq, skb);
-		link->backlog[imp].len++;
-		seqno++;
+		link->backlog[imp].len += skb_queue_len(list);
+		skb_queue_splice_tail_init(list, backlogq);
 	}
-	link->next_out_no = seqno;
+	link->snd_nxt = seqno;
 	return 0;
 }
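
The congestion test in the rewritten __tipc_link_xmit() above is stricter than the old one: a message is held back if its own backlog level or any higher level has reached its limit, since bundling can promote a message into a higher-importance bundle (see the msg.c changes further down, where bundle headers carry critical or system importance). A minimal sketch of that check, with an illustrative stand-in type (demo_link and the level count are not kernel definitions):

    #include <stdbool.h>

    #define DEMO_SYSTEM_IMPORTANCE 4    /* highest of the five backlog levels */

    struct demo_link {
        struct { unsigned int len, limit; } backlog[DEMO_SYSTEM_IMPORTANCE + 1];
    };

    /* True if a message of importance 'imp' must wait: its own level,
     * or any level above it, is already at its backlog limit.
     */
    static bool demo_backlog_congested(const struct demo_link *l, unsigned int imp)
    {
        unsigned int i;

        for (i = imp; i <= DEMO_SYSTEM_IMPORTANCE; i++)
            if (l->backlog[i].len >= l->backlog[i].limit)
                return true;
        return false;
    }
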
 
@@ -877,7 +826,8 @@
 {
 	struct sk_buff *skb;
 	struct tipc_msg *msg;
-	unsigned int ack = mod(link->next_in_no - 1);
+	u16 seqno = link->snd_nxt;
+	u16 ack = mod(link->rcv_nxt - 1);
 
 	while (skb_queue_len(&link->transmq) < link->window) {
 		skb = __skb_dequeue(&link->backlogq);
@@ -886,12 +836,15 @@
 		msg = buf_msg(skb);
 		link->backlog[msg_importance(msg)].len--;
 		msg_set_ack(msg, ack);
+		msg_set_seqno(msg, seqno);
+		seqno = mod(seqno + 1);
 		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
 		link->rcv_unacked = 0;
 		__skb_queue_tail(&link->transmq, skb);
 		tipc_bearer_send(link->owner->net, link->bearer_id,
 				 skb, &link->media_addr);
 	}
+	link->snd_nxt = seqno;
 }
 
 void tipc_link_reset_all(struct tipc_node *node)
@@ -964,13 +917,13 @@
 	msg = buf_msg(skb);
 
 	/* Detect repeated retransmit failures */
-	if (l_ptr->last_retransmitted == msg_seqno(msg)) {
+	if (l_ptr->last_retransm == msg_seqno(msg)) {
 		if (++l_ptr->stale_count > 100) {
 			link_retransmit_failure(l_ptr, skb);
 			return;
 		}
 	} else {
-		l_ptr->last_retransmitted = msg_seqno(msg);
+		l_ptr->last_retransm = msg_seqno(msg);
 		l_ptr->stale_count = 1;
 	}
 
@@ -978,7 +931,7 @@
 		if (!retransmits)
 			break;
 		msg = buf_msg(skb);
-		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
 		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
 		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
 				 &l_ptr->media_addr);
@@ -1001,11 +954,11 @@
 		goto synched;
 
 	/* Was last pre-synch packet added to input queue ? */
-	if (less_eq(pl->next_in_no, l->synch_point))
+	if (less_eq(pl->rcv_nxt, l->synch_point))
 		return false;
 
 	/* Is it still in the input queue ? */
-	post_synch = mod(pl->next_in_no - l->synch_point) - 1;
+	post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
 	if (skb_queue_len(&pl->inputq) > post_synch)
 		return false;
 synched:
@@ -1016,13 +969,13 @@
 static void link_retrieve_defq(struct tipc_link *link,
 			       struct sk_buff_head *list)
 {
-	u32 seq_no;
+	u16 seq_no;
 
 	if (skb_queue_empty(&link->deferdq))
 		return;
 
 	seq_no = buf_seqno(skb_peek(&link->deferdq));
-	if (seq_no == mod(link->next_in_no))
+	if (seq_no == link->rcv_nxt)
 		skb_queue_splice_tail_init(&link->deferdq, list);
 }
 
@@ -1043,8 +996,8 @@
 	struct tipc_link *l_ptr;
 	struct sk_buff *skb1, *tmp;
 	struct tipc_msg *msg;
-	u32 seq_no;
-	u32 ackd;
+	u16 seq_no;
+	u16 ackd;
 	u32 released;
 
 	skb2list(skb, &head);
@@ -1137,18 +1090,20 @@
 		}
 
 		/* Link is now in state WORKING_WORKING */
-		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
+		if (unlikely(seq_no != l_ptr->rcv_nxt)) {
 			link_handle_out_of_seq_msg(l_ptr, skb);
 			link_retrieve_defq(l_ptr, &head);
 			skb = NULL;
 			goto unlock;
 		}
+		l_ptr->silent_intv_cnt = 0;
+
 		/* Synchronize with parallel link if applicable */
 		if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
 			if (!link_synch(l_ptr))
 				goto unlock;
 		}
-		l_ptr->next_in_no++;
+		l_ptr->rcv_nxt++;
 		if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
 			link_retrieve_defq(l_ptr, &head);
 		if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
@@ -1268,7 +1223,7 @@
 u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
 {
 	struct sk_buff *skb1;
-	u32 seq_no = buf_seqno(skb);
+	u16 seq_no = buf_seqno(skb);
 
 	/* Empty queue ? */
 	if (skb_queue_empty(list)) {
@@ -1284,7 +1239,7 @@
 
 	/* Locate insertion point in queue, then insert; discard if duplicate */
 	skb_queue_walk(list, skb1) {
-		u32 curr_seqno = buf_seqno(skb1);
+		u16 curr_seqno = buf_seqno(skb1);
 
 		if (seq_no == curr_seqno) {
 			kfree_skb(skb);
@@ -1312,14 +1267,14 @@
 		return;
 	}
 
-	/* Record OOS packet arrival (force mismatch on next timeout) */
-	l_ptr->checkpoint--;
+	/* Record OOS packet arrival */
+	l_ptr->silent_intv_cnt = 0;
 
 	/*
 	 * Discard packet if a duplicate; otherwise add it to deferred queue
 	 * and notify peer of gap as per protocol specification
 	 */
-	if (less(seq_no, mod(l_ptr->next_in_no))) {
+	if (less(seq_no, l_ptr->rcv_nxt)) {
 		l_ptr->stats.duplicates++;
 		kfree_skb(buf);
 		return;
@@ -1344,6 +1299,7 @@
 	struct tipc_msg *msg = l_ptr->pmsg;
 	u32 msg_size = sizeof(l_ptr->proto_msg);
 	int r_flag;
+	u16 last_rcv;
 
 	/* Don't send protocol message during link failover */
 	if (l_ptr->flags & LINK_FAILINGOVER)
@@ -1360,16 +1316,14 @@
 	msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
 
 	if (msg_typ == STATE_MSG) {
-		u32 next_sent = mod(l_ptr->next_out_no);
+		u16 next_sent = l_ptr->snd_nxt;
 
 		if (!tipc_link_is_up(l_ptr))
 			return;
-		if (skb_queue_len(&l_ptr->backlogq))
-			next_sent = buf_seqno(skb_peek(&l_ptr->backlogq));
 		msg_set_next_sent(msg, next_sent);
 		if (!skb_queue_empty(&l_ptr->deferdq)) {
-			u32 rec = buf_seqno(skb_peek(&l_ptr->deferdq));
-			gap = mod(rec - mod(l_ptr->next_in_no));
+			last_rcv = buf_seqno(skb_peek(&l_ptr->deferdq));
+			gap = mod(last_rcv - l_ptr->rcv_nxt);
 		}
 		msg_set_seq_gap(msg, gap);
 		if (gap)
@@ -1377,7 +1331,7 @@
 		msg_set_link_tolerance(msg, tolerance);
 		msg_set_linkprio(msg, priority);
 		msg_set_max_pkt(msg, l_ptr->mtu);
-		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
 		msg_set_probe(msg, probe_msg != 0);
 		if (probe_msg)
 			l_ptr->stats.sent_probes++;
@@ -1397,7 +1351,7 @@
 	msg_set_linkprio(msg, l_ptr->priority);
 	msg_set_size(msg, msg_size);
 
-	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
+	msg_set_seqno(msg, mod(l_ptr->snd_nxt + (0xffff / 2)));
 
 	buf = tipc_buf_acquire(msg_size);
 	if (!buf)
@@ -1496,17 +1450,15 @@
 		}
 
-		/* Record reception; force mismatch at next timeout: */
+		/* Record reception; reset the silence counter: */
-		l_ptr->checkpoint--;
+		l_ptr->silent_intv_cnt = 0;
 
 		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
 		l_ptr->stats.recv_states++;
 		if (link_reset_unknown(l_ptr))
 			break;
 
-		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
-			rec_gap = mod(msg_next_sent(msg) -
-				      mod(l_ptr->next_in_no));
-		}
+		if (less_eq(l_ptr->rcv_nxt, msg_next_sent(msg)))
+			rec_gap = mod(msg_next_sent(msg) - l_ptr->rcv_nxt);
 
 		if (msg_probe(msg))
 			l_ptr->stats.recv_probes++;
@@ -1580,6 +1532,11 @@
 
 	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
 		      FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
+
+	skb_queue_walk(&l_ptr->backlogq, skb) {
+		msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
+		l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
+	}
 	skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
 	tipc_link_purge_backlog(l_ptr);
 	msgcount = skb_queue_len(&l_ptr->transmq);
@@ -1640,6 +1597,7 @@
 	struct tipc_msg tnl_hdr;
 	struct sk_buff_head *queue = &link->transmq;
 	int mcnt;
+	u16 seqno;
 
 	tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
 		      SYNCH_MSG, INT_H_SIZE, link->addr);
@@ -1653,7 +1611,7 @@
 		struct tipc_msg *msg = buf_msg(skb);
 		u32 len = msg_size(msg);
 
-		msg_set_ack(msg, mod(link->next_in_no - 1));
+		msg_set_ack(msg, mod(link->rcv_nxt - 1));
 		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
 		msg_set_size(&tnl_hdr, len + INT_H_SIZE);
 		outskb = tipc_buf_acquire(len + INT_H_SIZE);
@@ -1671,6 +1629,11 @@
 	}
 	if (queue == &link->backlogq)
 		return;
+	seqno = link->snd_nxt;
+	skb_queue_walk(&link->backlogq, skb) {
+		msg_set_seqno(buf_msg(skb), seqno);
+		seqno = mod(seqno + 1);
+	}
 	queue = &link->backlogq;
 	goto tunnel_queue;
 }
@@ -1742,8 +1705,8 @@
 		return;
 
 	l_ptr->tolerance = tol;
-	l_ptr->cont_intv = msecs_to_jiffies(intv);
-	l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
+	l_ptr->keepalive_intv = msecs_to_jiffies(intv);
+	l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->keepalive_intv));
 }
 
 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
@@ -1803,8 +1766,8 @@
 static void link_reset_statistics(struct tipc_link *l_ptr)
 {
 	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
-	l_ptr->stats.sent_info = l_ptr->next_out_no;
-	l_ptr->stats.recv_info = l_ptr->next_in_no;
+	l_ptr->stats.sent_info = l_ptr->snd_nxt;
+	l_ptr->stats.recv_info = l_ptr->rcv_nxt;
 }
 
 static void link_print(struct tipc_link *l_ptr, const char *str)
@@ -1893,6 +1856,9 @@
 
 	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
 
+	if (strcmp(name, tipc_bclink_name) == 0)
+		return tipc_nl_bc_link_set(net, attrs);
+
 	node = tipc_link_find_owner(net, name, &bearer_id);
 	if (!node)
 		return -EINVAL;
@@ -2034,9 +2000,9 @@
 		goto attr_msg_full;
 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
 		goto attr_msg_full;
-	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
+	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
 		goto attr_msg_full;
-	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
+	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
 		goto attr_msg_full;
 
 	if (tipc_link_is_up(link))
@@ -2175,50 +2141,53 @@
 int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
 {
 	struct net *net = genl_info_net(info);
-	struct sk_buff *ans_skb;
 	struct tipc_nl_msg msg;
-	struct tipc_link *link;
-	struct tipc_node *node;
 	char *name;
-	int bearer_id;
 	int err;
 
-	if (!info->attrs[TIPC_NLA_LINK_NAME])
-		return -EINVAL;
-
-	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
-	node = tipc_link_find_owner(net, name, &bearer_id);
-	if (!node)
-		return -EINVAL;
-
-	ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
-	if (!ans_skb)
-		return -ENOMEM;
-
-	msg.skb = ans_skb;
 	msg.portid = info->snd_portid;
 	msg.seq = info->snd_seq;
 
-	tipc_node_lock(node);
-	link = node->links[bearer_id];
-	if (!link) {
-		err = -EINVAL;
-		goto err_out;
+	if (!info->attrs[TIPC_NLA_LINK_NAME])
+		return -EINVAL;
+	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
+
+	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!msg.skb)
+		return -ENOMEM;
+
+	if (strcmp(name, tipc_bclink_name) == 0) {
+		err = tipc_nl_add_bc_link(net, &msg);
+		if (err) {
+			nlmsg_free(msg.skb);
+			return err;
+		}
+	} else {
+		int bearer_id;
+		struct tipc_node *node;
+		struct tipc_link *link;
+
+		node = tipc_link_find_owner(net, name, &bearer_id);
+		if (!node)
+			return -EINVAL;
+
+		tipc_node_lock(node);
+		link = node->links[bearer_id];
+		if (!link) {
+			tipc_node_unlock(node);
+			nlmsg_free(msg.skb);
+			return -EINVAL;
+		}
+
+		err = __tipc_nl_add_link(net, &msg, link, 0);
+		tipc_node_unlock(node);
+		if (err) {
+			nlmsg_free(msg.skb);
+			return err;
+		}
 	}
 
-	err = __tipc_nl_add_link(net, &msg, link, 0);
-	if (err)
-		goto err_out;
-
-	tipc_node_unlock(node);
-
-	return genlmsg_reply(ans_skb, info);
-
-err_out:
-	tipc_node_unlock(node);
-	nlmsg_free(ans_skb);
-
-	return err;
+	return genlmsg_reply(msg.skb, info);
 }
 
 int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
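
Taken together, the link.c changes above replace checkpoint comparisons and per-state fsm_msg_cnt housekeeping with a single silence counter: every in-sequence or state-message reception zeroes silent_intv_cnt, the keepalive timer bumps it once per interval, and the link is declared failed when the counter reaches abort_limit = tolerance / keepalive interval. A condensed restatement of that loop (plain C sketch, not the kernel FSM; names are illustrative):

    #include <stdbool.h>

    struct demo_ep {
        unsigned int silent_intv_cnt;   /* keepalive intervals without reception */
        unsigned int abort_limit;       /* tolerance_ms / keepalive_interval_ms */
    };

    /* Any reception from the peer proves the link alive. */
    static void demo_ep_rcv(struct demo_ep *e)
    {
        e->silent_intv_cnt = 0;
    }

    /* Runs once per keepalive interval; true means "reset the link". */
    static bool demo_ep_interval(struct demo_ep *e)
    {
        return ++e->silent_intv_cnt >= e->abort_limit;
    }
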
diff --git a/net/tipc/link.h b/net/tipc/link.h
index b5b4e35..0c02c97 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -107,30 +107,29 @@
  * @owner: pointer to peer node
  * @refcnt: reference counter for permanent references (owner node & timer)
  * @flags: execution state flags for link endpoint instance
- * @checkpoint: reference point for triggering link continuity checking
  * @peer_session: link session # being used by peer end of link
  * @peer_bearer_id: bearer id used by link's peer endpoint
  * @bearer_id: local bearer id used by link
  * @tolerance: minimum link continuity loss needed to reset link [in ms]
- * @cont_intv: link continuity testing interval
+ * @keepalive_intv: link keepalive timer interval
  * @abort_limit: # of unacknowledged continuity probes needed to reset link
  * @state: current state of link FSM
- * @fsm_msg_cnt: # of protocol messages link FSM has sent in current state
+ * @silent_intv_cnt: # of timer intervals without any reception from peer
  * @proto_msg: template for control messages generated by link
  * @pmsg: convenience pointer to "proto_msg" field
  * @priority: current link priority
  * @net_plane: current link network plane ('A' through 'H')
  * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
  * @exp_msg_count: # of tunnelled messages expected during link changeover
- * @reset_checkpoint: seq # of last acknowledged message at time of link reset
+ * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
  * @mtu: current maximum packet size for this link
  * @advertised_mtu: advertised own mtu when link is being established
- * @transmitq: queue for sent, non-acked messages
+ * @transmq: queue for sent, non-acked messages
  * @backlogq: queue for messages waiting to be sent
- * @next_out_no: next sequence number to use for outbound messages
+ * @snd_nxt: next sequence number to use for outbound messages
- * @last_retransmitted: sequence number of most recently retransmitted message
+ * @last_retransm: seq # of most recently retransmitted message
  * @stale_count: # of identical retransmit requests made by peer
- * @next_in_no: next sequence number to expect for inbound messages
+ * @rcv_nxt: next sequence number to expect for inbound messages
- * @deferred_queue: deferred queue saved OOS b'cast message received from node
+ * @deferdq: deferred queue for out-of-sequence broadcast messages from node
  * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
  * @inputq: buffer queue for messages to be delivered upwards
@@ -151,15 +150,14 @@
 
 	/* Management and link supervision data */
 	unsigned int flags;
-	u32 checkpoint;
 	u32 peer_session;
 	u32 peer_bearer_id;
 	u32 bearer_id;
 	u32 tolerance;
-	unsigned long cont_intv;
+	unsigned long keepalive_intv;
 	u32 abort_limit;
 	int state;
-	u32 fsm_msg_cnt;
+	u32 silent_intv_cnt;
 	struct {
 		unchar hdr[INT_H_SIZE];
 		unchar body[TIPC_MAX_IF_NAME];
@@ -185,13 +183,13 @@
 		u16 len;
 		u16 limit;
 	} backlog[5];
-	u32 next_out_no;
+	u16 snd_nxt;
+	u16 last_retransm;
 	u32 window;
-	u32 last_retransmitted;
 	u32 stale_count;
 
 	/* Reception */
-	u32 next_in_no;
+	u16 rcv_nxt;
 	u32 rcv_unacked;
 	struct sk_buff_head deferdq;
 	struct sk_buff_head inputq;
@@ -213,8 +211,7 @@
 			      struct tipc_bearer *b_ptr,
 			      const struct tipc_media_addr *media_addr);
 void tipc_link_delete(struct tipc_link *link);
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
-			   bool shutting_down);
+void tipc_link_delete_list(struct net *net, unsigned int bearer_id);
 void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
 void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
 void tipc_link_reset_fragments(struct tipc_link *l_ptr);
@@ -223,7 +220,6 @@
 void tipc_link_purge_queues(struct tipc_link *l_ptr);
 void tipc_link_reset_all(struct tipc_node *node);
 void tipc_link_reset(struct tipc_link *l_ptr);
-void tipc_link_reset_list(struct net *net, unsigned int bearer_id);
 int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
 		       u32 selector);
 int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest,
@@ -247,39 +243,6 @@
 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
 void link_prepare_wakeup(struct tipc_link *l);
 
-/*
- * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
- */
-static inline u32 buf_seqno(struct sk_buff *buf)
-{
-	return msg_seqno(buf_msg(buf));
-}
-
-static inline u32 mod(u32 x)
-{
-	return x & 0xffffu;
-}
-
-static inline int less_eq(u32 left, u32 right)
-{
-	return mod(right - left) < 32768u;
-}
-
-static inline int more(u32 left, u32 right)
-{
-	return !less_eq(left, right);
-}
-
-static inline int less(u32 left, u32 right)
-{
-	return less_eq(left, right) && (mod(right) != mod(left));
-}
-
-static inline u32 lesser(u32 left, u32 right)
-{
-	return less_eq(left, right) ? left : right;
-}
-
 static inline u32 link_own_addr(struct tipc_link *l)
 {
 	return msg_prevnode(l->pmsg);
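
The helpers deleted from link.h above (buf_seqno() visibly reappears in msg.h later in this diff) implement standard serial-number arithmetic over a 16-bit window: a is at-or-before b exactly when b is no more than 2^15 - 1 steps ahead, so orderings survive the 0xffff -> 0x0000 wrap. Restated compactly over u16, which wraps naturally (illustrative names):

    #include <stdint.h>

    /* "l <= r" in wraparound order: r is at most 2^15 - 1 steps ahead of l. */
    static inline int seq_less_eq(uint16_t l, uint16_t r)
    {
        return (uint16_t)(r - l) < 0x8000u;
    }

    static inline int seq_less(uint16_t l, uint16_t r)
    {
        return l != r && seq_less_eq(l, r);
    }

    /* Example: seq_less(0xfffe, 0x0001) is true; 0x0001 is three steps ahead. */
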
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index c3e96e8..08b4cc7 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -331,16 +331,15 @@
 
 /**
  * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
- * @bskb: the buffer to append to ("bundle")
- * @skb:  buffer to be appended
+ * @skb: the buffer to append to ("bundle")
+ * @msg:  message to be appended
  * @mtu:  max allowable size for the bundle buffer
- * Consumes buffer if successful
+ * The message is copied, not consumed; the caller keeps ownership of @msg
  * Returns true if bundling could be performed, otherwise false
  */
-bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
+bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
 {
 	struct tipc_msg *bmsg;
-	struct tipc_msg *msg = buf_msg(skb);
 	unsigned int bsz;
 	unsigned int msz = msg_size(msg);
 	u32 start, pad;
@@ -348,9 +347,9 @@
 
 	if (likely(msg_user(msg) == MSG_FRAGMENTER))
 		return false;
-	if (!bskb)
+	if (!skb)
 		return false;
-	bmsg = buf_msg(bskb);
+	bmsg = buf_msg(skb);
 	bsz = msg_size(bmsg);
 	start = align(bsz);
 	pad = start - bsz;
@@ -359,18 +358,20 @@
 		return false;
 	if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
 		return false;
-	if (likely(msg_user(bmsg) != MSG_BUNDLER))
+	if (unlikely(msg_user(bmsg) != MSG_BUNDLER))
 		return false;
-	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
+	if (unlikely(skb_tailroom(skb) < (pad + msz)))
 		return false;
 	if (unlikely(max < (start + msz)))
 		return false;
+	if ((msg_importance(msg) < TIPC_SYSTEM_IMPORTANCE) &&
+	    (msg_importance(bmsg) == TIPC_SYSTEM_IMPORTANCE))
+		return false;
 
-	skb_put(bskb, pad + msz);
-	skb_copy_to_linear_data_offset(bskb, start, skb->data, msz);
+	skb_put(skb, pad + msz);
+	skb_copy_to_linear_data_offset(skb, start, msg, msz);
 	msg_set_size(bmsg, start + msz);
 	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
-	kfree_skb(skb);
 	return true;
 }
 
@@ -416,18 +417,18 @@
 
 /**
  * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
- * @list: the buffer chain
- * @skb: buffer to be appended and replaced
+ * @skb: buffer to be created, appended to and returned on success
+ * @msg: message to be appended
  * @mtu: max allowable size for the bundle buffer, inclusive header
  * @dnode: destination node for message. (Not always present in header)
- * Replaces buffer if successful
  * Returns true if success, otherwise false
  */
-bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode)
+bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
+			  u32 mtu, u32 dnode)
 {
-	struct sk_buff *bskb;
+	struct sk_buff *_skb;
 	struct tipc_msg *bmsg;
-	struct tipc_msg *msg = buf_msg(*skb);
 	u32 msz = msg_size(msg);
 	u32 max = mtu - INT_H_SIZE;
 
@@ -440,19 +441,23 @@
 	if (msz > (max / 2))
 		return false;
 
-	bskb = tipc_buf_acquire(max);
-	if (!bskb)
+	_skb = tipc_buf_acquire(max);
+	if (!_skb)
 		return false;
 
-	skb_trim(bskb, INT_H_SIZE);
-	bmsg = buf_msg(bskb);
+	skb_trim(_skb, INT_H_SIZE);
+	bmsg = buf_msg(_skb);
 	tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
 		      INT_H_SIZE, dnode);
+	if (msg_isdata(msg))
+		msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
+	else
+		msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
 	msg_set_seqno(bmsg, msg_seqno(msg));
 	msg_set_ack(bmsg, msg_ack(msg));
 	msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
-	tipc_msg_bundle(bskb, *skb, mtu);
-	*skb = bskb;
+	tipc_msg_bundle(_skb, msg, mtu);
+	*skb = _skb;
 	return true;
 }
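
For the size bookkeeping in the reworked bundling code above: each appended message is copied in at the next 4-byte boundary of the bundle, so the fit test compares the aligned offset plus the message size against the bundle's byte budget. A hedged sketch of just that arithmetic (the align-to-4 mirrors TIPC's align(); the function name is illustrative):

    #include <stdint.h>

    #define DEMO_ALIGN4(x) (((x) + 3u) & ~3u)

    /* Returns the bundle's new total size if a message of msz bytes still
     * fits within max bytes, or 0 if it does not.
     */
    static uint32_t demo_bundle_grow(uint32_t bundle_sz, uint32_t msz, uint32_t max)
    {
        uint32_t start = DEMO_ALIGN4(bundle_sz); /* copy offset; 0-3 pad bytes */

        if (start + msz > max)
            return 0;
        return start + msz;     /* the value msg_set_size() records */
    }
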
 
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index e1d3595e..19c45fb 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -313,12 +313,12 @@
 	msg_set_bits(m, 1, 19, 0x3, n);
 }
 
-static inline u32 msg_bcast_ack(struct tipc_msg *m)
+static inline u16 msg_bcast_ack(struct tipc_msg *m)
 {
 	return msg_bits(m, 1, 0, 0xffff);
 }
 
-static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n)
+static inline void msg_set_bcast_ack(struct tipc_msg *m, u16 n)
 {
 	msg_set_bits(m, 1, 0, 0xffff, n);
 }
@@ -327,22 +327,22 @@
 /*
  * Word 2
  */
-static inline u32 msg_ack(struct tipc_msg *m)
+static inline u16 msg_ack(struct tipc_msg *m)
 {
 	return msg_bits(m, 2, 16, 0xffff);
 }
 
-static inline void msg_set_ack(struct tipc_msg *m, u32 n)
+static inline void msg_set_ack(struct tipc_msg *m, u16 n)
 {
 	msg_set_bits(m, 2, 16, 0xffff, n);
 }
 
-static inline u32 msg_seqno(struct tipc_msg *m)
+static inline u16 msg_seqno(struct tipc_msg *m)
 {
 	return msg_bits(m, 2, 0, 0xffff);
 }
 
-static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
+static inline void msg_set_seqno(struct tipc_msg *m, u16 n)
 {
 	msg_set_bits(m, 2, 0, 0xffff, n);
 }
@@ -352,18 +352,22 @@
  */
 static inline u32 msg_importance(struct tipc_msg *m)
 {
-	if (unlikely(msg_user(m) == MSG_FRAGMENTER))
+	int usr = msg_user(m);
+
+	if (likely((usr <= TIPC_CRITICAL_IMPORTANCE) && !msg_errcode(m)))
+		return usr;
+	if ((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER))
 		return msg_bits(m, 5, 13, 0x7);
-	if (likely(msg_isdata(m) && !msg_errcode(m)))
-		return msg_user(m);
 	return TIPC_SYSTEM_IMPORTANCE;
 }
 
 static inline void msg_set_importance(struct tipc_msg *m, u32 i)
 {
-	if (unlikely(msg_user(m) == MSG_FRAGMENTER))
+	int usr = msg_user(m);
+
+	if (likely((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER)))
 		msg_set_bits(m, 5, 13, 0x7, i);
-	else if (likely(i < TIPC_SYSTEM_IMPORTANCE))
+	else if (i < TIPC_SYSTEM_IMPORTANCE)
 		msg_set_user(m, i);
 	else
 		pr_warn("Trying to set illegal importance in message\n");
@@ -772,9 +776,9 @@
 				uint data_sz, u32 dnode, u32 onode,
 				u32 dport, u32 oport, int errcode);
 int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
-bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu);
-
-bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode);
+bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu);
+bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
+			  u32 mtu, u32 dnode);
 bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
 int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
 		   int offset, int dsz, int mtu, struct sk_buff_head *list);
@@ -782,6 +786,11 @@
 			  int *err);
 struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
 
+static inline u16 buf_seqno(struct sk_buff *skb)
+{
+	return msg_seqno(buf_msg(skb));
+}
+
 /* tipc_skb_peek(): peek and reserve first buffer in list
  * @list: list to be peeked in
  * Returns pointer to first buffer in list, if any
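
All of the accessors above share one pattern: every header field is a (word, bit offset, mask) triple inside the 32-bit header words, which is why narrowing ack and seqno to u16 changes only the C-level types, never the wire format. A host-order sketch of the pattern (the real msg_bits()/msg_set_bits() also convert network byte order; names here are illustrative):

    #include <stdint.h>

    static inline uint32_t demo_bits(const uint32_t *hdr, int w, int pos,
                                     uint32_t mask)
    {
        return (hdr[w] >> pos) & mask;
    }

    static inline void demo_set_bits(uint32_t *hdr, int w, int pos,
                                     uint32_t mask, uint32_t val)
    {
        hdr[w] &= ~(mask << pos);
        hdr[w] |= (val & mask) << pos;
    }

    /* seqno: word 2, bits 0-15  -> demo_bits(hdr, 2, 0, 0xffff)
     * ack:   word 2, bits 16-31 -> demo_bits(hdr, 2, 16, 0xffff)
     */
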
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index ab0ac62..0f47f08 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -330,13 +330,9 @@
 
 	/* Any subscriptions waiting for notification?  */
 	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
-		tipc_subscr_report_overlap(s,
-					   publ->lower,
-					   publ->upper,
-					   TIPC_PUBLISHED,
-					   publ->ref,
-					   publ->node,
-					   created_subseq);
+		tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
+					    TIPC_PUBLISHED, publ->ref,
+					    publ->node, created_subseq);
 	}
 	return publ;
 }
@@ -404,13 +400,9 @@
 
 	/* Notify any waiting subscriptions */
 	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
-		tipc_subscr_report_overlap(s,
-					   publ->lower,
-					   publ->upper,
-					   TIPC_WITHDRAWN,
-					   publ->ref,
-					   publ->node,
-					   removed_subseq);
+		tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
+					    TIPC_WITHDRAWN, publ->ref,
+					    publ->node, removed_subseq);
 	}
 
 	return publ;
@@ -432,19 +424,17 @@
 		return;
 
 	while (sseq != &nseq->sseqs[nseq->first_free]) {
-		if (tipc_subscr_overlap(s, sseq->lower, sseq->upper)) {
+		if (tipc_subscrp_check_overlap(s, sseq->lower, sseq->upper)) {
 			struct publication *crs;
 			struct name_info *info = sseq->info;
 			int must_report = 1;
 
 			list_for_each_entry(crs, &info->zone_list, zone_list) {
-				tipc_subscr_report_overlap(s,
-							   sseq->lower,
-							   sseq->upper,
-							   TIPC_PUBLISHED,
-							   crs->ref,
-							   crs->node,
-							   must_report);
+				tipc_subscrp_report_overlap(s, sseq->lower,
+							    sseq->upper,
+							    TIPC_PUBLISHED,
+							    crs->ref, crs->node,
+							    must_report);
 				must_report = 0;
 			}
 		}
diff --git a/net/tipc/net.c b/net/tipc/net.c
index a54f3cb..d6d1399 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -40,6 +40,7 @@
 #include "subscr.h"
 #include "socket.h"
 #include "node.h"
+#include "bcast.h"
 
 static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
 	[TIPC_NLA_NET_UNSPEC]	= { .type = NLA_UNSPEC },
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index ce9121e..53e0fee 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -55,6 +55,7 @@
 	int rep_type;
 	int rep_size;
 	int req_type;
+	struct net *net;
 	struct sk_buff *rep;
 	struct tlv_desc *req;
 	struct sock *dst_sk;
@@ -68,7 +69,8 @@
 
 struct tipc_nl_compat_cmd_doit {
 	int (*doit)(struct sk_buff *skb, struct genl_info *info);
-	int (*transcode)(struct sk_buff *skb, struct tipc_nl_compat_msg *msg);
+	int (*transcode)(struct tipc_nl_compat_cmd_doit *cmd,
+			 struct sk_buff *skb, struct tipc_nl_compat_msg *msg);
 };
 
 static int tipc_skb_tailroom(struct sk_buff *skb)
@@ -281,7 +283,7 @@
 	if (!trans_buf)
 		return -ENOMEM;
 
-	err = (*cmd->transcode)(trans_buf, msg);
+	err = (*cmd->transcode)(cmd, trans_buf, msg);
 	if (err)
 		goto trans_out;
 
@@ -353,7 +355,8 @@
 			    nla_len(bearer[TIPC_NLA_BEARER_NAME]));
 }
 
-static int tipc_nl_compat_bearer_enable(struct sk_buff *skb,
+static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
+					struct sk_buff *skb,
 					struct tipc_nl_compat_msg *msg)
 {
 	struct nlattr *prop;
@@ -385,7 +388,8 @@
 	return 0;
 }
 
-static int tipc_nl_compat_bearer_disable(struct sk_buff *skb,
+static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
+					 struct sk_buff *skb,
 					 struct tipc_nl_compat_msg *msg)
 {
 	char *name;
@@ -576,11 +580,81 @@
 			    &link_info, sizeof(link_info));
 }
 
-static int tipc_nl_compat_link_set(struct sk_buff *skb,
-				   struct tipc_nl_compat_msg *msg)
+static int __tipc_add_link_prop(struct sk_buff *skb,
+				struct tipc_nl_compat_msg *msg,
+				struct tipc_link_config *lc)
 {
-	struct nlattr *link;
+	switch (msg->cmd) {
+	case TIPC_CMD_SET_LINK_PRI:
+		return nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(lc->value));
+	case TIPC_CMD_SET_LINK_TOL:
+		return nla_put_u32(skb, TIPC_NLA_PROP_TOL, ntohl(lc->value));
+	case TIPC_CMD_SET_LINK_WINDOW:
+		return nla_put_u32(skb, TIPC_NLA_PROP_WIN, ntohl(lc->value));
+	}
+
+	return -EINVAL;
+}
+
+static int tipc_nl_compat_media_set(struct sk_buff *skb,
+				    struct tipc_nl_compat_msg *msg)
+{
 	struct nlattr *prop;
+	struct nlattr *media;
+	struct tipc_link_config *lc;
+
+	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+	media = nla_nest_start(skb, TIPC_NLA_MEDIA);
+	if (!media)
+		return -EMSGSIZE;
+
+	if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
+		return -EMSGSIZE;
+
+	prop = nla_nest_start(skb, TIPC_NLA_MEDIA_PROP);
+	if (!prop)
+		return -EMSGSIZE;
+
+	__tipc_add_link_prop(skb, msg, lc);
+	nla_nest_end(skb, prop);
+	nla_nest_end(skb, media);
+
+	return 0;
+}
+
+static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
+				     struct tipc_nl_compat_msg *msg)
+{
+	struct nlattr *prop;
+	struct nlattr *bearer;
+	struct tipc_link_config *lc;
+
+	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+	bearer = nla_nest_start(skb, TIPC_NLA_BEARER);
+	if (!bearer)
+		return -EMSGSIZE;
+
+	if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
+		return -EMSGSIZE;
+
+	prop = nla_nest_start(skb, TIPC_NLA_BEARER_PROP);
+	if (!prop)
+		return -EMSGSIZE;
+
+	__tipc_add_link_prop(skb, msg, lc);
+	nla_nest_end(skb, prop);
+	nla_nest_end(skb, bearer);
+
+	return 0;
+}
+
+static int __tipc_nl_compat_link_set(struct sk_buff *skb,
+				     struct tipc_nl_compat_msg *msg)
+{
+	struct nlattr *prop;
+	struct nlattr *link;
 	struct tipc_link_config *lc;
 
 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
@@ -596,24 +670,40 @@
 	if (!prop)
 		return -EMSGSIZE;
 
-	if (msg->cmd == TIPC_CMD_SET_LINK_PRI) {
-		if (nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(lc->value)))
-			return -EMSGSIZE;
-	} else if (msg->cmd == TIPC_CMD_SET_LINK_TOL) {
-		if (nla_put_u32(skb, TIPC_NLA_PROP_TOL, ntohl(lc->value)))
-			return -EMSGSIZE;
-	} else if (msg->cmd == TIPC_CMD_SET_LINK_WINDOW) {
-		if (nla_put_u32(skb, TIPC_NLA_PROP_WIN, ntohl(lc->value)))
-			return -EMSGSIZE;
-	}
-
+	__tipc_add_link_prop(skb, msg, lc);
 	nla_nest_end(skb, prop);
 	nla_nest_end(skb, link);
 
 	return 0;
 }
 
-static int tipc_nl_compat_link_reset_stats(struct sk_buff *skb,
+static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
+				   struct sk_buff *skb,
+				   struct tipc_nl_compat_msg *msg)
+{
+	struct tipc_link_config *lc;
+	struct tipc_bearer *bearer;
+	struct tipc_media *media;
+
+	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+	media = tipc_media_find(lc->name);
+	if (media) {
+		cmd->doit = &tipc_nl_media_set;
+		return tipc_nl_compat_media_set(skb, msg);
+	}
+
+	bearer = tipc_bearer_find(msg->net, lc->name);
+	if (bearer) {
+		cmd->doit = &tipc_nl_bearer_set;
+		return tipc_nl_compat_bearer_set(skb, msg);
+	}
+
+	return __tipc_nl_compat_link_set(skb, msg);
+}
+
+static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
+					   struct sk_buff *skb,
 					   struct tipc_nl_compat_msg *msg)
 {
 	char *name;
@@ -851,7 +941,8 @@
 			    sizeof(node_info));
 }
 
-static int tipc_nl_compat_net_set(struct sk_buff *skb,
+static int tipc_nl_compat_net_set(struct tipc_nl_compat_cmd_doit *cmd,
+				  struct sk_buff *skb,
 				  struct tipc_nl_compat_msg *msg)
 {
 	u32 val;
@@ -1007,7 +1098,6 @@
 	struct nlmsghdr *req_nlh;
 	struct nlmsghdr *rep_nlh;
 	struct tipc_genlmsghdr *req_userhdr = info->userhdr;
-	struct net *net = genl_info_net(info);
 
 	memset(&msg, 0, sizeof(msg));
 
@@ -1015,6 +1105,7 @@
 	msg.req = nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN;
 	msg.cmd = req_userhdr->cmd;
 	msg.dst_sk = info->dst_sk;
+	msg.net = genl_info_net(info);
 
 	if ((msg.cmd & 0xC000) && (!netlink_net_capable(skb, CAP_NET_ADMIN))) {
 		msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_NET_ADMIN);
@@ -1030,7 +1121,7 @@
 	}
 
 	err = tipc_nl_compat_handle(&msg);
-	if (err == -EOPNOTSUPP)
+	if ((err == -EOPNOTSUPP) || (err == -EPERM))
 		msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
 	else if (err == -EINVAL)
 		msg.rep = tipc_get_err_tlv(TIPC_CFG_TLV_ERROR);
@@ -1043,7 +1134,7 @@
 	rep_nlh = nlmsg_hdr(msg.rep);
 	memcpy(rep_nlh, info->nlhdr, len);
 	rep_nlh->nlmsg_len = msg.rep->len;
-	genlmsg_unicast(net, msg.rep, NETLINK_CB(skb).portid);
+	genlmsg_unicast(msg.net, msg.rep, NETLINK_CB(skb).portid);
 
 	return err;
 }
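
Handing the command descriptor to the transcode callback, as the netlink_compat.c changes above do, lets tipc_nl_compat_link_set() retarget the eventual handler once it knows whether the legacy name denotes a media, a bearer, or a link. Reduced to its essentials, the dispatch looks like this (types and names illustrative):

    struct demo_compat_cmd {
        int (*doit)(void *req);                 /* final netlink handler */
        int (*transcode)(struct demo_compat_cmd *cmd, void *req);
    };

    static int demo_compat_doit(struct demo_compat_cmd *cmd, void *req)
    {
        int err = cmd->transcode(cmd, req);     /* may rewrite cmd->doit */

        if (err)
            return err;
        return cmd->doit(req);                  /* dispatch the resolved handler */
    }
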
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 22c059a..0b1d61a 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1,7 +1,7 @@
 /*
  * net/tipc/node.c: TIPC node management routines
  *
- * Copyright (c) 2000-2006, 2012-2014, Ericsson AB
+ * Copyright (c) 2000-2006, 2012-2015, Ericsson AB
  * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
  * All rights reserved.
  *
@@ -39,6 +39,7 @@
 #include "node.h"
 #include "name_distr.h"
 #include "socket.h"
+#include "bcast.h"
 
 static void node_lost_contact(struct tipc_node *n_ptr);
 static void node_established_contact(struct tipc_node *n_ptr);
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 02d5c20..5a834cf 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -45,8 +45,6 @@
 /* Out-of-range value for node signature */
 #define INVALID_NODE_SIG	0x10000
 
-#define NODE_HTABLE_SIZE	512
-
 /* Flags used to take different actions according to flag type
  * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
  * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 77ff03e..922e04a 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -309,6 +309,10 @@
 
 	/* Notify that new connection is incoming */
 	newcon->usr_data = s->tipc_conn_new(newcon->conid);
+	if (!newcon->usr_data) {
+		sock_release(newsock);
+		return -ENOMEM;
+	}
 
 	/* Wake up receive process in case of 'SYN+' message */
 	newsock->sk->sk_data_ready(newsock->sk);
@@ -321,7 +325,7 @@
 	struct socket *sock = NULL;
 	int ret;
 
-	ret = __sock_create(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock, 1);
+	ret = sock_create_kern(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock);
 	if (ret < 0)
 		return NULL;
 	ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index f485600..46b6ed5 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -41,6 +41,7 @@
 #include "link.h"
 #include "name_distr.h"
 #include "socket.h"
+#include "bcast.h"
 
 #define SS_LISTENING		-1	/* socket is listening */
 #define SS_READY		-2	/* socket is connectionless */
@@ -342,7 +343,7 @@
 	}
 
 	/* Allocate socket's protocol area */
-	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
+	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
 	if (sk == NULL)
 		return -ENOMEM;
 
@@ -409,7 +410,7 @@
 	struct net *net;
 	struct tipc_sock *tsk;
 	struct sk_buff *skb;
-	u32 dnode, probing_state;
+	u32 dnode;
 
 	/*
 	 * Exit if socket isn't fully initialized (occurs when a failed accept()
@@ -447,10 +448,7 @@
 	}
 
 	tipc_sk_withdraw(tsk, 0, NULL);
-	probing_state = tsk->probing_state;
-	if (del_timer_sync(&sk->sk_timer) &&
-	    probing_state != TIPC_CONN_PROBING)
-		sock_put(sk);
+	sk_stop_timer(sk, &sk->sk_timer);
 	tipc_sk_remove(tsk);
 	if (tsk->connected) {
 		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 1c147c86..350cca3 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -40,16 +40,21 @@
 
 /**
  * struct tipc_subscriber - TIPC network topology subscriber
+ * @kref: reference counter for this subscriber object
  * @conid: connection identifier to server connecting to subscriber
  * @lock: control access to subscriber
- * @subscription_list: list of subscription objects for this subscriber
+ * @subscrp_list: list of subscription objects for this subscriber
  */
 struct tipc_subscriber {
+	struct kref kref;
 	int conid;
 	spinlock_t lock;
-	struct list_head subscription_list;
+	struct list_head subscrp_list;
 };
 
+static void tipc_subscrp_delete(struct tipc_subscription *sub);
+static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
+
 /**
  * htohl - convert value to endianness used by destination
  * @in: value to convert
@@ -62,9 +67,9 @@
 	return swap ? swab32(in) : in;
 }
 
-static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
-			      u32 found_upper, u32 event, u32 port_ref,
-			      u32 node)
+static void tipc_subscrp_send_event(struct tipc_subscription *sub,
+				    u32 found_lower, u32 found_upper,
+				    u32 event, u32 port_ref, u32 node)
 {
 	struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
 	struct tipc_subscriber *subscriber = sub->subscriber;
@@ -82,12 +87,13 @@
 }
 
 /**
- * tipc_subscr_overlap - test for subscription overlap with the given values
+ * tipc_subscrp_check_overlap - test for subscription overlap with the
+ * given values
  *
  * Returns 1 if there is overlap, otherwise 0.
  */
-int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower,
-			u32 found_upper)
+int tipc_subscrp_check_overlap(struct tipc_subscription *sub, u32 found_lower,
+			       u32 found_upper)
 {
 	if (found_lower < sub->seq.lower)
 		found_lower = sub->seq.lower;
@@ -98,138 +104,121 @@
 	return 1;
 }
 
-/**
- * tipc_subscr_report_overlap - issue event if there is subscription overlap
- *
- * Protected by nameseq.lock in name_table.c
- */
-void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower,
-				u32 found_upper, u32 event, u32 port_ref,
-				u32 node, int must)
+void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
+				 u32 found_upper, u32 event, u32 port_ref,
+				 u32 node, int must)
 {
-	if (!tipc_subscr_overlap(sub, found_lower, found_upper))
+	if (!tipc_subscrp_check_overlap(sub, found_lower, found_upper))
 		return;
 	if (!must && !(sub->filter & TIPC_SUB_PORTS))
 		return;
 
-	subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
+	tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
+				node);
 }
 
-static void subscr_timeout(unsigned long data)
+static void tipc_subscrp_timeout(unsigned long data)
 {
 	struct tipc_subscription *sub = (struct tipc_subscription *)data;
 	struct tipc_subscriber *subscriber = sub->subscriber;
-	struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
-
-	/* The spin lock per subscriber is used to protect its members */
-	spin_lock_bh(&subscriber->lock);
-
-	/* Validate timeout (in case subscription is being cancelled) */
-	if (sub->timeout == TIPC_WAIT_FOREVER) {
-		spin_unlock_bh(&subscriber->lock);
-		return;
-	}
-
-	/* Unlink subscription from name table */
-	tipc_nametbl_unsubscribe(sub);
-
-	/* Unlink subscription from subscriber */
-	list_del(&sub->subscription_list);
-
-	spin_unlock_bh(&subscriber->lock);
 
 	/* Notify subscriber of timeout */
-	subscr_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
-			  TIPC_SUBSCR_TIMEOUT, 0, 0);
+	tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
+				TIPC_SUBSCR_TIMEOUT, 0, 0);
 
-	/* Now destroy subscription */
-	kfree(sub);
-	atomic_dec(&tn->subscription_count);
+	spin_lock_bh(&subscriber->lock);
+	tipc_subscrp_delete(sub);
+	spin_unlock_bh(&subscriber->lock);
+
+	tipc_subscrb_put(subscriber);
 }
 
-/**
- * subscr_del - delete a subscription within a subscription list
- *
- * Called with subscriber lock held.
- */
-static void subscr_del(struct tipc_subscription *sub)
+static void tipc_subscrb_kref_release(struct kref *kref)
+{
+	struct tipc_subscriber *subscriber = container_of(kref,
+					     struct tipc_subscriber, kref);
+
+	kfree(subscriber);
+}
+
+static void tipc_subscrb_put(struct tipc_subscriber *subscriber)
+{
+	kref_put(&subscriber->kref, tipc_subscrb_kref_release);
+}
+
+static void tipc_subscrb_get(struct tipc_subscriber *subscriber)
+{
+	kref_get(&subscriber->kref);
+}
+
+static struct tipc_subscriber *tipc_subscrb_create(int conid)
+{
+	struct tipc_subscriber *subscriber;
+
+	subscriber = kzalloc(sizeof(*subscriber), GFP_ATOMIC);
+	if (!subscriber) {
+		pr_warn("Subscriber rejected, no memory\n");
+		return NULL;
+	}
+	kref_init(&subscriber->kref);
+	INIT_LIST_HEAD(&subscriber->subscrp_list);
+	subscriber->conid = conid;
+	spin_lock_init(&subscriber->lock);
+
+	return subscriber;
+}
+
+static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
+{
+	struct tipc_subscription *sub, *temp;
+
+	spin_lock_bh(&subscriber->lock);
+	/* Destroy any existing subscriptions for subscriber */
+	list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
+				 subscrp_list) {
+		if (del_timer(&sub->timer)) {
+			tipc_subscrp_delete(sub);
+			tipc_subscrb_put(subscriber);
+		}
+	}
+	spin_unlock_bh(&subscriber->lock);
+
+	tipc_subscrb_put(subscriber);
+}
+
+static void tipc_subscrp_delete(struct tipc_subscription *sub)
 {
 	struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
 
 	tipc_nametbl_unsubscribe(sub);
-	list_del(&sub->subscription_list);
+	list_del(&sub->subscrp_list);
 	kfree(sub);
 	atomic_dec(&tn->subscription_count);
 }
 
-static void subscr_release(struct tipc_subscriber *subscriber)
+static void tipc_subscrp_cancel(struct tipc_subscr *s,
+				struct tipc_subscriber *subscriber)
 {
-	struct tipc_subscription *sub;
-	struct tipc_subscription *sub_temp;
+	struct tipc_subscription *sub, *temp;
 
 	spin_lock_bh(&subscriber->lock);
-
-	/* Destroy any existing subscriptions for subscriber */
-	list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
-				 subscription_list) {
-		if (sub->timeout != TIPC_WAIT_FOREVER) {
-			spin_unlock_bh(&subscriber->lock);
-			del_timer_sync(&sub->timer);
-			spin_lock_bh(&subscriber->lock);
-		}
-		subscr_del(sub);
-	}
-	spin_unlock_bh(&subscriber->lock);
-
-	/* Now destroy subscriber */
-	kfree(subscriber);
-}
-
-/**
- * subscr_cancel - handle subscription cancellation request
- *
- * Called with subscriber lock held. Routine must temporarily release lock
- * to enable the subscription timeout routine to finish without deadlocking;
- * the lock is then reclaimed to allow caller to release it upon return.
- *
- * Note that fields of 's' use subscriber's endianness!
- */
-static void subscr_cancel(struct tipc_subscr *s,
-			  struct tipc_subscriber *subscriber)
-{
-	struct tipc_subscription *sub;
-	struct tipc_subscription *sub_temp;
-	int found = 0;
-
 	/* Find first matching subscription, exit if not found */
-	list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
-				 subscription_list) {
+	list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
+				 subscrp_list) {
 		if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
-			found = 1;
+			if (del_timer(&sub->timer)) {
+				tipc_subscrp_delete(sub);
+				tipc_subscrb_put(subscriber);
+			}
 			break;
 		}
 	}
-	if (!found)
-		return;
-
-	/* Cancel subscription timer (if used), then delete subscription */
-	if (sub->timeout != TIPC_WAIT_FOREVER) {
-		sub->timeout = TIPC_WAIT_FOREVER;
-		spin_unlock_bh(&subscriber->lock);
-		del_timer_sync(&sub->timer);
-		spin_lock_bh(&subscriber->lock);
-	}
-	subscr_del(sub);
+	spin_unlock_bh(&subscriber->lock);
 }
 
-/**
- * subscr_subscribe - create subscription for subscriber
- *
- * Called with subscriber lock held.
- */
-static int subscr_subscribe(struct net *net, struct tipc_subscr *s,
-			    struct tipc_subscriber *subscriber,
-			    struct tipc_subscription **sub_p)
+static int tipc_subscrp_create(struct net *net, struct tipc_subscr *s,
+			       struct tipc_subscriber *subscriber,
+			       struct tipc_subscription **sub_p)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	struct tipc_subscription *sub;
@@ -241,7 +230,7 @@
 	/* Detect & process a subscription cancellation request */
 	if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
 		s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
-		subscr_cancel(s, subscriber);
+		tipc_subscrp_cancel(s, subscriber);
 		return 0;
 	}
 
@@ -273,62 +262,51 @@
 		kfree(sub);
 		return -EINVAL;
 	}
-	list_add(&sub->subscription_list, &subscriber->subscription_list);
+	spin_lock_bh(&subscriber->lock);
+	list_add(&sub->subscrp_list, &subscriber->subscrp_list);
+	spin_unlock_bh(&subscriber->lock);
 	sub->subscriber = subscriber;
 	sub->swap = swap;
-	memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
+	memcpy(&sub->evt.s, s, sizeof(*s));
 	atomic_inc(&tn->subscription_count);
-	if (sub->timeout != TIPC_WAIT_FOREVER) {
-		setup_timer(&sub->timer, subscr_timeout, (unsigned long)sub);
-		mod_timer(&sub->timer, jiffies + sub->timeout);
-	}
+	setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
+	if (sub->timeout != TIPC_WAIT_FOREVER)
+		sub->timeout += jiffies;
+	if (!mod_timer(&sub->timer, sub->timeout))
+		tipc_subscrb_get(subscriber);
 	*sub_p = sub;
 	return 0;
 }
 
 /* Handle one termination request for the subscriber */
-static void subscr_conn_shutdown_event(int conid, void *usr_data)
+static void tipc_subscrb_shutdown_cb(int conid, void *usr_data)
 {
-	subscr_release((struct tipc_subscriber *)usr_data);
+	tipc_subscrb_delete((struct tipc_subscriber *)usr_data);
 }
 
 /* Handle one request to create a new subscription for the subscriber */
-static void subscr_conn_msg_event(struct net *net, int conid,
-				  struct sockaddr_tipc *addr, void *usr_data,
-				  void *buf, size_t len)
+static void tipc_subscrb_rcv_cb(struct net *net, int conid,
+				struct sockaddr_tipc *addr, void *usr_data,
+				void *buf, size_t len)
 {
 	struct tipc_subscriber *subscriber = usr_data;
 	struct tipc_subscription *sub = NULL;
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-	spin_lock_bh(&subscriber->lock);
-	subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber, &sub);
+	tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscriber, &sub);
 	if (sub)
 		tipc_nametbl_subscribe(sub);
 	else
 		tipc_conn_terminate(tn->topsrv, subscriber->conid);
-	spin_unlock_bh(&subscriber->lock);
 }
 
 /* Handle one request to establish a new subscriber */
-static void *subscr_named_msg_event(int conid)
+static void *tipc_subscrb_connect_cb(int conid)
 {
-	struct tipc_subscriber *subscriber;
-
-	/* Create subscriber object */
-	subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC);
-	if (subscriber == NULL) {
-		pr_warn("Subscriber rejected, no memory\n");
-		return NULL;
-	}
-	INIT_LIST_HEAD(&subscriber->subscription_list);
-	subscriber->conid = conid;
-	spin_lock_init(&subscriber->lock);
-
-	return (void *)subscriber;
+	return (void *)tipc_subscrb_create(conid);
 }
 
-int tipc_subscr_start(struct net *net)
+int tipc_topsrv_start(struct net *net)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	const char name[] = "topology_server";
@@ -355,9 +333,9 @@
 	topsrv->imp			= TIPC_CRITICAL_IMPORTANCE;
 	topsrv->type			= SOCK_SEQPACKET;
 	topsrv->max_rcvbuf_size		= sizeof(struct tipc_subscr);
-	topsrv->tipc_conn_recvmsg	= subscr_conn_msg_event;
-	topsrv->tipc_conn_new		= subscr_named_msg_event;
-	topsrv->tipc_conn_shutdown	= subscr_conn_shutdown_event;
+	topsrv->tipc_conn_recvmsg	= tipc_subscrb_rcv_cb;
+	topsrv->tipc_conn_new		= tipc_subscrb_connect_cb;
+	topsrv->tipc_conn_shutdown	= tipc_subscrb_shutdown_cb;
 
 	strncpy(topsrv->name, name, strlen(name) + 1);
 	tn->topsrv = topsrv;
@@ -366,7 +344,7 @@
 	return tipc_server_start(topsrv);
 }
 
-void tipc_subscr_stop(struct net *net)
+void tipc_topsrv_stop(struct net *net)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	struct tipc_server *topsrv = tn->topsrv;
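
The subscription teardown above leans on a common timer-versus-refcount idiom: arming the timer takes a reference on the subscriber, and del_timer()'s return value decides who drops it. A non-zero return means the pending timer was deactivated before it could fire, so the canceller inherits and releases the timer's reference; zero means tipc_subscrp_timeout() is running (or already ran) and performs the put itself. In outline, using the names from this patch:

    /* Sketch of cancelling one subscription; mirrors the pattern above. */
    static void demo_cancel(struct tipc_subscription *sub,
                            struct tipc_subscriber *subscriber)
    {
        if (del_timer(&sub->timer)) {           /* timer was still pending */
            tipc_subscrp_delete(sub);
            tipc_subscrb_put(subscriber);       /* drop the timer's reference */
        }
        /* else: tipc_subscrp_timeout() does the delete and the put */
    }
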
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 33488bd..92ee18cc 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -54,7 +54,7 @@
  * @filter: event filtering to be done for subscription
  * @timer: timer governing subscription duration (optional)
  * @nameseq_list: adjacent subscriptions in name sequence's subscription list
- * @subscription_list: adjacent subscriptions in subscriber's subscription list
+ * @subscrp_list: adjacent subscriptions in subscriber's subscription list
  * @server_ref: object reference of server port associated with subscription
  * @swap: indicates if subscriber uses opposite endianness in its messages
  * @evt: template for events generated by subscription
@@ -67,17 +67,17 @@
 	u32 filter;
 	struct timer_list timer;
 	struct list_head nameseq_list;
-	struct list_head subscription_list;
+	struct list_head subscrp_list;
 	int swap;
 	struct tipc_event evt;
 };
 
-int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower,
-			u32 found_upper);
-void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower,
-				u32 found_upper, u32 event, u32 port_ref,
-				u32 node, int must);
-int tipc_subscr_start(struct net *net);
-void tipc_subscr_stop(struct net *net);
+int tipc_subscrp_check_overlap(struct tipc_subscription *sub, u32 found_lower,
+			       u32 found_upper);
+void tipc_subscrp_report_overlap(struct tipc_subscription *sub,
+				 u32 found_lower, u32 found_upper, u32 event,
+				 u32 port_ref, u32 node, int must);
+int tipc_topsrv_start(struct net *net);
+void tipc_topsrv_stop(struct net *net);
 
 #endif
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0643059..03ee4d3 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -140,12 +140,17 @@
 #ifdef CONFIG_SECURITY_NETWORK
 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 {
-	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
+	UNIXCB(skb).secid = scm->secid;
 }
 
 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 {
-	scm->secid = *UNIXSID(skb);
+	scm->secid = UNIXCB(skb).secid;
+}
+
+static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
+{
+	return (scm->secid == UNIXCB(skb).secid);
 }
 #else
 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
@@ -153,6 +158,11 @@
 
 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 { }
+
+static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
+{
+	return true;
+}
 #endif /* CONFIG_SECURITY_NETWORK */
 
 /*
@@ -518,6 +528,11 @@
 static int unix_shutdown(struct socket *, int);
 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
+static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
+				    size_t size, int flags);
+static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
+				       struct pipe_inode_info *, size_t size,
+				       unsigned int flags);
 static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
 static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
 static int unix_dgram_connect(struct socket *, struct sockaddr *,
@@ -558,7 +573,8 @@
 	.sendmsg =	unix_stream_sendmsg,
 	.recvmsg =	unix_stream_recvmsg,
 	.mmap =		sock_no_mmap,
-	.sendpage =	sock_no_sendpage,
+	.sendpage =	unix_stream_sendpage,
+	.splice_read =	unix_stream_splice_read,
 	.set_peek_off =	unix_set_peek_off,
 };
 
@@ -620,7 +636,7 @@
  */
 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
 
-static struct sock *unix_create1(struct net *net, struct socket *sock)
+static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
 {
 	struct sock *sk = NULL;
 	struct unix_sock *u;
@@ -629,7 +645,7 @@
 	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
 		goto out;
 
-	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
+	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
 	if (!sk)
 		goto out;
 
@@ -688,7 +704,7 @@
 		return -ESOCKTNOSUPPORT;
 	}
 
-	return unix_create1(net, sock) ? 0 : -ENOMEM;
+	return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
 }
 
 static int unix_release(struct socket *sock)
@@ -1088,7 +1104,7 @@
 	err = -ENOMEM;
 
 	/* create new sock for complete connection */
-	newsk = unix_create1(sock_net(sk), NULL);
+	newsk = unix_create1(sock_net(sk), NULL, 0);
 	if (newsk == NULL)
 		goto out;
 
@@ -1408,6 +1424,7 @@
 	UNIXCB(skb).uid = scm->creds.uid;
 	UNIXCB(skb).gid = scm->creds.gid;
 	UNIXCB(skb).fp = NULL;
+	unix_get_secdata(scm, skb);
 	if (scm->fp && send_fds)
 		err = unix_attach_fds(scm, skb);
 
@@ -1503,7 +1520,6 @@
 	if (err < 0)
 		goto out_free;
 	max_level = err + 1;
-	unix_get_secdata(&scm, skb);
 
 	skb_put(skb, len - data_len);
 	skb->data_len = data_len;
@@ -1720,6 +1736,101 @@
 	return sent ? : err;
 }
 
+static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
+				    int offset, size_t size, int flags)
+{
+	int err = 0;
+	bool send_sigpipe = true;
+	struct sock *other, *sk = socket->sk;
+	struct sk_buff *skb, *newskb = NULL, *tail = NULL;
+
+	if (flags & MSG_OOB)
+		return -EOPNOTSUPP;
+
+	other = unix_peer(sk);
+	if (!other || sk->sk_state != TCP_ESTABLISHED)
+		return -ENOTCONN;
+
+	if (false) {
+alloc_skb:
+		unix_state_unlock(other);
+		mutex_unlock(&unix_sk(other)->readlock);
+		newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
+					      &err, 0);
+		if (!newskb)
+			return err;
+	}
+
+	/* we must acquire readlock as we modify already present
+	 * skbs in the sk_receive_queue and mess with skb->len
+	 */
+	err = mutex_lock_interruptible(&unix_sk(other)->readlock);
+	if (err) {
+		err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
+		send_sigpipe = false;
+		goto err;
+	}
+
+	if (sk->sk_shutdown & SEND_SHUTDOWN) {
+		err = -EPIPE;
+		goto err_unlock;
+	}
+
+	unix_state_lock(other);
+
+	if (sock_flag(other, SOCK_DEAD) ||
+	    other->sk_shutdown & RCV_SHUTDOWN) {
+		err = -EPIPE;
+		goto err_state_unlock;
+	}
+
+	skb = skb_peek_tail(&other->sk_receive_queue);
+	if (tail && tail == skb) {
+		skb = newskb;
+	} else if (!skb) {
+		if (newskb)
+			skb = newskb;
+		else
+			goto alloc_skb;
+	} else if (newskb) {
+			/* Fast path: we will append to the existing tail
+			 * skb, so the speculatively allocated newskb is
+			 * not needed; consume_skb() frees it without
+			 * counting it as a drop (and tolerates NULL).
+			 */
+		consume_skb(newskb);
+	}
+
+	if (skb_append_pagefrags(skb, page, offset, size)) {
+		tail = skb;
+		goto alloc_skb;
+	}
+
+	skb->len += size;
+	skb->data_len += size;
+	skb->truesize += size;
+	atomic_add(size, &sk->sk_wmem_alloc);
+
+	if (newskb)
+		__skb_queue_tail(&other->sk_receive_queue, newskb);
+
+	unix_state_unlock(other);
+	mutex_unlock(&unix_sk(other)->readlock);
+
+	other->sk_data_ready(other);
+
+	return size;
+
+err_state_unlock:
+	unix_state_unlock(other);
+err_unlock:
+	mutex_unlock(&unix_sk(other)->readlock);
+err:
+	kfree_skb(newskb);
+	if (send_sigpipe && !(flags & MSG_NOSIGNAL))
+		send_sig(SIGPIPE, current, 0);
+	return err;
+}
+
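
The "if (false)" block in unix_stream_sendpage() above is deliberate: the allocation path is statically unreachable and only ever entered via goto from a point where both locks are held, so the label can drop the locks, sleep in the allocator, and fall out of the dead branch onto the normal lock-and-append path again. A skeleton of that control flow, with hypothetical demo_* helpers standing in for the real locking and allocation calls:

    #include <errno.h>
    #include <stdbool.h>

    /* Control-flow skeleton only; the demo_* helpers are hypothetical. */
    static int demo_send(void)
    {
        void *newbuf = NULL;

        if (false) {
    alloc_retry:
            demo_unlock();              /* cannot sleep while locked */
            newbuf = demo_alloc();      /* may sleep */
            if (!newbuf)
                return -ENOMEM;
        }
        demo_lock();
        if (!demo_try_append(newbuf))
            goto alloc_retry;           /* unlocks at the label, retries */
        demo_unlock();
        return 0;
    }
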
 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
 				  size_t len)
 {
@@ -1860,8 +1971,9 @@
  *	Sleep until more data has arrived. But check for races..
  */
 static long unix_stream_data_wait(struct sock *sk, long timeo,
-				  struct sk_buff *last)
+				  struct sk_buff *last, unsigned int last_len)
 {
+	struct sk_buff *tail;
 	DEFINE_WAIT(wait);
 
 	unix_state_lock(sk);
@@ -1869,7 +1981,9 @@
 	for (;;) {
 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
-		if (skb_peek_tail(&sk->sk_receive_queue) != last ||
+		tail = skb_peek_tail(&sk->sk_receive_queue);
+		if (tail != last ||
+		    (tail && tail->len != last_len) ||
 		    sk->sk_err ||
 		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
 		    signal_pending(current) ||
@@ -1897,38 +2011,50 @@
 	return skb->len - UNIXCB(skb).consumed;
 }
 
-static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
-			       size_t size, int flags)
+struct unix_stream_read_state {
+	int (*recv_actor)(struct sk_buff *, int, int,
+			  struct unix_stream_read_state *);
+	struct socket *socket;
+	struct msghdr *msg;
+	struct pipe_inode_info *pipe;
+	size_t size;
+	int flags;
+	unsigned int splice_flags;
+};
+
+static int unix_stream_read_generic(struct unix_stream_read_state *state)
 {
 	struct scm_cookie scm;
+	struct socket *sock = state->socket;
 	struct sock *sk = sock->sk;
 	struct unix_sock *u = unix_sk(sk);
-	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
 	int copied = 0;
+	int flags = state->flags;
 	int noblock = flags & MSG_DONTWAIT;
-	int check_creds = 0;
+	bool check_creds = false;
 	int target;
 	int err = 0;
 	long timeo;
 	int skip;
+	size_t size = state->size;
+	unsigned int last_len;
 
 	err = -EINVAL;
 	if (sk->sk_state != TCP_ESTABLISHED)
 		goto out;
 
 	err = -EOPNOTSUPP;
-	if (flags&MSG_OOB)
+	if (flags & MSG_OOB)
 		goto out;
 
-	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
+	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
 	timeo = sock_rcvtimeo(sk, noblock);
 
+	memset(&scm, 0, sizeof(scm));
+
 	/* Lock the socket to prevent queue disordering
 	 * while sleeps in memcpy_tomsg
 	 */
-
-	memset(&scm, 0, sizeof(scm));
-
 	err = mutex_lock_interruptible(&u->readlock);
 	if (unlikely(err)) {
 		/* recvmsg() in non blocking mode is supposed to return -EAGAIN
@@ -1948,6 +2074,7 @@
 			goto unlock;
 		}
 		last = skb = skb_peek(&sk->sk_receive_queue);
+		last_len = last ? last->len : 0;
 again:
 		if (skb == NULL) {
 			unix_sk(sk)->recursion_level = 0;
@@ -1970,16 +2097,17 @@
 				break;
 			mutex_unlock(&u->readlock);
 
-			timeo = unix_stream_data_wait(sk, timeo, last);
+			timeo = unix_stream_data_wait(sk, timeo, last,
+						      last_len);
 
-			if (signal_pending(current)
-			    ||  mutex_lock_interruptible(&u->readlock)) {
+			if (signal_pending(current) ||
+			    mutex_lock_interruptible(&u->readlock)) {
 				err = sock_intr_errno(timeo);
 				goto out;
 			}
 
 			continue;
- unlock:
+unlock:
 			unix_state_unlock(sk);
 			break;
 		}
@@ -1988,6 +2116,7 @@
 		while (skip >= unix_skb_len(skb)) {
 			skip -= unix_skb_len(skb);
 			last = skb;
+			last_len = skb->len;
 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
 			if (!skb)
 				goto again;
@@ -1999,23 +2128,27 @@
 			/* Never glue messages from different writers */
 			if ((UNIXCB(skb).pid  != scm.pid) ||
 			    !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
-			    !gid_eq(UNIXCB(skb).gid, scm.creds.gid))
+			    !gid_eq(UNIXCB(skb).gid, scm.creds.gid) ||
+			    !unix_secdata_eq(&scm, skb))
 				break;
 		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
 			/* Copy credentials */
 			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
-			check_creds = 1;
+			unix_set_secdata(&scm, skb);
+			check_creds = true;
 		}
 
 		/* Copy address just once */
-		if (sunaddr) {
-			unix_copy_addr(msg, skb->sk);
+		if (state->msg && state->msg->msg_name) {
+			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
+					 state->msg->msg_name);
+			unix_copy_addr(state->msg, skb->sk);
 			sunaddr = NULL;
 		}
 
 		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
-		if (skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
-					  msg, chunk)) {
+		chunk = state->recv_actor(skb, skip, chunk, state);
+		if (chunk < 0) {
 			if (copied == 0)
 				copied = -EFAULT;
 			break;
@@ -2053,11 +2186,85 @@
 	} while (size);
 
 	mutex_unlock(&u->readlock);
-	scm_recv(sock, msg, &scm, flags);
+	if (state->msg)
+		scm_recv(sock, state->msg, &scm, flags);
+	else
+		scm_destroy(&scm);
 out:
 	return copied ? : err;
 }
 
+static int unix_stream_read_actor(struct sk_buff *skb,
+				  int skip, int chunk,
+				  struct unix_stream_read_state *state)
+{
+	int ret;
+
+	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
+				    state->msg, chunk);
+	return ret ?: chunk;
+}
+
+static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+			       size_t size, int flags)
+{
+	struct unix_stream_read_state state = {
+		.recv_actor = unix_stream_read_actor,
+		.socket = sock,
+		.msg = msg,
+		.size = size,
+		.flags = flags
+	};
+
+	return unix_stream_read_generic(&state);
+}
+
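+/* splice_to_pipe() may sleep when the pipe is full, so drop the socket
+ * readlock while the data is handed over to the pipe.
+ */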
+static ssize_t skb_unix_socket_splice(struct sock *sk,
+				      struct pipe_inode_info *pipe,
+				      struct splice_pipe_desc *spd)
+{
+	int ret;
+	struct unix_sock *u = unix_sk(sk);
+
+	mutex_unlock(&u->readlock);
+	ret = splice_to_pipe(pipe, spd);
+	mutex_lock(&u->readlock);
+
+	return ret;
+}
+
+static int unix_stream_splice_actor(struct sk_buff *skb,
+				    int skip, int chunk,
+				    struct unix_stream_read_state *state)
+{
+	return skb_splice_bits(skb, state->socket->sk,
+			       UNIXCB(skb).consumed + skip,
+			       state->pipe, chunk, state->splice_flags,
+			       skb_unix_socket_splice);
+}
+
+static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
+				       struct pipe_inode_info *pipe,
+				       size_t size, unsigned int flags)
+{
+	struct unix_stream_read_state state = {
+		.recv_actor = unix_stream_splice_actor,
+		.socket = sock,
+		.pipe = pipe,
+		.size = size,
+		.splice_flags = flags,
+	};
+
+	if (unlikely(*ppos))
+		return -ESPIPE;
+
+	if (sock->file->f_flags & O_NONBLOCK ||
+	    flags & SPLICE_F_NONBLOCK)
+		state.flags = MSG_DONTWAIT;
+
+	return unix_stream_read_generic(&state);
+}
+
 static int unix_shutdown(struct socket *sock, int mode)
 {
 	struct sock *sk = sock->sk;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 2ec86e6..df5fc6b 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -581,13 +581,14 @@
 			    struct socket *sock,
 			    struct sock *parent,
 			    gfp_t priority,
-			    unsigned short type)
+			    unsigned short type,
+			    int kern)
 {
 	struct sock *sk;
 	struct vsock_sock *psk;
 	struct vsock_sock *vsk;
 
-	sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto);
+	sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern);
 	if (!sk)
 		return NULL;
 
@@ -1866,7 +1867,7 @@
 
 	sock->state = SS_UNCONNECTED;
 
-	return __vsock_create(net, sock, NULL, GFP_KERNEL, 0) ? 0 : -ENOMEM;
+	return __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern) ? 0 : -ENOMEM;
 }
 
 static const struct net_proto_family vsock_family_ops = {
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index c294da0..1f63daf 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1022,7 +1022,7 @@
 	}
 
 	pending = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
-				 sk->sk_type);
+				 sk->sk_type, 0);
 	if (!pending) {
 		vmci_transport_send_reset(sk, pkt);
 		return -ENOMEM;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 7aaf741..915b328 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -698,19 +698,20 @@
 EXPORT_SYMBOL(cfg80211_chandef_usable);
 
 /*
- * For GO only, check if the channel can be used under permissive conditions
- * mandated by the some regulatory bodies, i.e., the channel is marked with
- * IEEE80211_CHAN_GO_CONCURRENT and there is an additional station interface
+ * Check if the channel can be used under permissive conditions mandated by
+ * some regulatory bodies, i.e., the channel is marked with
+ * IEEE80211_CHAN_IR_CONCURRENT and there is an additional station interface
  * associated to an AP on the same channel or on the same UNII band
  * (assuming that the AP is an authorized master).
- * In addition allow the GO to operate on a channel on which indoor operation is
+ * In addition allow operation on a channel on which indoor operation is
  * allowed, iff we are currently operating in an indoor environment.
  */
-static bool cfg80211_go_permissive_chan(struct cfg80211_registered_device *rdev,
+static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy,
+					enum nl80211_iftype iftype,
 					struct ieee80211_channel *chan)
 {
-	struct wireless_dev *wdev_iter;
-	struct wiphy *wiphy = wiphy_idx_to_wiphy(rdev->wiphy_idx);
+	struct wireless_dev *wdev;
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
 	ASSERT_RTNL();
 
@@ -718,32 +719,48 @@
 	    !(wiphy->regulatory_flags & REGULATORY_ENABLE_RELAX_NO_IR))
 		return false;
 
+	/* only valid for GO and TDLS off-channel (station/p2p-CL) */
+	if (iftype != NL80211_IFTYPE_P2P_GO &&
+	    iftype != NL80211_IFTYPE_STATION &&
+	    iftype != NL80211_IFTYPE_P2P_CLIENT)
+		return false;
+
 	if (regulatory_indoor_allowed() &&
 	    (chan->flags & IEEE80211_CHAN_INDOOR_ONLY))
 		return true;
 
-	if (!(chan->flags & IEEE80211_CHAN_GO_CONCURRENT))
+	if (!(chan->flags & IEEE80211_CHAN_IR_CONCURRENT))
 		return false;
 
 	/*
 	 * Generally, it is possible to rely on another device/driver to allow
-	 * the GO concurrent relaxation, however, since the device can further
+	 * the IR concurrent relaxation, however, since the device can further
 	 * enforce the relaxation (by doing a similar verification to this),
 	 * and thus fail the GO instantiation, consider only the interfaces of
 	 * the current registered device.
 	 */
-	list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
+	list_for_each_entry(wdev, &rdev->wdev_list, list) {
 		struct ieee80211_channel *other_chan = NULL;
 		int r1, r2;
 
-		if (wdev_iter->iftype != NL80211_IFTYPE_STATION ||
-		    !netif_running(wdev_iter->netdev))
-			continue;
+		wdev_lock(wdev);
+		if (wdev->iftype == NL80211_IFTYPE_STATION &&
+		    wdev->current_bss)
+			other_chan = wdev->current_bss->pub.channel;
 
-		wdev_lock(wdev_iter);
-		if (wdev_iter->current_bss)
-			other_chan = wdev_iter->current_bss->pub.channel;
-		wdev_unlock(wdev_iter);
+		/*
+		 * If a GO already operates on the same GO_CONCURRENT channel,
+		 * this one (maybe the same one) can beacon as well. We allow
+		 * the operation even if the station we relied on with
+		 * GO_CONCURRENT is disconnected now. But then we must make sure
+		 * we're not outdoor on an indoor-only channel.
+		 */
+		if (iftype == NL80211_IFTYPE_P2P_GO &&
+		    wdev->iftype == NL80211_IFTYPE_P2P_GO &&
+		    wdev->beacon_interval &&
+		    !(chan->flags & IEEE80211_CHAN_INDOOR_ONLY))
+			other_chan = wdev->chandef.chan;
+		wdev_unlock(wdev);
 
 		if (!other_chan)
 			continue;
@@ -784,7 +801,6 @@
 			     struct cfg80211_chan_def *chandef,
 			     enum nl80211_iftype iftype)
 {
-	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 	bool res;
 	u32 prohibited_flags = IEEE80211_CHAN_DISABLED |
 			       IEEE80211_CHAN_RADAR;
@@ -792,13 +808,12 @@
 	trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype);
 
 	/*
-	 * Under certain conditions suggested by the some regulatory bodies
-	 * a GO can operate on channels marked with IEEE80211_NO_IR
-	 * so set this flag only if such relaxations are not enabled and
-	 * the conditions are not met.
+	 * Under certain conditions suggested by some regulatory bodies a
+	 * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag
+	 * only if such relaxations are not enabled and the conditions are not
+	 * met.
 	 */
-	if (iftype != NL80211_IFTYPE_P2P_GO ||
-	    !cfg80211_go_permissive_chan(rdev, chandef->chan))
+	if (!cfg80211_ir_permissive_chan(wiphy, iftype, chandef->chan))
 		prohibited_flags |= IEEE80211_CHAN_NO_IR;
 
 	if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 &&
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 801cd49..311eef2 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -222,6 +222,7 @@
 			const u8 *ie;
 			size_t ie_len;
 			u16 reason;
+			bool locally_generated;
 		} dc;
 		struct {
 			u8 bssid[ETH_ALEN];
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index dd78445..c264eff 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -639,8 +639,8 @@
 		if ((chan->flags & IEEE80211_CHAN_INDOOR_ONLY) &&
 		    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_INDOOR_ONLY))
 			goto nla_put_failure;
-		if ((chan->flags & IEEE80211_CHAN_GO_CONCURRENT) &&
-		    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_GO_CONCURRENT))
+		if ((chan->flags & IEEE80211_CHAN_IR_CONCURRENT) &&
+		    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_IR_CONCURRENT))
 			goto nla_put_failure;
 		if ((chan->flags & IEEE80211_CHAN_NO_20MHZ) &&
 		    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_20MHZ))
@@ -4061,7 +4061,8 @@
 			return -EINVAL;
 		break;
 	case CFG80211_STA_MESH_PEER_USER:
-		if (params->plink_action != NL80211_PLINK_ACTION_NO_ACTION)
+		if (params->plink_action != NL80211_PLINK_ACTION_NO_ACTION &&
+		    params->plink_action != NL80211_PLINK_ACTION_BLOCK)
 			return -EINVAL;
 		break;
 	}
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 0e347f8..d359e06 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -989,8 +989,8 @@
 		channel_flags |= IEEE80211_CHAN_NO_OFDM;
 	if (rd_flags & NL80211_RRF_NO_OUTDOOR)
 		channel_flags |= IEEE80211_CHAN_INDOOR_ONLY;
-	if (rd_flags & NL80211_RRF_GO_CONCURRENT)
-		channel_flags |= IEEE80211_CHAN_GO_CONCURRENT;
+	if (rd_flags & NL80211_RRF_IR_CONCURRENT)
+		channel_flags |= IEEE80211_CHAN_IR_CONCURRENT;
 	if (rd_flags & NL80211_RRF_NO_HT40MINUS)
 		channel_flags |= IEEE80211_CHAN_NO_HT40MINUS;
 	if (rd_flags & NL80211_RRF_NO_HT40PLUS)
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index d11454f..8020b5b 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -938,7 +938,8 @@
 }
 
 void cfg80211_disconnected(struct net_device *dev, u16 reason,
-			   const u8 *ie, size_t ie_len, gfp_t gfp)
+			   const u8 *ie, size_t ie_len,
+			   bool locally_generated, gfp_t gfp)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
@@ -954,6 +955,7 @@
 	ev->dc.ie_len = ie_len;
 	memcpy((void *)ev->dc.ie, ie, ie_len);
 	ev->dc.reason = reason;
+	ev->dc.locally_generated = locally_generated;
 
 	spin_lock_irqsave(&wdev->event_lock, flags);
 	list_add_tail(&ev->list, &wdev->event_list);
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 9ee6bc1..9cee022 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -86,7 +86,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static void cfg80211_leave_all(struct cfg80211_registered_device *rdev)
 {
 	struct wireless_dev *wdev;
@@ -95,7 +95,7 @@
 		cfg80211_leave(rdev, wdev);
 }
 
-static int wiphy_suspend(struct device *dev, pm_message_t state)
+static int wiphy_suspend(struct device *dev)
 {
 	struct cfg80211_registered_device *rdev = dev_to_rdev(dev);
 	int ret = 0;
@@ -136,6 +136,11 @@
 
 	return ret;
 }
+
+static SIMPLE_DEV_PM_OPS(wiphy_pm_ops, wiphy_suspend, wiphy_resume);
+#define WIPHY_PM_OPS (&wiphy_pm_ops)
+#else
+#define WIPHY_PM_OPS NULL
 #endif
 
 static const void *wiphy_namespace(struct device *d)
@@ -151,10 +156,7 @@
 	.dev_release = wiphy_dev_release,
 	.dev_groups = ieee80211_groups,
 	.dev_uevent = wiphy_uevent,
-#ifdef CONFIG_PM
-	.suspend = wiphy_suspend,
-	.resume = wiphy_resume,
-#endif
+	.pm = WIPHY_PM_OPS,
 	.ns_type = &net_ns_type_operations,
 	.namespace = wiphy_namespace,
 };
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 70051ab..baf7218 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -887,7 +887,8 @@
 		case EVENT_DISCONNECTED:
 			__cfg80211_disconnected(wdev->netdev,
 						ev->dc.ie, ev->dc.ie_len,
-						ev->dc.reason, true);
+						ev->dc.reason,
+						!ev->dc.locally_generated);
 			break;
 		case EVENT_IBSS_JOINED:
 			__cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid,
@@ -944,7 +945,7 @@
 	     ntype == NL80211_IFTYPE_P2P_CLIENT))
 		return -EBUSY;
 
-	if (ntype != otype && netif_running(dev)) {
+	if (ntype != otype) {
 		dev->ieee80211_ptr->use_4addr = false;
 		dev->ieee80211_ptr->mesh_id_up_len = 0;
 		wdev_lock(dev->ieee80211_ptr);
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index c3ab230..a750f33 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -515,10 +515,10 @@
 	.obj_size = sizeof(struct x25_sock),
 };
 
-static struct sock *x25_alloc_socket(struct net *net)
+static struct sock *x25_alloc_socket(struct net *net, int kern)
 {
 	struct x25_sock *x25;
-	struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto);
+	struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto, kern);
 
 	if (!sk)
 		goto out;
@@ -553,7 +553,7 @@
 		goto out;
 
 	rc = -ENOBUFS;
-	if ((sk = x25_alloc_socket(net)) == NULL)
+	if ((sk = x25_alloc_socket(net, kern)) == NULL)
 		goto out;
 
 	x25 = x25_sk(sk);
@@ -602,7 +602,7 @@
 	if (osk->sk_type != SOCK_SEQPACKET)
 		goto out;
 
-	if ((sk = x25_alloc_socket(sock_net(osk))) == NULL)
+	if ((sk = x25_alloc_socket(sock_net(osk), 0)) == NULL)
 		goto out;
 
 	x25 = x25_sk(sk);
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index b58286e..60ce701 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -31,7 +31,7 @@
 		return -EAFNOSUPPORT;
 	spin_lock_bh(&xfrm_input_afinfo_lock);
 	if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL))
-		err = -ENOBUFS;
+		err = -EEXIST;
 	else
 		rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo);
 	spin_unlock_bh(&xfrm_input_afinfo_lock);
@@ -254,13 +254,13 @@
 		skb->sp->xvec[skb->sp->len++] = x;
 
 		spin_lock(&x->lock);
-		if (unlikely(x->km.state == XFRM_STATE_ACQ)) {
-			XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
-			goto drop_unlock;
-		}
 
 		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
-			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID);
+			if (x->km.state == XFRM_STATE_ACQ)
+				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
+			else
+				XFRM_INC_STATS(net,
+					       LINUX_MIB_XFRMINSTATEINVALID);
 			goto drop_unlock;
 		}
 
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index fbcedbe..68ada2c 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -38,6 +38,18 @@
 	return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
 }
 
+/* Child dst entries define the path of the packet through the
+ * Linux networking stack.  Thus, destinations are stackable.
+ */
+
+static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
+{
+	struct dst_entry *child = dst_clone(skb_dst(skb)->child);
+
+	skb_dst_drop(skb);
+	return child;
+}
+
 static int xfrm_output_one(struct sk_buff *skb, int err)
 {
 	struct dst_entry *dst = skb_dst(skb);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 638af06..18cead7 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -315,14 +315,6 @@
 }
 EXPORT_SYMBOL(xfrm_policy_destroy);
 
-static void xfrm_queue_purge(struct sk_buff_head *list)
-{
-	struct sk_buff *skb;
-
-	while ((skb = skb_dequeue(list)) != NULL)
-		kfree_skb(skb);
-}
-
 /* Rule must be locked. Release descendant resources, announce
  * entry dead. The rule must be unlinked from lists by this moment.
  */
@@ -335,7 +327,7 @@
 
 	if (del_timer(&policy->polq.hold_timer))
 		xfrm_pol_put(policy);
-	xfrm_queue_purge(&policy->polq.hold_queue);
+	skb_queue_purge(&policy->polq.hold_queue);
 
 	if (del_timer(&policy->timer))
 		xfrm_pol_put(policy);
@@ -708,6 +700,9 @@
 	struct xfrm_policy_queue *pq = &old->polq;
 	struct sk_buff_head list;
 
+	if (skb_queue_empty(&pq->hold_queue))
+		return;
+
 	__skb_queue_head_init(&list);
 
 	spin_lock_bh(&pq->hold_queue.lock);
@@ -716,9 +711,6 @@
 		xfrm_pol_put(old);
 	spin_unlock_bh(&pq->hold_queue.lock);
 
-	if (skb_queue_empty(&list))
-		return;
-
 	pq = &new->polq;
 
 	spin_lock_bh(&pq->hold_queue.lock);
@@ -1012,7 +1004,9 @@
 	if (list_empty(&walk->walk.all))
 		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
 	else
-		x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
+		x = list_first_entry(&walk->walk.all,
+				     struct xfrm_policy_walk_entry, all);
+
 	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
 		if (x->dead)
 			continue;
@@ -1120,6 +1114,9 @@
 	}
 	chain = &net->xfrm.policy_inexact[dir];
 	hlist_for_each_entry(pol, chain, bydst) {
+		if ((pol->priority >= priority) && ret)
+			break;
+
 		err = xfrm_policy_match(pol, fl, type, family, dir);
 		if (err) {
 			if (err == -ESRCH)
@@ -1128,13 +1125,13 @@
 				ret = ERR_PTR(err);
 				goto fail;
 			}
-		} else if (pol->priority < priority) {
+		} else {
 			ret = pol;
 			break;
 		}
 	}
-	if (ret)
-		xfrm_pol_hold(ret);
+
+	xfrm_pol_hold(ret);
 fail:
 	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
 
@@ -1955,7 +1952,7 @@
 
 purge_queue:
 	pq->timeout = 0;
-	xfrm_queue_purge(&pq->hold_queue);
+	skb_queue_purge(&pq->hold_queue);
 	xfrm_pol_put(pol);
 }
 
@@ -2814,7 +2811,7 @@
 		return -EAFNOSUPPORT;
 	spin_lock(&xfrm_policy_afinfo_lock);
 	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
-		err = -ENOBUFS;
+		err = -EEXIST;
 	else {
 		struct dst_ops *dst_ops = afinfo->dst_ops;
 		if (likely(dst_ops->kmem_cachep == NULL))
@@ -3209,16 +3206,17 @@
 	}
 	chain = &net->xfrm.policy_inexact[dir];
 	hlist_for_each_entry(pol, chain, bydst) {
+		if ((pol->priority >= priority) && ret)
+			break;
+
 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
-		    pol->type == type &&
-		    pol->priority < priority) {
+		    pol->type == type) {
 			ret = pol;
 			break;
 		}
 	}
 
-	if (ret)
-		xfrm_pol_hold(ret);
+	xfrm_pol_hold(ret);
 
 	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
 
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 96688cd..9895a8c 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1626,7 +1626,7 @@
 	if (list_empty(&walk->all))
 		x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
 	else
-		x = list_entry(&walk->all, struct xfrm_state_walk, all);
+		x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
 	list_for_each_entry_from(x, &net->xfrm.state_all, all) {
 		if (x->state == XFRM_STATE_DEAD)
 			continue;
@@ -1908,7 +1908,7 @@
 		return -EAFNOSUPPORT;
 	spin_lock_bh(&xfrm_state_afinfo_lock);
 	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
-		err = -ENOBUFS;
+		err = -EEXIST;
 	else
 		rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
 	spin_unlock_bh(&xfrm_state_afinfo_lock);
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 76e3458..4450fed 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -6,45 +6,57 @@
 hostprogs-y += sock_example
 hostprogs-y += sockex1
 hostprogs-y += sockex2
+hostprogs-y += sockex3
 hostprogs-y += tracex1
 hostprogs-y += tracex2
 hostprogs-y += tracex3
 hostprogs-y += tracex4
+hostprogs-y += tracex5
+hostprogs-y += lathist
 
 test_verifier-objs := test_verifier.o libbpf.o
 test_maps-objs := test_maps.o libbpf.o
 sock_example-objs := sock_example.o libbpf.o
 sockex1-objs := bpf_load.o libbpf.o sockex1_user.o
 sockex2-objs := bpf_load.o libbpf.o sockex2_user.o
+sockex3-objs := bpf_load.o libbpf.o sockex3_user.o
 tracex1-objs := bpf_load.o libbpf.o tracex1_user.o
 tracex2-objs := bpf_load.o libbpf.o tracex2_user.o
 tracex3-objs := bpf_load.o libbpf.o tracex3_user.o
 tracex4-objs := bpf_load.o libbpf.o tracex4_user.o
+tracex5-objs := bpf_load.o libbpf.o tracex5_user.o
+lathist-objs := bpf_load.o libbpf.o lathist_user.o
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
 always += sockex1_kern.o
 always += sockex2_kern.o
+always += sockex3_kern.o
 always += tracex1_kern.o
 always += tracex2_kern.o
 always += tracex3_kern.o
 always += tracex4_kern.o
+always += tracex5_kern.o
 always += tcbpf1_kern.o
+always += lathist_kern.o
 
 HOSTCFLAGS += -I$(objtree)/usr/include
 
 HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable
 HOSTLOADLIBES_sockex1 += -lelf
 HOSTLOADLIBES_sockex2 += -lelf
+HOSTLOADLIBES_sockex3 += -lelf
 HOSTLOADLIBES_tracex1 += -lelf
 HOSTLOADLIBES_tracex2 += -lelf
 HOSTLOADLIBES_tracex3 += -lelf
 HOSTLOADLIBES_tracex4 += -lelf -lrt
+HOSTLOADLIBES_tracex5 += -lelf
+HOSTLOADLIBES_lathist += -lelf
 
 # point this to your LLVM backend with bpf support
 LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc
 
-%.o: %.c
+$(obj)/%.o: $(src)/%.c
 	clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
 		-D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
 		-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index f960b5f..bdf1c16 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -21,6 +21,16 @@
 	(void *) BPF_FUNC_ktime_get_ns;
 static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
 	(void *) BPF_FUNC_trace_printk;
+static void (*bpf_tail_call)(void *ctx, void *map, int index) =
+	(void *) BPF_FUNC_tail_call;
+static unsigned long long (*bpf_get_smp_processor_id)(void) =
+	(void *) BPF_FUNC_get_smp_processor_id;
+static unsigned long long (*bpf_get_current_pid_tgid)(void) =
+	(void *) BPF_FUNC_get_current_pid_tgid;
+static unsigned long long (*bpf_get_current_uid_gid)(void) =
+	(void *) BPF_FUNC_get_current_uid_gid;
+static int (*bpf_get_current_comm)(void *buf, int buf_size) =
+	(void *) BPF_FUNC_get_current_comm;
 
 /* llvm builtin functions that eBPF C program may use to
  * emit BPF_LD_ABS and BPF_LD_IND instructions
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 38dac5a..da86a8e 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -16,6 +16,7 @@
 #include <sys/ioctl.h>
 #include <sys/mman.h>
 #include <poll.h>
+#include <ctype.h>
 #include "libbpf.h"
 #include "bpf_helpers.h"
 #include "bpf_load.h"
@@ -29,6 +30,19 @@
 int prog_fd[MAX_PROGS];
 int event_fd[MAX_PROGS];
 int prog_cnt;
+int prog_array_fd = -1;
+
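+/* Section names that end in a number ("socket/3", "kprobe/5") request
+ * that the program fd be stored at that index of the prog array map,
+ * making the program a bpf_tail_call() target instead of a regular
+ * attachment.
+ */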
+static int populate_prog_array(const char *event, int prog_fd)
+{
+	int ind = atoi(event), err;
+
+	err = bpf_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY);
+	if (err < 0) {
+		printf("failed to store prog_fd in prog_array\n");
+		return -1;
+	}
+	return 0;
+}
 
 static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
 {
@@ -54,12 +68,40 @@
 		return -1;
 	}
 
+	fd = bpf_prog_load(prog_type, prog, size, license, kern_version);
+	if (fd < 0) {
+		printf("bpf_prog_load() err=%d\n%s", errno, bpf_log_buf);
+		return -1;
+	}
+
+	prog_fd[prog_cnt++] = fd;
+
+	if (is_socket) {
+		event += 6;
+		if (*event != '/')
+			return 0;
+		event++;
+		if (!isdigit(*event)) {
+			printf("invalid prog number\n");
+			return -1;
+		}
+		return populate_prog_array(event, fd);
+	}
+
 	if (is_kprobe || is_kretprobe) {
 		if (is_kprobe)
 			event += 7;
 		else
 			event += 10;
 
+		if (*event == 0) {
+			printf("event name cannot be empty\n");
+			return -1;
+		}
+
+		if (isdigit(*event))
+			return populate_prog_array(event, fd);
+
 		snprintf(buf, sizeof(buf),
 			 "echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events",
 			 is_kprobe ? 'p' : 'r', event, event);
@@ -71,18 +113,6 @@
 		}
 	}
 
-	fd = bpf_prog_load(prog_type, prog, size, license, kern_version);
-
-	if (fd < 0) {
-		printf("bpf_prog_load() err=%d\n%s", errno, bpf_log_buf);
-		return -1;
-	}
-
-	prog_fd[prog_cnt++] = fd;
-
-	if (is_socket)
-		return 0;
-
 	strcpy(buf, DEBUGFS);
 	strcat(buf, "events/kprobes/");
 	strcat(buf, event);
@@ -130,6 +160,9 @@
 					   maps[i].max_entries);
 		if (map_fd[i] < 0)
 			return 1;
+
+		if (maps[i].type == BPF_MAP_TYPE_PROG_ARRAY)
+			prog_array_fd = map_fd[i];
 	}
 	return 0;
 }
diff --git a/samples/bpf/lathist_kern.c b/samples/bpf/lathist_kern.c
new file mode 100644
index 0000000..18fa088
--- /dev/null
+++ b/samples/bpf/lathist_kern.c
@@ -0,0 +1,99 @@
+/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
+ * Copyright (c) 2015 BMW Car IT GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/version.h>
+#include <linux/ptrace.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+#define MAX_ENTRIES	20
+#define MAX_CPU		4
+
+/* We need to stick to statically allocated memory (an array instead of
+ * a hash table) because managing dynamic memory from the
+ * trace_preempt_[on|off] tracepoint hooks is not supported.
+ */
+
+struct bpf_map_def SEC("maps") my_map = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(int),
+	.value_size = sizeof(u64),
+	.max_entries = MAX_CPU,
+};
+
+SEC("kprobe/trace_preempt_off")
+int bpf_prog1(struct pt_regs *ctx)
+{
+	int cpu = bpf_get_smp_processor_id();
+	u64 *ts = bpf_map_lookup_elem(&my_map, &cpu);
+
+	if (ts)
+		*ts = bpf_ktime_get_ns();
+
+	return 0;
+}
+
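+/* Branchless integer log2: probe the upper 16/8/4/2 bit halves and
+ * accumulate the shift amounts into the result.
+ */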
+static unsigned int log2(unsigned int v)
+{
+	unsigned int r;
+	unsigned int shift;
+
+	r = (v > 0xFFFF) << 4; v >>= r;
+	shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
+	shift = (v > 0xF) << 2; v >>= shift; r |= shift;
+	shift = (v > 0x3) << 1; v >>= shift; r |= shift;
+	r |= (v >> 1);
+
+	return r;
+}
+
+static unsigned int log2l(unsigned long v)
+{
+	unsigned int hi = v >> 32;
+
+	if (hi)
+		return log2(hi) + 32;
+	else
+		return log2(v);
+}
+
+struct bpf_map_def SEC("maps") my_lat = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(int),
+	.value_size = sizeof(long),
+	.max_entries = MAX_CPU * MAX_ENTRIES,
+};
+
+SEC("kprobe/trace_preempt_on")
+int bpf_prog2(struct pt_regs *ctx)
+{
+	u64 *ts, cur_ts, delta;
+	int key, cpu;
+	long *val;
+
+	cpu = bpf_get_smp_processor_id();
+	ts = bpf_map_lookup_elem(&my_map, &cpu);
+	if (!ts)
+		return 0;
+
+	cur_ts = bpf_ktime_get_ns();
+	delta = log2l(cur_ts - *ts);
+
+	if (delta > MAX_ENTRIES - 1)
+		delta = MAX_ENTRIES - 1;
+
+	key = cpu * MAX_ENTRIES + delta;
+	val = bpf_map_lookup_elem(&my_lat, &key);
+	if (val)
+		__sync_fetch_and_add((long *)val, 1);
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/lathist_user.c b/samples/bpf/lathist_user.c
new file mode 100644
index 0000000..65da8c1
--- /dev/null
+++ b/samples/bpf/lathist_user.c
@@ -0,0 +1,103 @@
+/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
+ * Copyright (c) 2015 BMW Car IT GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <linux/bpf.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+#define MAX_ENTRIES	20
+#define MAX_CPU		4
+#define MAX_STARS	40
+
+struct cpu_hist {
+	long data[MAX_ENTRIES];
+	long max;
+};
+
+static struct cpu_hist cpu_hist[MAX_CPU];
+
+static void stars(char *str, long val, long max, int width)
+{
+	int i;
+
+	for (i = 0; i < (width * val / max) - 1 && i < width - 1; i++)
+		str[i] = '*';
+	if (val > max)
+		str[i - 1] = '+';
+	str[i] = '\0';
+}
+
+static void print_hist(void)
+{
+	char starstr[MAX_STARS];
+	struct cpu_hist *hist;
+	int i, j;
+
+	/* clear screen */
+	printf("\033[2J");
+
+	for (j = 0; j < MAX_CPU; j++) {
+		hist = &cpu_hist[j];
+
+		/* ignore CPUs without data (maybe offline?) */
+		if (hist->max == 0)
+			continue;
+
+		printf("CPU %d\n", j);
+		printf("      latency        : count     distribution\n");
+		for (i = 1; i <= MAX_ENTRIES; i++) {
+			stars(starstr, hist->data[i - 1], hist->max, MAX_STARS);
+			printf("%8ld -> %-8ld : %-8ld |%-*s|\n",
+				(1l << i) >> 1, (1l << i) - 1,
+				hist->data[i - 1], MAX_STARS, starstr);
+		}
+	}
+}
+
+static void get_data(int fd)
+{
+	long key, value;
+	int c, i;
+
+	for (i = 0; i < MAX_CPU; i++)
+		cpu_hist[i].max = 0;
+
+	for (c = 0; c < MAX_CPU; c++) {
+		for (i = 0; i < MAX_ENTRIES; i++) {
+			key = c * MAX_ENTRIES + i;
+			bpf_lookup_elem(fd, &key, &value);
+
+			cpu_hist[c].data[i] = value;
+			if (value > cpu_hist[c].max)
+				cpu_hist[c].max = value;
+		}
+	}
+}
+
+int main(int argc, char **argv)
+{
+	char filename[256];
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+	if (load_bpf_file(filename)) {
+		printf("%s", bpf_log_buf);
+		return 1;
+	}
+
+	while (1) {
+		get_data(map_fd[1]);
+		print_hist();
+		sleep(5);
+	}
+
+	return 0;
+}
diff --git a/samples/bpf/sockex3_kern.c b/samples/bpf/sockex3_kern.c
new file mode 100644
index 0000000..41ae2fd
--- /dev/null
+++ b/samples/bpf/sockex3_kern.c
@@ -0,0 +1,290 @@
+/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+#include <uapi/linux/in.h>
+#include <uapi/linux/if.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/ipv6.h>
+#include <uapi/linux/if_tunnel.h>
+#include <uapi/linux/mpls.h>
+#define IP_MF		0x2000
+#define IP_OFFSET	0x1FFF
+
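+/* Each PROG(x) below is emitted into its own "socket/<x>" ELF section;
+ * the loader stores its fd at index <x> of jmp_table, so it can be
+ * reached via bpf_tail_call().
+ */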
+#define PROG(F) SEC("socket/"__stringify(F)) int bpf_func_##F
+
+struct bpf_map_def SEC("maps") jmp_table = {
+	.type = BPF_MAP_TYPE_PROG_ARRAY,
+	.key_size = sizeof(u32),
+	.value_size = sizeof(u32),
+	.max_entries = 8,
+};
+
+#define PARSE_VLAN 1
+#define PARSE_MPLS 2
+#define PARSE_IP 3
+#define PARSE_IPV6 4
+
+/* Protocol dispatch routine.
+ * It tail-calls the next BPF program depending on the eth proto.
+ * Note, we could have used:
+ * bpf_tail_call(skb, &jmp_table, proto);
+ * but it would need a large prog_array
+ */
+static inline void parse_eth_proto(struct __sk_buff *skb, u32 proto)
+{
+	switch (proto) {
+	case ETH_P_8021Q:
+	case ETH_P_8021AD:
+		bpf_tail_call(skb, &jmp_table, PARSE_VLAN);
+		break;
+	case ETH_P_MPLS_UC:
+	case ETH_P_MPLS_MC:
+		bpf_tail_call(skb, &jmp_table, PARSE_MPLS);
+		break;
+	case ETH_P_IP:
+		bpf_tail_call(skb, &jmp_table, PARSE_IP);
+		break;
+	case ETH_P_IPV6:
+		bpf_tail_call(skb, &jmp_table, PARSE_IPV6);
+		break;
+	}
+}
+
+struct vlan_hdr {
+	__be16 h_vlan_TCI;
+	__be16 h_vlan_encapsulated_proto;
+};
+
+struct flow_keys {
+	__be32 src;
+	__be32 dst;
+	union {
+		__be32 ports;
+		__be16 port16[2];
+	};
+	__u32 ip_proto;
+};
+
+static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)
+{
+	return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))
+		& (IP_MF | IP_OFFSET);
+}
+
+static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
+{
+	__u64 w0 = load_word(ctx, off);
+	__u64 w1 = load_word(ctx, off + 4);
+	__u64 w2 = load_word(ctx, off + 8);
+	__u64 w3 = load_word(ctx, off + 12);
+
+	return (__u32)(w0 ^ w1 ^ w2 ^ w3);
+}
+
+struct globals {
+	struct flow_keys flow;
+};
+
+struct bpf_map_def SEC("maps") percpu_map = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(struct globals),
+	.max_entries = 32,
+};
+
+/* poor man's per-cpu data until native support is ready */
+static struct globals *this_cpu_globals(void)
+{
+	u32 key = bpf_get_smp_processor_id();
+
+	return bpf_map_lookup_elem(&percpu_map, &key);
+}
+
+/* some simple stats for user space consumption */
+struct pair {
+	__u64 packets;
+	__u64 bytes;
+};
+
+struct bpf_map_def SEC("maps") hash_map = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(struct flow_keys),
+	.value_size = sizeof(struct pair),
+	.max_entries = 1024,
+};
+
+static void update_stats(struct __sk_buff *skb, struct globals *g)
+{
+	struct flow_keys key = g->flow;
+	struct pair *value;
+
+	value = bpf_map_lookup_elem(&hash_map, &key);
+	if (value) {
+		__sync_fetch_and_add(&value->packets, 1);
+		__sync_fetch_and_add(&value->bytes, skb->len);
+	} else {
+		struct pair val = {1, skb->len};
+
+		bpf_map_update_elem(&hash_map, &key, &val, BPF_ANY);
+	}
+}
+
+static __always_inline void parse_ip_proto(struct __sk_buff *skb,
+					   struct globals *g, __u32 ip_proto)
+{
+	__u32 nhoff = skb->cb[0];
+	int poff;
+
+	switch (ip_proto) {
+	case IPPROTO_GRE: {
+		struct gre_hdr {
+			__be16 flags;
+			__be16 proto;
+		};
+
+		__u32 gre_flags = load_half(skb,
+					    nhoff + offsetof(struct gre_hdr, flags));
+		__u32 gre_proto = load_half(skb,
+					    nhoff + offsetof(struct gre_hdr, proto));
+
+		if (gre_flags & (GRE_VERSION|GRE_ROUTING))
+			break;
+
+		nhoff += 4;
+		if (gre_flags & GRE_CSUM)
+			nhoff += 4;
+		if (gre_flags & GRE_KEY)
+			nhoff += 4;
+		if (gre_flags & GRE_SEQ)
+			nhoff += 4;
+
+		skb->cb[0] = nhoff;
+		parse_eth_proto(skb, gre_proto);
+		break;
+	}
+	case IPPROTO_IPIP:
+		parse_eth_proto(skb, ETH_P_IP);
+		break;
+	case IPPROTO_IPV6:
+		parse_eth_proto(skb, ETH_P_IPV6);
+		break;
+	case IPPROTO_TCP:
+	case IPPROTO_UDP:
+		g->flow.ports = load_word(skb, nhoff);
+	case IPPROTO_ICMP:
+		g->flow.ip_proto = ip_proto;
+		update_stats(skb, g);
+		break;
+	default:
+		break;
+	}
+}
+
+PROG(PARSE_IP)(struct __sk_buff *skb)
+{
+	struct globals *g = this_cpu_globals();
+	__u32 nhoff, verlen, ip_proto;
+
+	if (!g)
+		return 0;
+
+	nhoff = skb->cb[0];
+
+	if (unlikely(ip_is_fragment(skb, nhoff)))
+		return 0;
+
+	ip_proto = load_byte(skb, nhoff + offsetof(struct iphdr, protocol));
+
+	if (ip_proto != IPPROTO_GRE) {
+		g->flow.src = load_word(skb, nhoff + offsetof(struct iphdr, saddr));
+		g->flow.dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr));
+	}
+
+	verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/);
+	nhoff += (verlen & 0xF) << 2;
+
+	skb->cb[0] = nhoff;
+	parse_ip_proto(skb, g, ip_proto);
+	return 0;
+}
+
+PROG(PARSE_IPV6)(struct __sk_buff *skb)
+{
+	struct globals *g = this_cpu_globals();
+	__u32 nhoff, ip_proto;
+
+	if (!g)
+		return 0;
+
+	nhoff = skb->cb[0];
+
+	ip_proto = load_byte(skb,
+			     nhoff + offsetof(struct ipv6hdr, nexthdr));
+	g->flow.src = ipv6_addr_hash(skb,
+				     nhoff + offsetof(struct ipv6hdr, saddr));
+	g->flow.dst = ipv6_addr_hash(skb,
+				     nhoff + offsetof(struct ipv6hdr, daddr));
+	nhoff += sizeof(struct ipv6hdr);
+
+	skb->cb[0] = nhoff;
+	parse_ip_proto(skb, g, ip_proto);
+	return 0;
+}
+
+PROG(PARSE_VLAN)(struct __sk_buff *skb)
+{
+	__u32 nhoff, proto;
+
+	nhoff = skb->cb[0];
+
+	proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
+						h_vlan_encapsulated_proto));
+	nhoff += sizeof(struct vlan_hdr);
+	skb->cb[0] = nhoff;
+
+	parse_eth_proto(skb, proto);
+
+	return 0;
+}
+
+PROG(PARSE_MPLS)(struct __sk_buff *skb)
+{
+	__u32 nhoff, label;
+
+	nhoff = skb->cb[0];
+
+	label = load_word(skb, nhoff);
+	nhoff += sizeof(struct mpls_label);
+	skb->cb[0] = nhoff;
+
+	if (label & MPLS_LS_S_MASK) {
+		__u8 verlen = load_byte(skb, nhoff);
+		if ((verlen & 0xF0) == 4)
+			parse_eth_proto(skb, ETH_P_IP);
+		else
+			parse_eth_proto(skb, ETH_P_IPV6);
+	} else {
+		parse_eth_proto(skb, ETH_P_MPLS_UC);
+	}
+
+	return 0;
+}
+
+SEC("socket/0")
+int main_prog(struct __sk_buff *skb)
+{
+	__u32 nhoff = ETH_HLEN;
+	__u32 proto = load_half(skb, 12);
+
+	skb->cb[0] = nhoff;
+	parse_eth_proto(skb, proto);
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/sockex3_user.c b/samples/bpf/sockex3_user.c
new file mode 100644
index 0000000..2617772
--- /dev/null
+++ b/samples/bpf/sockex3_user.c
@@ -0,0 +1,66 @@
+#include <stdio.h>
+#include <assert.h>
+#include <linux/bpf.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+#include <unistd.h>
+#include <arpa/inet.h>
+
+struct flow_keys {
+	__be32 src;
+	__be32 dst;
+	union {
+		__be32 ports;
+		__be16 port16[2];
+	};
+	__u32 ip_proto;
+};
+
+struct pair {
+	__u64 packets;
+	__u64 bytes;
+};
+
+int main(int argc, char **argv)
+{
+	char filename[256];
+	FILE *f;
+	int i, sock;
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+	if (load_bpf_file(filename)) {
+		printf("%s", bpf_log_buf);
+		return 1;
+	}
+
+	sock = open_raw_sock("lo");
+
+	assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd[4],
+			  sizeof(__u32)) == 0);
+
+	if (argc > 1)
+		f = popen("ping -c5 localhost", "r");
+	else
+		f = popen("netperf -l 4 localhost", "r");
+	(void) f;
+
+	for (i = 0; i < 5; i++) {
+		struct flow_keys key = {}, next_key;
+		struct pair value;
+
+		sleep(1);
+		printf("IP     src.port -> dst.port               bytes      packets\n");
+		while (bpf_get_next_key(map_fd[2], &key, &next_key) == 0) {
+			bpf_lookup_elem(map_fd[2], &next_key, &value);
+			printf("%s.%05d -> %s.%05d %12lld %12lld\n",
+			       inet_ntoa((struct in_addr){htonl(next_key.src)}),
+			       next_key.port16[0],
+			       inet_ntoa((struct in_addr){htonl(next_key.dst)}),
+			       next_key.port16[1],
+			       value.bytes, value.packets);
+			key = next_key;
+		}
+	}
+	return 0;
+}
diff --git a/samples/bpf/tcbpf1_kern.c b/samples/bpf/tcbpf1_kern.c
index 7c27710..9bfb2eb 100644
--- a/samples/bpf/tcbpf1_kern.c
+++ b/samples/bpf/tcbpf1_kern.c
@@ -21,7 +21,7 @@
 
 static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos)
 {
-	__u8 old_tos = load_byte(skb, BPF_LL_OFF + TOS_OFF);
+	__u8 old_tos = load_byte(skb, TOS_OFF);
 
 	bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2);
 	bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0);
@@ -34,7 +34,7 @@
 
 static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip)
 {
-	__u32 old_ip = _htonl(load_word(skb, BPF_LL_OFF + IP_SRC_OFF));
+	__u32 old_ip = _htonl(load_word(skb, IP_SRC_OFF));
 
 	bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip));
 	bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip));
@@ -44,7 +44,7 @@
 #define TCP_DPORT_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, dest))
 static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port)
 {
-	__u16 old_port = htons(load_half(skb, BPF_LL_OFF + TCP_DPORT_OFF));
+	__u16 old_port = htons(load_half(skb, TCP_DPORT_OFF));
 
 	bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port));
 	bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0);
@@ -53,7 +53,7 @@
 SEC("classifier")
 int bpf_prog1(struct __sk_buff *skb)
 {
-	__u8 proto = load_byte(skb, BPF_LL_OFF + ETH_HLEN + offsetof(struct iphdr, protocol));
+	__u8 proto = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol));
 	long *value;
 
 	if (proto == IPPROTO_TCP) {
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index 12f3780..6936059 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -29,6 +29,7 @@
 		ACCEPT,
 		REJECT
 	} result;
+	enum bpf_prog_type prog_type;
 };
 
 static struct bpf_test tests[] = {
@@ -743,6 +744,84 @@
 		.errstr = "different pointers",
 		.result = REJECT,
 	},
+	{
+		"check skb->mark is not writeable by sockets",
+		.insns = {
+			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid bpf_context access",
+		.result = REJECT,
+	},
+	{
+		"check skb->tc_index is not writeable by sockets",
+		.insns = {
+			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+				    offsetof(struct __sk_buff, tc_index)),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid bpf_context access",
+		.result = REJECT,
+	},
+	{
+		"check non-u32 access to cb",
+		.insns = {
+			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
+				    offsetof(struct __sk_buff, cb[0])),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid bpf_context access",
+		.result = REJECT,
+	},
+	{
+		"check out of range skb->cb access",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, cb[60])),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid bpf_context access",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
+	},
+	{
+		"write skb fields from socket prog",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, cb[4])),
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, tc_index)),
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+				    offsetof(struct __sk_buff, cb[0])),
+			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+				    offsetof(struct __sk_buff, cb[2])),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
+	{
+		"write skb fields from tc_cls_act prog",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, cb[0])),
+			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, tc_index)),
+			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+				    offsetof(struct __sk_buff, tc_index)),
+			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+				    offsetof(struct __sk_buff, cb[3])),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
 };
 
 static int probe_filter_length(struct bpf_insn *fp)
@@ -775,6 +854,7 @@
 
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
 		struct bpf_insn *prog = tests[i].insns;
+		int prog_type = tests[i].prog_type;
 		int prog_len = probe_filter_length(prog);
 		int *fixup = tests[i].fixup;
 		int map_fd = -1;
@@ -789,8 +869,8 @@
 		}
 		printf("#%d %s ", i, tests[i].descr);
 
-		prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog,
-					prog_len * sizeof(struct bpf_insn),
+		prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
+					prog, prog_len * sizeof(struct bpf_insn),
 					"GPL", 0);
 
 		if (tests[i].result == ACCEPT) {
diff --git a/samples/bpf/tracex2_kern.c b/samples/bpf/tracex2_kern.c
index 19ec1cf..dc50f4f 100644
--- a/samples/bpf/tracex2_kern.c
+++ b/samples/bpf/tracex2_kern.c
@@ -62,11 +62,18 @@
 		return log2(v);
 }
 
+struct hist_key {
+	char comm[16];
+	u64 pid_tgid;
+	u64 uid_gid;
+	u32 index;
+};
+
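+/* hash map keyed per task (comm, pid/tgid, uid/gid) and per log2
+ * bucket, so user space can print a separate histogram per process
+ */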
 struct bpf_map_def SEC("maps") my_hist_map = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.key_size = sizeof(u32),
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(struct hist_key),
 	.value_size = sizeof(long),
-	.max_entries = 64,
+	.max_entries = 1024,
 };
 
 SEC("kprobe/sys_write")
@@ -75,11 +82,18 @@
 	long write_size = ctx->dx; /* arg3 */
 	long init_val = 1;
 	long *value;
-	u32 index = log2l(write_size);
+	struct hist_key key = {};
 
-	value = bpf_map_lookup_elem(&my_hist_map, &index);
+	key.index = log2l(write_size);
+	key.pid_tgid = bpf_get_current_pid_tgid();
+	key.uid_gid = bpf_get_current_uid_gid();
+	bpf_get_current_comm(&key.comm, sizeof(key.comm));
+
+	value = bpf_map_lookup_elem(&my_hist_map, &key);
 	if (value)
 		__sync_fetch_and_add(value, 1);
+	else
+		bpf_map_update_elem(&my_hist_map, &key, &init_val, BPF_ANY);
 	return 0;
 }
 char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/tracex2_user.c b/samples/bpf/tracex2_user.c
index 91b8d08..cd0241c 100644
--- a/samples/bpf/tracex2_user.c
+++ b/samples/bpf/tracex2_user.c
@@ -3,6 +3,7 @@
 #include <stdlib.h>
 #include <signal.h>
 #include <linux/bpf.h>
+#include <string.h>
 #include "libbpf.h"
 #include "bpf_load.h"
 
@@ -20,23 +21,42 @@
 	str[i] = '\0';
 }
 
-static void print_hist(int fd)
+struct task {
+	char comm[16];
+	__u64 pid_tgid;
+	__u64 uid_gid;
+};
+
+struct hist_key {
+	struct task t;
+	__u32 index;
+};
+
+#define SIZE sizeof(struct task)
+
+static void print_hist_for_pid(int fd, void *task)
 {
-	int key;
+	struct hist_key key = {}, next_key;
+	char starstr[MAX_STARS];
 	long value;
 	long data[MAX_INDEX] = {};
-	char starstr[MAX_STARS];
-	int i;
 	int max_ind = -1;
 	long max_value = 0;
+	int i, ind;
 
-	for (key = 0; key < MAX_INDEX; key++) {
-		bpf_lookup_elem(fd, &key, &value);
-		data[key] = value;
-		if (value && key > max_ind)
-			max_ind = key;
+	while (bpf_get_next_key(fd, &key, &next_key) == 0) {
+		if (memcmp(&next_key, task, SIZE)) {
+			key = next_key;
+			continue;
+		}
+		bpf_lookup_elem(fd, &next_key, &value);
+		ind = next_key.index;
+		data[ind] = value;
+		if (value && ind > max_ind)
+			max_ind = ind;
 		if (value > max_value)
 			max_value = value;
+		key = next_key;
 	}
 
 	printf("           syscall write() stats\n");
@@ -48,6 +68,35 @@
 		       MAX_STARS, starstr);
 	}
 }
+
+static void print_hist(int fd)
+{
+	struct hist_key key = {}, next_key;
+	static struct task tasks[1024];
+	int task_cnt = 0;
+	int i;
+
+	while (bpf_get_next_key(fd, &key, &next_key) == 0) {
+		int found = 0;
+
+		for (i = 0; i < task_cnt; i++)
+			if (memcmp(&tasks[i], &next_key, SIZE) == 0)
+				found = 1;
+		if (!found)
+			memcpy(&tasks[task_cnt++], &next_key, SIZE);
+		key = next_key;
+	}
+
+	for (i = 0; i < task_cnt; i++) {
+		printf("\npid %d cmd %s uid %d\n",
+		       (__u32) tasks[i].pid_tgid,
+		       tasks[i].comm,
+		       (__u32) tasks[i].uid_gid);
+		print_hist_for_pid(fd, &tasks[i]);
+	}
+}
+
 static void int_exit(int sig)
 {
 	print_hist(map_fd[1]);
diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5_kern.c
new file mode 100644
index 0000000..b71fe07
--- /dev/null
+++ b/samples/bpf/tracex5_kern.c
@@ -0,0 +1,75 @@
+/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/seccomp.h>
+#include "bpf_helpers.h"
+
+#define PROG(F) SEC("kprobe/"__stringify(F)) int bpf_func_##F
+
+struct bpf_map_def SEC("maps") progs = {
+	.type = BPF_MAP_TYPE_PROG_ARRAY,
+	.key_size = sizeof(u32),
+	.value_size = sizeof(u32),
+	.max_entries = 1024,
+};
+
+SEC("kprobe/seccomp_phase1")
+int bpf_prog1(struct pt_regs *ctx)
+{
+	struct seccomp_data sd = {};
+
+	bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+
+	/* dispatch into next BPF program depending on syscall number */
+	bpf_tail_call(ctx, &progs, sd.nr);
+
+	/* fall through -> unknown syscall */
+	if (sd.nr >= __NR_getuid && sd.nr <= __NR_getsid) {
+		char fmt[] = "syscall=%d (one of get/set uid/pid/gid)\n";
+		bpf_trace_printk(fmt, sizeof(fmt), sd.nr);
+	}
+	return 0;
+}
+
+/* we jump here when syscall number == __NR_write */
+PROG(__NR_write)(struct pt_regs *ctx)
+{
+	struct seccomp_data sd = {};
+
+	bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+	if (sd.args[2] == 512) {
+		char fmt[] = "write(fd=%d, buf=%p, size=%d)\n";
+		bpf_trace_printk(fmt, sizeof(fmt),
+				 sd.args[0], sd.args[1], sd.args[2]);
+	}
+	return 0;
+}
+
+PROG(__NR_read)(struct pt_regs *ctx)
+{
+	struct seccomp_data sd = {};
+
+	bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+	if (sd.args[2] > 128 && sd.args[2] <= 1024) {
+		char fmt[] = "read(fd=%d, buf=%p, size=%d)\n";
+		bpf_trace_printk(fmt, sizeof(fmt),
+				 sd.args[0], sd.args[1], sd.args[2]);
+	}
+	return 0;
+}
+
+PROG(__NR_mmap)(struct pt_regs *ctx)
+{
+	char fmt[] = "mmap\n";
+	bpf_trace_printk(fmt, sizeof(fmt));
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex5_user.c b/samples/bpf/tracex5_user.c
new file mode 100644
index 0000000..a04dd3c
--- /dev/null
+++ b/samples/bpf/tracex5_user.c
@@ -0,0 +1,46 @@
+#include <stdio.h>
+#include <linux/bpf.h>
+#include <unistd.h>
+#include <linux/filter.h>
+#include <linux/seccomp.h>
+#include <sys/prctl.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+/* install fake seccomp program to enable seccomp code path inside the kernel,
+ * so that our kprobe attached to seccomp_phase1() can be triggered
+ */
+static void install_accept_all_seccomp(void)
+{
+	struct sock_filter filter[] = {
+		BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
+	};
+	struct sock_fprog prog = {
+		.len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
+		.filter = filter,
+	};
+	if (prctl(PR_SET_SECCOMP, 2, &prog))
+		perror("prctl");
+}
+
+int main(int ac, char **argv)
+{
+	FILE *f;
+	char filename[256];
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+	if (load_bpf_file(filename)) {
+		printf("%s", bpf_log_buf);
+		return 1;
+	}
+
+	install_accept_all_seccomp();
+
+	f = popen("dd if=/dev/zero of=/dev/null count=5", "r");
+	(void) f;
+
+	read_trace_pipe();
+
+	return 0;
+}
diff --git a/samples/pktgen/README.rst b/samples/pktgen/README.rst
new file mode 100644
index 0000000..8365c4e
--- /dev/null
+++ b/samples/pktgen/README.rst
@@ -0,0 +1,43 @@
+Sample and benchmark scripts for pktgen (packet generator)
+==========================================================
+This directory contains some pktgen sample and benchmark scripts, that
+can easily be copied and adjusted for your own use-case.
+
+General doc is located in kernel: Documentation/networking/pktgen.txt
+
+Helper include files
+====================
+This directory contains two helper shell files that can be "included"
+by shell source'ing: "functions.sh" and "parameters.sh".
+
+Common parameters
+-----------------
+The parameters.sh file supports easy and consistent parameter parsing
+across the sample scripts.  A usage example is printed on errors::
+
+ Usage: ./pktgen_sample01_simple.sh [-vx] -i ethX
+  -i : ($DEV)       output interface/device (required)
+  -s : ($PKT_SIZE)  packet size
+  -d : ($DEST_IP)   destination IP
+  -m : ($DST_MAC)   destination MAC-addr
+  -t : ($THREADS)   threads to start
+  -c : ($CLONE_SKB) SKB clones sent before allocating a new SKB
+  -b : ($BURST)     HW level bursting of SKBs
+  -v : ($VERBOSE)   verbose
+  -x : ($DEBUG)     debug
+
+The global variable each option sets is also listed.  E.g. the required
+interface/device parameter "-i" sets the variable $DEV.
+
+Common functions
+----------------
+The functions.sh file provides three shell functions for configuring
+the different components of pktgen: pg_ctrl(), pg_thread() and
+pg_set().
+
+These functions correspond to pktgen's different components.
+ * pg_ctrl()   control "pgctrl" (/proc/net/pktgen/pgctrl)
+ * pg_thread() control the kernel threads and binding to devices
+ * pg_set()    control setup of individual devices
+
+See sample scripts for usage examples.
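+
+As a quick orientation, a minimal sketch of a script built on these
+helpers could look like this (parameters.sh fills in $DEV, $PKT_SIZE,
+$DEST_IP and $DST_MAC from the command line)::
+
+ #!/bin/bash
+ basedir=`dirname $0`
+ source ${basedir}/functions.sh
+ source ${basedir}/parameters.sh
+
+ # Bind thread 0 to the output device
+ pg_thread 0 "rem_device_all"
+ pg_thread 0 "add_device" $DEV
+
+ # Configure the device, then fire
+ pg_set $DEV "count 100000"
+ pg_set $DEV "pkt_size $PKT_SIZE"
+ pg_set $DEV "dst $DEST_IP"
+ pg_set $DEV "dst_mac $DST_MAC"
+ pg_ctrl "start"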
diff --git a/samples/pktgen/functions.sh b/samples/pktgen/functions.sh
new file mode 100644
index 0000000..205e4cd
--- /dev/null
+++ b/samples/pktgen/functions.sh
@@ -0,0 +1,121 @@
+#
+# Common functions used by pktgen scripts
+#  - Depends on bash 3 (or higher) syntax
+#
+# Author: Jesper Dangaard Brouer
+# License: GPL
+
+## -- General shell logging cmds --
+function err() {
+    local exitcode=$1
+    shift
+    echo "ERROR: $@" >&2
+    exit $exitcode
+}
+
+function warn() {
+    echo "WARN : $@" >&2
+}
+
+function info() {
+    if [[ -n "$VERBOSE" ]]; then
+	echo "INFO : $@" >&2
+    fi
+}
+
+## -- Pktgen proc config commands -- ##
+export PROC_DIR=/proc/net/pktgen
+#
+# Three different shell functions for configuring the different
+# components of pktgen:
+#   pg_ctrl(), pg_thread() and pg_set().
+#
+# These functions correspond to pktgens different components.
+# * pg_ctrl()   control "pgctrl" (/proc/net/pktgen/pgctrl)
+# * pg_thread() control the kernel threads and binding to devices
+# * pg_set()    control setup of individual devices
+function pg_ctrl() {
+    local proc_file="pgctrl"
+    proc_cmd ${proc_file} "$@"
+}
+
+function pg_thread() {
+    local thread=$1
+    local proc_file="kpktgend_${thread}"
+    shift
+    proc_cmd ${proc_file} "$@"
+}
+
+function pg_set() {
+    local dev=$1
+    local proc_file="$dev"
+    shift
+    proc_cmd ${proc_file} "$@"
+}
+
+# More generic replacement for pgset() that does not depend on a global
+# variable for the proc file.
+function proc_cmd() {
+    local result
+    local proc_file=$1
+    # after shift, the remaining args are contained in $@
+    shift
+    local proc_ctrl=${PROC_DIR}/$proc_file
+    if [[ ! -e "$proc_ctrl" ]]; then
+	err 3 "proc file:$proc_ctrl does not exist (dev added to thread?)"
+    else
+	if [[ ! -w "$proc_ctrl" ]]; then
+	    err 4 "proc file:$proc_ctrl not writable, not root?!"
+	fi
+    fi
+
+    if [[ "$DEBUG" == "yes" ]]; then
+	echo "cmd: $@ > $proc_ctrl"
+    fi
+    # Quoting of "$@" is important for space expansion
+    echo "$@" > "$proc_ctrl"
+    local status=$?
+
+    result=$(grep "Result: OK:" $proc_ctrl)
+    # Due to pgctrl, cannot use exit code $? from grep
+    if [[ "$result" == "" ]]; then
+	grep "Result:" $proc_ctrl >&2
+    fi
+    if (( $status != 0 )); then
+	err 5 "Write error($status) occurred cmd: \"$@ > $proc_ctrl\""
+    fi
+}
+
+# Old obsolete "pgset" function, with slightly improved err handling
+function pgset() {
+    local result
+
+    if [[ "$DEBUG" == "yes" ]]; then
+	echo "cmd: $1 > $PGDEV"
+    fi
+    echo $1 > $PGDEV
+    local status=$?
+
+    result=`cat $PGDEV | fgrep "Result: OK:"`
+    if [[ "$result" == "" ]]; then
+         cat $PGDEV | fgrep Result:
+    fi
+    if (( $status != 0 )); then
+	err 5 "Write error($status) occurred cmd: \"$1 > $PGDEV\""
+    fi
+}
+
+## -- General shell tricks --
+
+function root_check_run_with_sudo() {
+    # Trick so the program can be run as a normal user; it re-runs
+    # itself with "sudo".  Call as: root_check_run_with_sudo "$@"
+    if [ "$EUID" -ne 0 ]; then
+	if [ -x $0 ]; then # Directly executable use sudo
+	    info "Not root, running with sudo"
+            sudo "$0" "$@"
+            exit $?
+	fi
+	err 4 "cannot perform sudo run of $0"
+    fi
+}
diff --git a/samples/pktgen/parameters.sh b/samples/pktgen/parameters.sh
new file mode 100644
index 0000000..33b70fd
--- /dev/null
+++ b/samples/pktgen/parameters.sh
@@ -0,0 +1,97 @@
+#
+# Common parameter parsing for pktgen scripts
+#
+
+function usage() {
+    echo ""
+    echo "Usage: $0 [-vx] -i ethX"
+    echo "  -i : (\$DEV)       output interface/device (required)"
+    echo "  -s : (\$PKT_SIZE)  packet size"
+    echo "  -d : (\$DEST_IP)   destination IP"
+    echo "  -m : (\$DST_MAC)   destination MAC-addr"
+    echo "  -t : (\$THREADS)   threads to start"
+    echo "  -c : (\$CLONE_SKB) SKB clones sent before allocating a new SKB"
+    echo "  -b : (\$BURST)     HW level bursting of SKBs"
+    echo "  -v : (\$VERBOSE)   verbose"
+    echo "  -x : (\$DEBUG)     debug"
+    echo ""
+}
+
+##  --- Parse command line arguments / parameters ---
+## echo "Commandline options:"
+while getopts "s:i:d:m:t:c:b:vxh" option; do
+    case $option in
+        i) # interface
+          export DEV=$OPTARG
+	  info "Output device set to: DEV=$DEV"
+          ;;
+        s)
+          export PKT_SIZE=$OPTARG
+	  info "Packet size set to: PKT_SIZE=$PKT_SIZE bytes"
+          ;;
+        d) # destination IP
+          export DEST_IP=$OPTARG
+	  info "Destination IP set to: DEST_IP=$DEST_IP"
+          ;;
+        m) # MAC
+          export DST_MAC=$OPTARG
+	  info "Destination MAC set to: DST_MAC=$DST_MAC"
+          ;;
+        t)
+	  export THREADS=$OPTARG
+          export CPU_THREADS=$OPTARG
+	  let "CPU_THREADS -= 1"
+	  info "Number of threads to start: $THREADS (0 to $CPU_THREADS)"
+          ;;
+        c)
+	  export CLONE_SKB=$OPTARG
+	  info "CLONE_SKB=$CLONE_SKB"
+          ;;
+        b)
+	  export BURST=$OPTARG
+	  info "SKB bursting: BURST=$BURST"
+          ;;
+        v)
+          export VERBOSE=yes
+          info "Verbose mode: VERBOSE=$VERBOSE"
+          ;;
+        x)
+          export DEBUG=yes
+          info "Debug mode: DEBUG=$DEBUG"
+          ;;
+        h|?|*)
+          usage;
+          err 2 "Unknown parameter"
+    esac
+done
+shift $(( $OPTIND - 1 ))
+
+if [ -z "$PKT_SIZE" ]; then
+    # NIC adds 4 bytes CRC, making 60 bytes the 64-byte minimum Ethernet frame
+    export PKT_SIZE=60
+    info "Default packet size set to: $PKT_SIZE bytes"
+fi
+
+if [ -z "$THREADS" ]; then
+    # Zero CPU threads means one thread, because CPU numbers are zero-indexed
+    export CPU_THREADS=0
+    export THREADS=1
+fi
+
+if [ -z "$DEV" ]; then
+    usage
+    err 2 "Please specify output device"
+fi
+
+if [ -z "$DST_MAC" ]; then
+    warn "Missing destination MAC address"
+fi
+
+if [ -z "$DEST_IP" ]; then
+    warn "Missing destination IP address"
+fi
+
+if [ ! -d /proc/net/pktgen ]; then
+    info "Loading kernel module: pktgen"
+    modprobe pktgen
+fi
diff --git a/samples/pktgen/pktgen.conf-1-1 b/samples/pktgen/pktgen.conf-1-1
deleted file mode 100755
index f91daad..0000000
--- a/samples/pktgen/pktgen.conf-1-1
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/bin/bash
-
-#modprobe pktgen
-
-
-function pgset() {
-    local result
-
-    echo $1 > $PGDEV
-
-    result=`cat $PGDEV | fgrep "Result: OK:"`
-    if [ "$result" = "" ]; then
-         cat $PGDEV | fgrep Result:
-    fi
-}
-
-# Config Start Here -----------------------------------------------------------
-
-
-# thread config
-# Each CPU has its own thread. One CPU example. We add eth1.
-
-PGDEV=/proc/net/pktgen/kpktgend_0
-  echo "Removing all devices"
- pgset "rem_device_all"
-  echo "Adding eth1"
- pgset "add_device eth1"
-
-
-# device config
-# delay 0 means maximum speed.
-
-CLONE_SKB="clone_skb 1000000"
-# NIC adds 4 bytes CRC
-PKT_SIZE="pkt_size 60"
-
-# COUNT 0 means forever
-#COUNT="count 0"
-COUNT="count 10000000"
-DELAY="delay 0"
-
-PGDEV=/proc/net/pktgen/eth1
-  echo "Configuring $PGDEV"
- pgset "$COUNT"
- pgset "$CLONE_SKB"
- pgset "$PKT_SIZE"
- pgset "$DELAY"
- pgset "dst 10.10.11.2"
- pgset "dst_mac  00:04:23:08:91:dc"
-
-
-# Time to run
-PGDEV=/proc/net/pktgen/pgctrl
-
- echo "Running... ctrl^C to stop"
- trap true INT
- pgset "start"
- echo "Done"
- cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-2-1 b/samples/pktgen/pktgen.conf-2-1
deleted file mode 100755
index e108e97..0000000
--- a/samples/pktgen/pktgen.conf-2-1
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/bash
-
-#modprobe pktgen
-
-
-function pgset() {
-    local result
-
-    echo $1 > $PGDEV
-
-    result=`cat $PGDEV | fgrep "Result: OK:"`
-    if [ "$result" = "" ]; then
-         cat $PGDEV | fgrep Result:
-    fi
-}
-
-# Config Start Here -----------------------------------------------------------
-
-
-# thread config
-# Each CPU has its own thread. Two CPU example. We add eth1 to the first
-# and leave the second idle.
-
-PGDEV=/proc/net/pktgen/kpktgend_0
-  echo "Removing all devices"
- pgset "rem_device_all"
-  echo "Adding eth1"
- pgset "add_device eth1"
-
-# We need to remove old config since we dont use this thread. We can only
-# one NIC on one CPU due to affinity reasons.
-
-PGDEV=/proc/net/pktgen/kpktgend_1
-  echo "Removing all devices"
- pgset "rem_device_all"
-
-# device config
-# delay 0 means maximum speed.
-
-CLONE_SKB="clone_skb 1000000"
-# NIC adds 4 bytes CRC
-PKT_SIZE="pkt_size 60"
-
-# COUNT 0 means forever
-#COUNT="count 0"
-COUNT="count 10000000"
-DELAY="delay 0"
-
-PGDEV=/proc/net/pktgen/eth1
-  echo "Configuring $PGDEV"
- pgset "$COUNT"
- pgset "$CLONE_SKB"
- pgset "$PKT_SIZE"
- pgset "$DELAY"
- pgset "dst 10.10.11.2"
- pgset "dst_mac  00:04:23:08:91:dc"
-
-
-# Time to run
-PGDEV=/proc/net/pktgen/pgctrl
-
- echo "Running... ctrl^C to stop"
- trap true INT
- pgset "start"
- echo "Done"
- cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-2-2 b/samples/pktgen/pktgen.conf-2-2
deleted file mode 100755
index acea155..0000000
--- a/samples/pktgen/pktgen.conf-2-2
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/bash
-
-#modprobe pktgen
-
-
-function pgset() {
-    local result
-
-    echo $1 > $PGDEV
-
-    result=`cat $PGDEV | fgrep "Result: OK:"`
-    if [ "$result" = "" ]; then
-         cat $PGDEV | fgrep Result:
-    fi
-}
-
-# Config Start Here -----------------------------------------------------------
-
-
-# thread config
-# Each CPU has its own thread. Two CPU example. We add eth1, eth2 respectively.
-
-PGDEV=/proc/net/pktgen/kpktgend_0
-  echo "Removing all devices"
- pgset "rem_device_all"
-  echo "Adding eth1"
- pgset "add_device eth1"
-
-PGDEV=/proc/net/pktgen/kpktgend_1
-  echo "Removing all devices"
- pgset "rem_device_all"
-  echo "Adding eth2"
- pgset "add_device eth2"
-
-
-# device config
-# delay 0 means maximum speed.
-
-CLONE_SKB="clone_skb 1000000"
-# NIC adds 4 bytes CRC
-PKT_SIZE="pkt_size 60"
-
-# COUNT 0 means forever
-#COUNT="count 0"
-COUNT="count 10000000"
-DELAY="delay 0"
-
-PGDEV=/proc/net/pktgen/eth1
-  echo "Configuring $PGDEV"
- pgset "$COUNT"
- pgset "$CLONE_SKB"
- pgset "$PKT_SIZE"
- pgset "$DELAY"
- pgset "dst 10.10.11.2"
- pgset "dst_mac  00:04:23:08:91:dc"
-
-PGDEV=/proc/net/pktgen/eth2
-  echo "Configuring $PGDEV"
- pgset "$COUNT"
- pgset "$CLONE_SKB"
- pgset "$PKT_SIZE"
- pgset "$DELAY"
- pgset "dst 192.168.2.2"
- pgset "dst_mac  00:04:23:08:91:de"
-
-# Time to run
-PGDEV=/proc/net/pktgen/pgctrl
-
- echo "Running... ctrl^C to stop"
- trap true INT
- pgset "start"
- echo "Done"
- cat /proc/net/pktgen/eth1 /proc/net/pktgen/eth2
diff --git a/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh b/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
new file mode 100755
index 0000000..cb15903
--- /dev/null
+++ b/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
@@ -0,0 +1,86 @@
+#!/bin/bash
+#
+# Benchmark script:
+#  - developed for benchmarking ingress qdisc path
+#
+# Script for injecting packets into the RX path of the stack with pktgen
+# "xmit_mode netif_receive".  With an invalid dst_mac this will only
+# measure the ingress code path, as packets get dropped in ip_rcv().
+#
+# This script doesn't really need any hardware.  It benchmarks the
+# software RX path just after the NIC driver level.  Bursting also
+# "removes" the SKB alloc/free overhead.
+#
+# Setup scenarios for measuring ingress qdisc (with invalid dst_mac):
+# ------------------------------------------------------------------
+# (1) no ingress (uses static_key_false(&ingress_needed))
+#
+# (2) ingress on other dev (changes ingress_needed and calls
+#     handle_ing() but exits early)
+#
+#  config:  tc qdisc add dev $SOMEDEV handle ffff: ingress
+#
+# (3) ingress on this dev, handle_ing() -> tc_classify()
+#
+#  config:  tc qdisc add dev $DEV handle ffff: ingress
+#
+# (4) ingress on this dev + drop at u32 classifier/action.
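+#
+#  config (a possible u32 match-all drop setup; syntax assumed, verify locally):
+#           tc qdisc add dev $DEV handle ffff: ingress
+#           tc filter add dev $DEV parent ffff: protocol ip \
+#               u32 match u32 0 0 action drop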
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+
+# Parameter parsing via include
+source ${basedir}/parameters.sh
+# Using an invalid DST_MAC will cause the packets to get dropped in
+# ip_rcv() which is part of the test
+[ -z "$DEST_IP" ] && DEST_IP="198.18.0.42"
+[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
+[ -z "$BURST" ] && BURST=1024
+
+# Base Config
+DELAY="0"        # Zero means max speed
+COUNT="10000000" # Zero means indefinitely
+
+# General cleanup everything since last run
+pg_ctrl "reset"
+
+# Threads are specified with the -t parameter, stored in $THREADS
+for ((thread = 0; thread < $THREADS; thread++)); do
+    # The device name is extended with @name, using the thread number to
+    # make them unique, but any name will do.
+    dev=${DEV}@${thread}
+
+    # Remove all other devices, then add_device $dev to this thread
+    pg_thread $thread "rem_device_all"
+    pg_thread $thread "add_device" $dev
+
+    # Base config of dev
+    pg_set $dev "flag QUEUE_MAP_CPU"
+    pg_set $dev "count $COUNT"
+    pg_set $dev "pkt_size $PKT_SIZE"
+    pg_set $dev "delay $DELAY"
+    pg_set $dev "flag NO_TIMESTAMP"
+
+    # Destination
+    pg_set $dev "dst_mac $DST_MAC"
+    pg_set $dev "dst $DEST_IP"
+
+    # Inject packets into the RX path of the stack
+    pg_set $dev "xmit_mode netif_receive"
+
+    # Bursting allows us to avoid measuring SKB alloc/free overhead
+    pg_set $dev "burst $BURST"
+done
+
+# start_run
+echo "Running... ctrl^C to stop" >&2
+pg_ctrl "start"
+echo "Done" >&2
+
+# Print results
+for ((thread = 0; thread < $THREADS; thread++)); do
+    dev=${DEV}@${thread}
+    echo "Device: $dev"
+    grep -A2 "Result:" /proc/net/pktgen/$dev
+done
diff --git a/samples/pktgen/pktgen_sample01_simple.sh b/samples/pktgen/pktgen_sample01_simple.sh
new file mode 100755
index 0000000..8c9d318
--- /dev/null
+++ b/samples/pktgen/pktgen_sample01_simple.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+#
+# Simple example:
+#  * pktgen sending with single thread and single interface
+#  * flow variation via random UDP source port
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+
+# Parameter parsing via include
+# - go look in parameters.sh to see which settings are available
+# - required param is the interface "-i" stored in $DEV
+source ${basedir}/parameters.sh
+#
+# Set some default params, if they didn't get set
+[ -z "$DEST_IP" ] && DEST_IP="198.18.0.42"
+[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
+# Example of enforcing the param "-m" for dst_mac
+[ -z "$DST_MAC" ] && usage && err 2 "Must specify -m dst_mac"
+
+# Base Config
+DELAY="0"        # Zero means max speed
+COUNT="100000"   # Zero means indefinitely
+
+# Flow variation random source port between min and max
+UDP_MIN=9
+UDP_MAX=109
+
+# General cleanup everything since last run
+# (especially important if other threads were configured by other scripts)
+pg_ctrl "reset"
+
+# Remove all other devices, then add_device $DEV to thread 0
+thread=0
+pg_thread $thread "rem_device_all"
+pg_thread $thread "add_device" $DEV
+
+# How many packets to send (zero means indefinitely)
+pg_set $DEV "count $COUNT"
+
+# Reduce alloc cost by sending the same SKB many times
+# - this obviously affects the randomness within the packet
+pg_set $DEV "clone_skb $CLONE_SKB"
+
+# Set packet size
+pg_set $DEV "pkt_size $PKT_SIZE"
+
+# Delay between packets (zero means max speed)
+pg_set $DEV "delay $DELAY"
+
+# Flag example: disable timestamping
+pg_set $DEV "flag NO_TIMESTAMP"
+
+# Destination
+pg_set $DEV "dst_mac $DST_MAC"
+pg_set $DEV "dst $DEST_IP"
+
+# Setup random UDP port src range
+pg_set $DEV "flag UDPSRC_RND"
+pg_set $DEV "udp_src_min $UDP_MIN"
+pg_set $DEV "udp_src_max $UDP_MAX"
+
+# start_run
+echo "Running... ctrl^C to stop" >&2
+pg_ctrl "start"
+echo "Done" >&2
+
+# Print results
+echo "Result device: $DEV"
+cat /proc/net/pktgen/$DEV
diff --git a/samples/pktgen/pktgen_sample02_multiqueue.sh b/samples/pktgen/pktgen_sample02_multiqueue.sh
new file mode 100755
index 0000000..32467ae
--- /dev/null
+++ b/samples/pktgen/pktgen_sample02_multiqueue.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+#
+# Multiqueue: Using pktgen threads for sending on multiple CPUs
+#  * adding devices to kernel threads
+#  * notice the naming scheme for keeping device names unique
+#  * naming scheme: dev@thread_number
+#  * flow variation via random UDP source port
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+#
+# Required param: -i dev in $DEV
+source ${basedir}/parameters.sh
+
+# Base Config
+DELAY="0"        # Zero means max speed
+COUNT="100000"   # Zero means indefinitely
+[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
+
+# Flow variation random source port between min and max
+UDP_MIN=9
+UDP_MAX=109
+
+# (example of setting default params in your script)
+[ -z "$DEST_IP" ] && DEST_IP="198.18.0.42"
+[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
+
+# General cleanup everything since last run
+pg_ctrl "reset"
+
+# Threads are specified with the -t parameter, stored in $THREADS
+for ((thread = 0; thread < $THREADS; thread++)); do
+    # The device name is extended with @name, using the thread number to
+    # make them unique, but any name will do.
+    dev=${DEV}@${thread}
+
+    # Remove all other devices, then add_device $dev to this thread
+    pg_thread $thread "rem_device_all"
+    pg_thread $thread "add_device" $dev
+
+    # Notice: config the queue to map to the CPU (mirrors smp_processor_id())
+    # It is beneficial to map IRQ /proc/irq/*/smp_affinity 1:1 to CPU number
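+    # e.g. to pin a (hypothetical) IRQ number 24 to CPU0:
+    #   echo 1 > /proc/irq/24/smp_affinity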
+    pg_set $dev "flag QUEUE_MAP_CPU"
+
+    # Base config of dev
+    pg_set $dev "count $COUNT"
+    pg_set $dev "clone_skb $CLONE_SKB"
+    pg_set $dev "pkt_size $PKT_SIZE"
+    pg_set $dev "delay $DELAY"
+
+    # Flag example: disable timestamping
+    pg_set $dev "flag NO_TIMESTAMP"
+
+    # Destination
+    pg_set $dev "dst_mac $DST_MAC"
+    pg_set $dev "dst $DEST_IP"
+
+    # Setup random UDP port src range
+    pg_set $dev "flag UDPSRC_RND"
+    pg_set $dev "udp_src_min $UDP_MIN"
+    pg_set $dev "udp_src_max $UDP_MAX"
+done
+
+# start_run
+echo "Running... ctrl^C to stop" >&2
+pg_ctrl "start"
+echo "Done" >&2
+
+# Print results
+for ((thread = 0; thread < $THREADS; thread++)); do
+    dev=${DEV}@${thread}
+    echo "Device: $dev"
+    grep -A2 "Result:" /proc/net/pktgen/$dev
+done
diff --git a/samples/pktgen/pktgen_sample03_burst_single_flow.sh b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
new file mode 100755
index 0000000..775f5d0
--- /dev/null
+++ b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+#
+# Script for max single flow performance
+#  - If correctly tuned[1], single-CPU 10G wirespeed with small pkts is possible[2]
+#
+# Using pktgen "burst" option (use -b $N)
+#  - To boost max performance
+#  - Available since: kernel v3.18
+#   * commit 38b2cf2982dc73 ("net: pktgen: packet bursting via skb->xmit_more")
+#  - This avoids writing the HW tailptr on every driver xmit
+#  - The performance boost is impressive, see commit and blog [2]
+#
+# Notice: this on purpose generates a single (UDP) flow towards the target;
+#   the reason is to only overload/activate a single CPU on the
+#   target host.  Avoiding randomness in pktgen also makes it faster.
+#
+# Tuning see:
+#  [1] http://netoptimizer.blogspot.dk/2014/06/pktgen-for-network-overload-testing.html
+#  [2] http://netoptimizer.blogspot.dk/2014/10/unlocked-10gbps-tx-wirespeed-smallest.html
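+#
+# Example invocation (hypothetical NIC eth1, 2 threads, burst of 32):
+#   ./pktgen_sample03_burst_single_flow.sh -i eth1 -t 2 -b 32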
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+
+# Parameter parsing via include
+source ${basedir}/parameters.sh
+# Set some default params, if they didn't get set
+[ -z "$DEST_IP" ]   && DEST_IP="198.18.0.42"
+[ -z "$DST_MAC" ]   && DST_MAC="90:e2:ba:ff:ff:ff"
+[ -z "$BURST" ]     && BURST=32
+[ -z "$CLONE_SKB" ] && CLONE_SKB="100000"
+
+# Base Config
+DELAY="0"  # Zero means max speed
+COUNT="0"  # Zero means indefinitely
+
+# General cleanup everything since last run
+pg_ctrl "reset"
+
+# Threads are specified with the -t parameter, stored in $THREADS
+for ((thread = 0; thread < $THREADS; thread++)); do
+    dev=${DEV}@${thread}
+
+    # Remove all other devices, then add_device $dev to this thread
+    pg_thread $thread "rem_device_all"
+    pg_thread $thread "add_device" $dev
+
+    # Base config
+    pg_set $dev "flag QUEUE_MAP_CPU"
+    pg_set $dev "count $COUNT"
+    pg_set $dev "clone_skb $CLONE_SKB"
+    pg_set $dev "pkt_size $PKT_SIZE"
+    pg_set $dev "delay $DELAY"
+    pg_set $dev "flag NO_TIMESTAMP"
+
+    # Destination
+    pg_set $dev "dst_mac $DST_MAC"
+    pg_set $dev "dst $DEST_IP"
+
+    # Setup burst; for easy testing, -b 0 disables bursting
+    # (internally in pktgen the default and minimum is burst=1)
+    if [[ ${BURST} -ne 0 ]]; then
+	pg_set $dev "burst $BURST"
+    else
+	info "$dev: Not using burst"
+    fi
+done
+
+# Runs if the user hits Ctrl-C
+function control_c() {
+    # Print results
+    for ((thread = 0; thread < $THREADS; thread++)); do
+	dev=${DEV}@${thread}
+	echo "Device: $dev"
+	grep -A2 "Result:" /proc/net/pktgen/$dev
+    done
+}
+# trap keyboard interrupt (Ctrl-C)
+trap control_c SIGINT
+
+echo "Running... ctrl^C to stop" >&2
+pg_ctrl "start"
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 98b0426..56e354f 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -35,9 +35,6 @@
 #include <linux/init.h>
 #include <linux/security.h>
 #include <linux/types.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter_ipv6.h>
 #include <linux/slab.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
index 6f67333..08c2a36 100644
--- a/tools/testing/selftests/net/psock_fanout.c
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -272,7 +272,7 @@
 	const int expect_hash[2][2]	= { { 15, 5 },  { 20, 5 } };
 	const int expect_hash_rb[2][2]	= { { 15, 5 },  { 20, 15 } };
 	const int expect_lb[2][2]	= { { 10, 10 }, { 18, 17 } };
-	const int expect_rb[2][2]	= { { 20, 0 },  { 20, 15 } };
+	const int expect_rb[2][2]	= { { 15, 5 },  { 20, 15 } };
 	const int expect_cpu0[2][2]	= { { 20, 0 },  { 20, 0 } };
 	const int expect_cpu1[2][2]	= { { 0, 20 },  { 0, 20 } };
 	int port_off = 2, tries = 5, ret;